/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.4"
#define DRV_MODULE_RELDATE	"Aug 05, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

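/* Note on ordering: PCI device table matching returns the first entry
 * that fits, so the HP entries below with specific subsystem IDs must
 * stay ahead of the PCI_ANY_ID catch-all entries for the same device
 * IDs.
 */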
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries; one of them
	 * must be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
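
/* Worked example of the wrap handling above (illustrative values only):
 * with 16-bit ring indices, tx_prod = 3 after wrapping and
 * tx_cons = 0xfffe give diff = 0xffff0005 as a u32; masking with 0xffff
 * recovers the 5 descriptors actually in flight.  A masked value of
 * BNX2_TX_DESC_CNT (256 indices for 255 usable entries) is clamped to
 * BNX2_MAX_TX_DESC_CNT.
 */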

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
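
/* Usage sketch for the indirect window pair above (0x160000 is just a
 * made-up offset for illustration):
 *
 *	u32 val = bnx2_reg_rd_ind(bp, 0x160000);
 *	bnx2_reg_wr_ind(bp, 0x160000, val | 1);
 *
 * Each helper programs BNX2_PCICFG_REG_WINDOW_ADDRESS and then accesses
 * BNX2_PCICFG_REG_WINDOW; indirect_lock keeps that two-step sequence
 * atomic against other users of the same window.
 */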

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
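
/* On the 5709, bnx2_ctx_wr() posts the write through CTX_CTX_DATA and
 * CTX_CTX_CTRL and then polls for the WRITE_REQ bit to clear, giving
 * the hardware roughly 5 * 5us to complete; on the older chips the
 * CTX_DATA_ADR/CTX_DATA path is direct and needs no polling.
 */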

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
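
/* Layout of the MDIO COMM word built by bnx2_read_phy() and
 * bnx2_write_phy(), as implied by the shifts above:
 *
 *	bits 25:21	PHY address (bp->phy_addr << 21)
 *	bits 20:16	register number (reg << 16)
 *	bits 15:0	data (BNX2_EMAC_MDIO_COMM_DATA)
 *
 * together with the COMMAND_READ/COMMAND_WRITE, DISEXT and START_BUSY
 * control bits; both helpers poll up to 50 * 10us for START_BUSY to
 * clear before giving up with -EBUSY.
 */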

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
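
/* The two back-to-back INT_ACK writes in bnx2_enable_int() appear
 * deliberate: the first acknowledges events up to last_status_idx with
 * MASK_INT still set, the second repeats the ack with the mask cleared,
 * unmasking the vector; the COAL_NOW command then requests an immediate
 * coalescing pass so nothing already pending waits for the next event.
 */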

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
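
/* intr_sem pairs bnx2_netif_stop() with bnx2_netif_start(): every stop
 * increments the counter via bnx2_disable_int_sync() and every start
 * decrements it, so NAPI and interrupts are only re-armed when the
 * outermost start brings the count back to zero.
 */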

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
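	/* Resulting layout, per the sizes computed below:
	 *
	 *	status_blk ............ one status block (or, with MSI-X,
	 *				BNX2_MAX_MSIX_HW_VEC slices of
	 *				BNX2_SBLK_MSIX_ALIGN_SIZE bytes each)
	 *	status_blk + status_blk_size ... statistics block
	 */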
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

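			/* The BMSR latches link-down events; per standard
			 * MII behavior the first read returns the latched
			 * value and the second the current link state,
			 * hence the back-to-back reads.
			 */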
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
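
/* The pause resolution above, condensed from Table 28B-3 of the
 * 802.3ab-1999 spec (Cap = PAUSE_CAP, Asym = PAUSE_ASYM):
 *
 *	local Cap/Asym	remote Cap/Asym		resulting flow_ctrl
 *	1 / x		1 / x			FLOW_CTRL_TX | FLOW_CTRL_RX
 *	1 / 1		0 / 1			FLOW_CTRL_RX
 *	0 / 1		1 / 1			FLOW_CTRL_TX
 *	anything else				none
 */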

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
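
/* Requested flow control maps to advertisement bits as follows (SerDes
 * variants in parentheses): TX|RX -> PAUSE_CAP (1000XPAUSE); TX only ->
 * PAUSE_ASYM (1000XPSE_ASYM); RX only -> PAUSE_CAP | PAUSE_ASYM
 * (1000XPAUSE | 1000XPSE_ASYM).
 */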

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autonegotiation involves
1831 		 * exchanging base pages plus 3 next pages and
1832 		 * normally completes in about 120 msec.
1833 		 */
1834 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1835 		bp->serdes_an_pending = 1;
1836 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1837 	} else {
1838 		bnx2_resolve_flow_ctrl(bp);
1839 		bnx2_set_mac_link(bp);
1840 	}
1841 
1842 	return 0;
1843 }
1844 
1845 #define ETHTOOL_ALL_FIBRE_SPEED						\
1846 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1847 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1848 		(ADVERTISED_1000baseT_Full)
1849 
1850 #define ETHTOOL_ALL_COPPER_SPEED					\
1851 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1852 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1853 	ADVERTISED_1000baseT_Full)
1854 
1855 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1856 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1857 
1858 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1859 
1860 static void
1861 bnx2_set_default_remote_link(struct bnx2 *bp)
1862 {
1863 	u32 link;
1864 
1865 	if (bp->phy_port == PORT_TP)
1866 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1867 	else
1868 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1869 
1870 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1871 		bp->req_line_speed = 0;
1872 		bp->autoneg |= AUTONEG_SPEED;
1873 		bp->advertising = ADVERTISED_Autoneg;
1874 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1875 			bp->advertising |= ADVERTISED_10baseT_Half;
1876 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1877 			bp->advertising |= ADVERTISED_10baseT_Full;
1878 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1879 			bp->advertising |= ADVERTISED_100baseT_Half;
1880 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1881 			bp->advertising |= ADVERTISED_100baseT_Full;
1882 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1883 			bp->advertising |= ADVERTISED_1000baseT_Full;
1884 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1885 			bp->advertising |= ADVERTISED_2500baseX_Full;
1886 	} else {
1887 		bp->autoneg = 0;
1888 		bp->advertising = 0;
1889 		bp->req_duplex = DUPLEX_FULL;
1890 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1891 			bp->req_line_speed = SPEED_10;
1892 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1893 				bp->req_duplex = DUPLEX_HALF;
1894 		}
1895 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1896 			bp->req_line_speed = SPEED_100;
1897 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1898 				bp->req_duplex = DUPLEX_HALF;
1899 		}
1900 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1901 			bp->req_line_speed = SPEED_1000;
1902 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1903 			bp->req_line_speed = SPEED_2500;
1904 	}
1905 }
1906 
1907 static void
1908 bnx2_set_default_link(struct bnx2 *bp)
1909 {
1910 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1911 		bnx2_set_default_remote_link(bp);
1912 		return;
1913 	}
1914 
1915 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1916 	bp->req_line_speed = 0;
1917 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1918 		u32 reg;
1919 
1920 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1921 
1922 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1923 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1924 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1925 			bp->autoneg = 0;
1926 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1927 			bp->req_duplex = DUPLEX_FULL;
1928 		}
1929 	} else
1930 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1931 }
1932 
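/* Write the incremented driver pulse sequence to the DRV_PULSE mailbox
 * in shared memory, through the PCI register window, so that the
 * bootcode knows the driver is still alive.
 */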
1933 static void
1934 bnx2_send_heart_beat(struct bnx2 *bp)
1935 {
1936 	u32 msg;
1937 	u32 addr;
1938 
1939 	spin_lock(&bp->indirect_lock);
1940 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1941 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1942 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1943 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1944 	spin_unlock(&bp->indirect_lock);
1945 }
1946 
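/* Handle a link event reported by the management firmware (remote PHY):
 * refresh the heartbeat if requested, then translate the
 * BNX2_LINK_STATUS word into link state, speed, duplex, flow control
 * and port type, and report any link change.
 */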
1947 static void
1948 bnx2_remote_phy_event(struct bnx2 *bp)
1949 {
1950 	u32 msg;
1951 	u8 link_up = bp->link_up;
1952 	u8 old_port;
1953 
1954 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1955 
1956 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1957 		bnx2_send_heart_beat(bp);
1958 
1959 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1960 
1961 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1962 		bp->link_up = 0;
1963 	else {
1964 		u32 speed;
1965 
1966 		bp->link_up = 1;
1967 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1968 		bp->duplex = DUPLEX_FULL;
1969 		switch (speed) {
1970 			case BNX2_LINK_STATUS_10HALF:
1971 				bp->duplex = DUPLEX_HALF;
1972 				/* fall through */
1973 			case BNX2_LINK_STATUS_10FULL:
1974 				bp->line_speed = SPEED_10;
1975 				break;
1976 			case BNX2_LINK_STATUS_100HALF:
1977 				bp->duplex = DUPLEX_HALF;
1978 				/* fall through */
1979 			case BNX2_LINK_STATUS_100BASE_T4:
1980 			case BNX2_LINK_STATUS_100FULL:
1981 				bp->line_speed = SPEED_100;
1982 				break;
1983 			case BNX2_LINK_STATUS_1000HALF:
1984 				bp->duplex = DUPLEX_HALF;
1985 				/* fall through */
1986 			case BNX2_LINK_STATUS_1000FULL:
1987 				bp->line_speed = SPEED_1000;
1988 				break;
1989 			case BNX2_LINK_STATUS_2500HALF:
1990 				bp->duplex = DUPLEX_HALF;
1991 				/* fall through */
1992 			case BNX2_LINK_STATUS_2500FULL:
1993 				bp->line_speed = SPEED_2500;
1994 				break;
1995 			default:
1996 				bp->line_speed = 0;
1997 				break;
1998 		}
1999 
2000 		bp->flow_ctrl = 0;
2001 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2002 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2003 			if (bp->duplex == DUPLEX_FULL)
2004 				bp->flow_ctrl = bp->req_flow_ctrl;
2005 		} else {
2006 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2007 				bp->flow_ctrl |= FLOW_CTRL_TX;
2008 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2009 				bp->flow_ctrl |= FLOW_CTRL_RX;
2010 		}
2011 
2012 		old_port = bp->phy_port;
2013 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2014 			bp->phy_port = PORT_FIBRE;
2015 		else
2016 			bp->phy_port = PORT_TP;
2017 
2018 		if (old_port != bp->phy_port)
2019 			bnx2_set_default_link(bp);
2020 
2021 	}
2022 	if (bp->link_up != link_up)
2023 		bnx2_report_link(bp);
2024 
2025 	bnx2_set_mac_link(bp);
2026 }
2027 
2028 static int
2029 bnx2_set_remote_link(struct bnx2 *bp)
2030 {
2031 	u32 evt_code;
2032 
2033 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2034 	switch (evt_code) {
2035 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2036 			bnx2_remote_phy_event(bp);
2037 			break;
2038 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2039 		default:
2040 			bnx2_send_heart_beat(bp);
2041 			break;
2042 	}
2043 	return 0;
2044 }
2045 
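/* Program the copper PHY according to the requested settings.  With
 * autoneg enabled, autonegotiation is restarted only if the
 * advertisement registers actually need to change.  Otherwise the
 * speed and duplex are forced through BMCR, briefly forcing the link
 * down first if it is currently up.
 */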
2046 static int
2047 bnx2_setup_copper_phy(struct bnx2 *bp)
2048 __releases(&bp->phy_lock)
2049 __acquires(&bp->phy_lock)
2050 {
2051 	u32 bmcr;
2052 	u32 new_bmcr;
2053 
2054 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2055 
2056 	if (bp->autoneg & AUTONEG_SPEED) {
2057 		u32 adv_reg, adv1000_reg;
2058 		u32 new_adv = 0;
2059 		u32 new_adv1000 = 0;
2060 
2061 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2062 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2063 			ADVERTISE_PAUSE_ASYM);
2064 
2065 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2066 		adv1000_reg &= PHY_ALL_1000_SPEED;
2067 
2068 		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2069 		new_adv |= ADVERTISE_CSMA;
2070 		new_adv |= bnx2_phy_get_pause_adv(bp);
2071 
2072 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2073 
2074 		if ((adv1000_reg != new_adv1000) ||
2075 			(adv_reg != new_adv) ||
2076 			((bmcr & BMCR_ANENABLE) == 0)) {
2077 
2078 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2079 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2080 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2081 				BMCR_ANENABLE);
2082 		}
2083 		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced,
			 * or vice-versa.
			 */
2086 
2087 			bnx2_resolve_flow_ctrl(bp);
2088 			bnx2_set_mac_link(bp);
2089 		}
2090 		return 0;
2091 	}
2092 
2093 	new_bmcr = 0;
2094 	if (bp->req_line_speed == SPEED_100) {
2095 		new_bmcr |= BMCR_SPEED100;
2096 	}
2097 	if (bp->req_duplex == DUPLEX_FULL) {
2098 		new_bmcr |= BMCR_FULLDPLX;
2099 	}
2100 	if (new_bmcr != bmcr) {
2101 		u32 bmsr;
2102 
2103 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2104 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2105 
2106 		if (bmsr & BMSR_LSTATUS) {
2107 			/* Force link down */
2108 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2109 			spin_unlock_bh(&bp->phy_lock);
2110 			msleep(50);
2111 			spin_lock_bh(&bp->phy_lock);
2112 
2113 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 		}
2116 
2117 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2118 
		/* Normally, the new speed is set up after the link has
		 * gone down and come back up. In some cases, the link
		 * will not go down, so we need to set up the new speed
		 * here.
		 */
2123 		if (bmsr & BMSR_LSTATUS) {
2124 			bp->line_speed = bp->req_line_speed;
2125 			bp->duplex = bp->req_duplex;
2126 			bnx2_resolve_flow_ctrl(bp);
2127 			bnx2_set_mac_link(bp);
2128 		}
2129 	} else {
2130 		bnx2_resolve_flow_ctrl(bp);
2131 		bnx2_set_mac_link(bp);
2132 	}
2133 	return 0;
2134 }
2135 
2136 static int
2137 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2138 __releases(&bp->phy_lock)
2139 __acquires(&bp->phy_lock)
2140 {
2141 	if (bp->loopback == MAC_LOOPBACK)
2142 		return 0;
2143 
2144 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2145 		return bnx2_setup_serdes_phy(bp, port);
2146 	}
2147 	else {
2148 		return bnx2_setup_copper_phy(bp);
2149 	}
2150 }
2151 
2152 static int
2153 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2154 {
2155 	u32 val;
2156 
2157 	bp->mii_bmcr = MII_BMCR + 0x10;
2158 	bp->mii_bmsr = MII_BMSR + 0x10;
2159 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2160 	bp->mii_adv = MII_ADVERTISE + 0x10;
2161 	bp->mii_lpa = MII_LPA + 0x10;
2162 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2163 
2164 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2165 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2166 
2167 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2168 	if (reset_phy)
2169 		bnx2_reset_phy(bp);
2170 
2171 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2172 
2173 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2174 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2175 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2176 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2177 
2178 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2179 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2180 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2181 		val |= BCM5708S_UP1_2G5;
2182 	else
2183 		val &= ~BCM5708S_UP1_2G5;
2184 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2185 
2186 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2187 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2188 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2189 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2190 
2191 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2192 
2193 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2194 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2195 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2196 
2197 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2198 
2199 	return 0;
2200 }
2201 
2202 static int
2203 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2204 {
2205 	u32 val;
2206 
2207 	if (reset_phy)
2208 		bnx2_reset_phy(bp);
2209 
2210 	bp->mii_up1 = BCM5708S_UP1;
2211 
2212 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2213 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2214 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2215 
2216 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2217 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2218 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2219 
2220 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2221 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2222 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2223 
2224 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2225 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2226 		val |= BCM5708S_UP1_2G5;
2227 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2228 	}
2229 
2230 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2231 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2232 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2233 		/* increase tx signal amplitude */
2234 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2235 			       BCM5708S_BLK_ADDR_TX_MISC);
2236 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2237 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2238 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2239 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2240 	}
2241 
2242 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2243 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2244 
2245 	if (val) {
2246 		u32 is_backplane;
2247 
2248 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2249 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2250 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2251 				       BCM5708S_BLK_ADDR_TX_MISC);
2252 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2253 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2254 				       BCM5708S_BLK_ADDR_DIG);
2255 		}
2256 	}
2257 	return 0;
2258 }
2259 
2260 static int
2261 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2262 {
2263 	if (reset_phy)
2264 		bnx2_reset_phy(bp);
2265 
2266 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2267 
2268 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2269 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2270 
2271 	if (bp->dev->mtu > 1500) {
2272 		u32 val;
2273 
2274 		/* Set extended packet length bit */
2275 		bnx2_write_phy(bp, 0x18, 0x7);
2276 		bnx2_read_phy(bp, 0x18, &val);
2277 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2278 
2279 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2280 		bnx2_read_phy(bp, 0x1c, &val);
2281 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2282 	}
2283 	else {
2284 		u32 val;
2285 
2286 		bnx2_write_phy(bp, 0x18, 0x7);
2287 		bnx2_read_phy(bp, 0x18, &val);
2288 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2289 
2290 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2291 		bnx2_read_phy(bp, 0x1c, &val);
2292 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2293 	}
2294 
2295 	return 0;
2296 }
2297 
2298 static int
2299 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2300 {
2301 	u32 val;
2302 
2303 	if (reset_phy)
2304 		bnx2_reset_phy(bp);
2305 
2306 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2307 		bnx2_write_phy(bp, 0x18, 0x0c00);
2308 		bnx2_write_phy(bp, 0x17, 0x000a);
2309 		bnx2_write_phy(bp, 0x15, 0x310b);
2310 		bnx2_write_phy(bp, 0x17, 0x201f);
2311 		bnx2_write_phy(bp, 0x15, 0x9506);
2312 		bnx2_write_phy(bp, 0x17, 0x401f);
2313 		bnx2_write_phy(bp, 0x15, 0x14e2);
2314 		bnx2_write_phy(bp, 0x18, 0x0400);
2315 	}
2316 
2317 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2318 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2319 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2320 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2321 		val &= ~(1 << 8);
2322 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2323 	}
2324 
2325 	if (bp->dev->mtu > 1500) {
2326 		/* Set extended packet length bit */
2327 		bnx2_write_phy(bp, 0x18, 0x7);
2328 		bnx2_read_phy(bp, 0x18, &val);
2329 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2330 
2331 		bnx2_read_phy(bp, 0x10, &val);
2332 		bnx2_write_phy(bp, 0x10, val | 0x1);
2333 	}
2334 	else {
2335 		bnx2_write_phy(bp, 0x18, 0x7);
2336 		bnx2_read_phy(bp, 0x18, &val);
2337 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2338 
2339 		bnx2_read_phy(bp, 0x10, &val);
2340 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2341 	}
2342 
2343 	/* ethernet@wirespeed */
2344 	bnx2_write_phy(bp, 0x18, 0x7007);
2345 	bnx2_read_phy(bp, 0x18, &val);
2346 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2347 	return 0;
2348 }
2349 
2351 static int
2352 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2353 __releases(&bp->phy_lock)
2354 __acquires(&bp->phy_lock)
2355 {
2356 	u32 val;
2357 	int rc = 0;
2358 
2359 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2360 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2361 
2362 	bp->mii_bmcr = MII_BMCR;
2363 	bp->mii_bmsr = MII_BMSR;
2364 	bp->mii_bmsr1 = MII_BMSR;
2365 	bp->mii_adv = MII_ADVERTISE;
2366 	bp->mii_lpa = MII_LPA;
2367 
2368 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2369 
2370 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2371 		goto setup_phy;
2372 
2373 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2374 	bp->phy_id = val << 16;
2375 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2376 	bp->phy_id |= val & 0xffff;
2377 
2378 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2379 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2380 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2381 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2382 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2383 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2384 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2385 	}
2386 	else {
2387 		rc = bnx2_init_copper_phy(bp, reset_phy);
2388 	}
2389 
2390 setup_phy:
2391 	if (!rc)
2392 		rc = bnx2_setup_phy(bp, bp->phy_port);
2393 
2394 	return rc;
2395 }
2396 
2397 static int
2398 bnx2_set_mac_loopback(struct bnx2 *bp)
2399 {
2400 	u32 mac_mode;
2401 
2402 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2403 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2404 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2405 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2406 	bp->link_up = 1;
2407 	return 0;
2408 }
2409 
2410 static int bnx2_test_link(struct bnx2 *);
2411 
2412 static int
2413 bnx2_set_phy_loopback(struct bnx2 *bp)
2414 {
2415 	u32 mac_mode;
2416 	int rc, i;
2417 
2418 	spin_lock_bh(&bp->phy_lock);
2419 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2420 			    BMCR_SPEED1000);
2421 	spin_unlock_bh(&bp->phy_lock);
2422 	if (rc)
2423 		return rc;
2424 
2425 	for (i = 0; i < 10; i++) {
2426 		if (bnx2_test_link(bp) == 0)
2427 			break;
2428 		msleep(100);
2429 	}
2430 
2431 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2432 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2433 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2434 		      BNX2_EMAC_MODE_25G_MODE);
2435 
2436 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2437 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2438 	bp->link_up = 1;
2439 	return 0;
2440 }
2441 
2442 static void
2443 bnx2_dump_mcp_state(struct bnx2 *bp)
2444 {
2445 	struct net_device *dev = bp->dev;
2446 	u32 mcp_p0, mcp_p1;
2447 
2448 	netdev_err(dev, "<--- start MCP states dump --->\n");
2449 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2450 		mcp_p0 = BNX2_MCP_STATE_P0;
2451 		mcp_p1 = BNX2_MCP_STATE_P1;
2452 	} else {
2453 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2454 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2455 	}
2456 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2457 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2458 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2459 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2460 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2461 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
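	/* The program counter is read twice so that the dump shows
	 * whether the MCP is making progress or is stuck on one
	 * instruction.
	 */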
2462 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2463 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2465 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2466 	netdev_err(dev, "DEBUG: shmem states:\n");
2467 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2468 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2469 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2470 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2471 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2472 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2473 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2474 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2475 	pr_cont(" condition[%08x]\n",
2476 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2477 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2478 	DP_SHMEM_LINE(bp, 0x3cc);
2479 	DP_SHMEM_LINE(bp, 0x3dc);
2480 	DP_SHMEM_LINE(bp, 0x3ec);
2481 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2482 	netdev_err(dev, "<--- end MCP states dump --->\n");
2483 }
2484 
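/* Post a message to the bootcode through the DRV_MB mailbox, tagged
 * with an incrementing sequence number.  If an ack is requested, poll
 * the FW_MB mailbox for up to BNX2_FW_ACK_TIME_OUT_MS for the matching
 * sequence number, and report a timeout back to the firmware if it
 * never arrives.
 */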
2485 static int
2486 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2487 {
2488 	int i;
2489 	u32 val;
2490 
2491 	bp->fw_wr_seq++;
2492 	msg_data |= bp->fw_wr_seq;
2493 
2494 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2495 
2496 	if (!ack)
2497 		return 0;
2498 
2499 	/* wait for an acknowledgement. */
2500 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2501 		msleep(10);
2502 
2503 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2504 
2505 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2506 			break;
2507 	}
2508 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2509 		return 0;
2510 
2511 	/* If we timed out, inform the firmware that this is the case. */
2512 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2513 		msg_data &= ~BNX2_DRV_MSG_CODE;
2514 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2515 
2516 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2517 		if (!silent) {
2518 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2519 			bnx2_dump_mcp_state(bp);
2520 		}
2521 
2522 		return -EBUSY;
2523 	}
2524 
2525 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2526 		return -EIO;
2527 
2528 	return 0;
2529 }
2530 
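/* On the 5709, the context memory lives in host memory.  Wait for the
 * context memory initialization to complete, then program the host
 * page table with the DMA address of each pre-allocated context block.
 */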
2531 static int
2532 bnx2_init_5709_context(struct bnx2 *bp)
2533 {
2534 	int i, ret = 0;
2535 	u32 val;
2536 
2537 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2538 	val |= (BNX2_PAGE_BITS - 8) << 16;
2539 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2540 	for (i = 0; i < 10; i++) {
2541 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2542 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2543 			break;
2544 		udelay(2);
2545 	}
2546 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2547 		return -EBUSY;
2548 
2549 	for (i = 0; i < bp->ctx_pages; i++) {
2550 		int j;
2551 
2552 		if (bp->ctx_blk[i])
2553 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2554 		else
2555 			return -ENOMEM;
2556 
2557 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2558 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2559 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2560 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2561 			(u64) bp->ctx_blk_mapping[i] >> 32);
2562 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2563 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2564 		for (j = 0; j < 10; j++) {
2565 
2566 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2567 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2568 				break;
2569 			udelay(5);
2570 		}
2571 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2572 			ret = -EBUSY;
2573 			break;
2574 		}
2575 	}
2576 	return ret;
2577 }
2578 
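/* Initialize the on-chip context memory (chips other than the 5709):
 * map each virtual CID to its physical context page, applying the
 * 5706 A0 remapping, and zero out every context.
 */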
2579 static void
2580 bnx2_init_context(struct bnx2 *bp)
2581 {
2582 	u32 vcid;
2583 
2584 	vcid = 96;
2585 	while (vcid) {
2586 		u32 vcid_addr, pcid_addr, offset;
2587 		int i;
2588 
2589 		vcid--;
2590 
2591 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2592 			u32 new_vcid;
2593 
2594 			vcid_addr = GET_PCID_ADDR(vcid);
2595 			if (vcid & 0x8) {
2596 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2597 			}
2598 			else {
2599 				new_vcid = vcid;
2600 			}
2601 			pcid_addr = GET_PCID_ADDR(new_vcid);
2602 		}
2603 		else {
			vcid_addr = GET_CID_ADDR(vcid);
2605 			pcid_addr = vcid_addr;
2606 		}
2607 
2608 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2609 			vcid_addr += (i << PHY_CTX_SHIFT);
2610 			pcid_addr += (i << PHY_CTX_SHIFT);
2611 
2612 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2613 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2614 
2615 			/* Zero out the context. */
2616 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2617 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2618 		}
2619 	}
2620 }
2621 
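/* Work around bad RX buffer memory blocks: allocate every free mbuf
 * from the pool, remember the good ones (bit 9 clear), and free only
 * those back, leaving the bad blocks permanently allocated.
 */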
2622 static int
2623 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2624 {
2625 	u16 *good_mbuf;
2626 	u32 good_mbuf_cnt;
2627 	u32 val;
2628 
2629 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2630 	if (good_mbuf == NULL)
2631 		return -ENOMEM;
2632 
2633 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2635 
2636 	good_mbuf_cnt = 0;
2637 
2638 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2639 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2640 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2641 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2642 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2643 
2644 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2645 
2646 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2647 
2648 		/* The addresses with Bit 9 set are bad memory blocks. */
2649 		if (!(val & (1 << 9))) {
2650 			good_mbuf[good_mbuf_cnt] = (u16) val;
2651 			good_mbuf_cnt++;
2652 		}
2653 
2654 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2655 	}
2656 
	/* Free the good ones back to the mbuf pool, thus discarding
	 * all the bad ones.
	 */
2659 	while (good_mbuf_cnt) {
2660 		good_mbuf_cnt--;
2661 
2662 		val = good_mbuf[good_mbuf_cnt];
2663 		val = (val << 9) | val | 1;
2664 
2665 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2666 	}
2667 	kfree(good_mbuf);
2668 	return 0;
2669 }
2670 
2671 static void
2672 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2673 {
2674 	u32 val;
2675 
2676 	val = (mac_addr[0] << 8) | mac_addr[1];
2677 
2678 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2679 
2680 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2681 		(mac_addr[4] << 8) | mac_addr[5];
2682 
2683 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2684 }
2685 
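/* Allocate and DMA-map a page for the RX page ring (used for the
 * fragment portion of jumbo and split packets) and point the buffer
 * descriptor at it.
 */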
2686 static inline int
2687 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2688 {
2689 	dma_addr_t mapping;
2690 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2691 	struct bnx2_rx_bd *rxbd =
2692 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2693 	struct page *page = alloc_page(gfp);
2694 
2695 	if (!page)
2696 		return -ENOMEM;
2697 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2698 			       PCI_DMA_FROMDEVICE);
2699 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2700 		__free_page(page);
2701 		return -EIO;
2702 	}
2703 
2704 	rx_pg->page = page;
2705 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2706 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2707 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2708 	return 0;
2709 }
2710 
2711 static void
2712 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2713 {
2714 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2715 	struct page *page = rx_pg->page;
2716 
2717 	if (!page)
2718 		return;
2719 
2720 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2721 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2722 
2723 	__free_page(page);
2724 	rx_pg->page = NULL;
2725 }
2726 
2727 static inline int
2728 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 {
2730 	u8 *data;
2731 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2732 	dma_addr_t mapping;
2733 	struct bnx2_rx_bd *rxbd =
2734 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2735 
2736 	data = kmalloc(bp->rx_buf_size, gfp);
2737 	if (!data)
2738 		return -ENOMEM;
2739 
2740 	mapping = dma_map_single(&bp->pdev->dev,
2741 				 get_l2_fhdr(data),
2742 				 bp->rx_buf_use_size,
2743 				 PCI_DMA_FROMDEVICE);
2744 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2745 		kfree(data);
2746 		return -EIO;
2747 	}
2748 
2749 	rx_buf->data = data;
2750 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2751 
2752 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2753 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2754 
2755 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2756 
2757 	return 0;
2758 }
2759 
2760 static int
2761 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2762 {
2763 	struct status_block *sblk = bnapi->status_blk.msi;
2764 	u32 new_link_state, old_link_state;
2765 	int is_set = 1;
2766 
2767 	new_link_state = sblk->status_attn_bits & event;
2768 	old_link_state = sblk->status_attn_bits_ack & event;
2769 	if (new_link_state != old_link_state) {
2770 		if (new_link_state)
2771 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2772 		else
2773 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2774 	} else
2775 		is_set = 0;
2776 
2777 	return is_set;
2778 }
2779 
2780 static void
2781 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2782 {
2783 	spin_lock(&bp->phy_lock);
2784 
2785 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2786 		bnx2_set_link(bp);
2787 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2788 		bnx2_set_remote_link(bp);
2789 
	spin_unlock(&bp->phy_lock);
}
2793 
2794 static inline u16
2795 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2796 {
2797 	u16 cons;
2798 
2799 	/* Tell compiler that status block fields can change. */
2800 	barrier();
2801 	cons = *bnapi->hw_tx_cons_ptr;
2802 	barrier();
2803 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2804 		cons++;
2805 	return cons;
2806 }
2807 
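/* Reclaim TX buffers completed by the hardware: walk the ring from the
 * software consumer index to the hardware consumer index, unmapping
 * and freeing each skb, then wake the queue if it was stopped and
 * enough descriptors have become available.
 */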
2808 static int
2809 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2810 {
2811 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2812 	u16 hw_cons, sw_cons, sw_ring_cons;
2813 	int tx_pkt = 0, index;
2814 	unsigned int tx_bytes = 0;
2815 	struct netdev_queue *txq;
2816 
2817 	index = (bnapi - bp->bnx2_napi);
2818 	txq = netdev_get_tx_queue(bp->dev, index);
2819 
2820 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2821 	sw_cons = txr->tx_cons;
2822 
2823 	while (sw_cons != hw_cons) {
2824 		struct bnx2_sw_tx_bd *tx_buf;
2825 		struct sk_buff *skb;
2826 		int i, last;
2827 
2828 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2829 
2830 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2831 		skb = tx_buf->skb;
2832 
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2834 		prefetch(&skb->end);
2835 
2836 		/* partial BD completions possible with TSO packets */
2837 		if (tx_buf->is_gso) {
2838 			u16 last_idx, last_ring_idx;
2839 
2840 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2841 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2842 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2843 				last_idx++;
2844 			}
2845 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2846 				break;
2847 			}
2848 		}
2849 
2850 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2851 			skb_headlen(skb), PCI_DMA_TODEVICE);
2852 
2853 		tx_buf->skb = NULL;
2854 		last = tx_buf->nr_frags;
2855 
2856 		for (i = 0; i < last; i++) {
2857 			struct bnx2_sw_tx_bd *tx_buf;
2858 
2859 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2860 
2861 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2862 			dma_unmap_page(&bp->pdev->dev,
2863 				dma_unmap_addr(tx_buf, mapping),
2864 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2865 				PCI_DMA_TODEVICE);
2866 		}
2867 
2868 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2869 
2870 		tx_bytes += skb->len;
2871 		dev_kfree_skb(skb);
2872 		tx_pkt++;
2873 		if (tx_pkt == budget)
2874 			break;
2875 
2876 		if (hw_cons == sw_cons)
2877 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2878 	}
2879 
2880 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2881 	txr->hw_tx_cons = hw_cons;
2882 	txr->tx_cons = sw_cons;
2883 
2884 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2885 	 * before checking for netif_tx_queue_stopped().  Without the
2886 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2887 	 * will miss it and cause the queue to be stopped forever.
2888 	 */
2889 	smp_mb();
2890 
2891 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2892 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2893 		__netif_tx_lock(txq, smp_processor_id());
2894 		if ((netif_tx_queue_stopped(txq)) &&
2895 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2896 			netif_tx_wake_queue(txq);
2897 		__netif_tx_unlock(txq);
2898 	}
2899 
2900 	return tx_pkt;
2901 }
2902 
2903 static void
2904 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2905 			struct sk_buff *skb, int count)
2906 {
2907 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2908 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2909 	int i;
2910 	u16 hw_prod, prod;
2911 	u16 cons = rxr->rx_pg_cons;
2912 
2913 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2914 
2915 	/* The caller was unable to allocate a new page to replace the
2916 	 * last one in the frags array, so we need to recycle that page
2917 	 * and then free the skb.
2918 	 */
2919 	if (skb) {
2920 		struct page *page;
2921 		struct skb_shared_info *shinfo;
2922 
2923 		shinfo = skb_shinfo(skb);
2924 		shinfo->nr_frags--;
2925 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2926 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2927 
2928 		cons_rx_pg->page = page;
2929 		dev_kfree_skb(skb);
2930 	}
2931 
2932 	hw_prod = rxr->rx_pg_prod;
2933 
2934 	for (i = 0; i < count; i++) {
2935 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2936 
2937 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2938 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2939 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2940 						[BNX2_RX_IDX(cons)];
2941 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2942 						[BNX2_RX_IDX(prod)];
2943 
2944 		if (prod != cons) {
2945 			prod_rx_pg->page = cons_rx_pg->page;
2946 			cons_rx_pg->page = NULL;
2947 			dma_unmap_addr_set(prod_rx_pg, mapping,
2948 				dma_unmap_addr(cons_rx_pg, mapping));
2949 
2950 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2951 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2952 
2953 		}
2954 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2955 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2956 	}
2957 	rxr->rx_pg_prod = hw_prod;
2958 	rxr->rx_pg_cons = cons;
2959 }
2960 
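/* Recycle an RX data buffer that is not being passed up the stack:
 * move the buffer and its DMA mapping from the consumer slot to the
 * producer slot so that it can be reused immediately.
 */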
2961 static inline void
2962 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2963 		   u8 *data, u16 cons, u16 prod)
2964 {
2965 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2966 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2967 
2968 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2969 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2970 
2971 	dma_sync_single_for_device(&bp->pdev->dev,
2972 		dma_unmap_addr(cons_rx_buf, mapping),
2973 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2974 
2975 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2976 
2977 	prod_rx_buf->data = data;
2978 
2979 	if (cons == prod)
2980 		return;
2981 
2982 	dma_unmap_addr_set(prod_rx_buf, mapping,
2983 			dma_unmap_addr(cons_rx_buf, mapping));
2984 
2985 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2986 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2987 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2988 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2989 }
2990 
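/* Build an skb around a received data buffer.  A replacement buffer is
 * allocated first; if that fails, the old buffer is recycled and the
 * packet dropped.  For split or jumbo packets (hdr_len != 0), pages
 * from the page ring are attached as fragments after the header.
 */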
2991 static struct sk_buff *
2992 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2993 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2994 	    u32 ring_idx)
2995 {
2996 	int err;
2997 	u16 prod = ring_idx & 0xffff;
2998 	struct sk_buff *skb;
2999 
3000 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3001 	if (unlikely(err)) {
3002 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3003 error:
3004 		if (hdr_len) {
3005 			unsigned int raw_len = len + 4;
3006 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3007 
3008 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3009 		}
3010 		return NULL;
3011 	}
3012 
3013 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3014 			 PCI_DMA_FROMDEVICE);
3015 	skb = build_skb(data, 0);
3016 	if (!skb) {
3017 		kfree(data);
3018 		goto error;
3019 	}
3020 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3021 	if (hdr_len == 0) {
3022 		skb_put(skb, len);
3023 		return skb;
3024 	} else {
3025 		unsigned int i, frag_len, frag_size, pages;
3026 		struct bnx2_sw_pg *rx_pg;
3027 		u16 pg_cons = rxr->rx_pg_cons;
3028 		u16 pg_prod = rxr->rx_pg_prod;
3029 
3030 		frag_size = len + 4 - hdr_len;
3031 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3032 		skb_put(skb, hdr_len);
3033 
3034 		for (i = 0; i < pages; i++) {
3035 			dma_addr_t mapping_old;
3036 
3037 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3038 			if (unlikely(frag_len <= 4)) {
3039 				unsigned int tail = 4 - frag_len;
3040 
3041 				rxr->rx_pg_cons = pg_cons;
3042 				rxr->rx_pg_prod = pg_prod;
3043 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3044 							pages - i);
3045 				skb->len -= tail;
3046 				if (i == 0) {
3047 					skb->tail -= tail;
3048 				} else {
3049 					skb_frag_t *frag =
3050 						&skb_shinfo(skb)->frags[i - 1];
3051 					skb_frag_size_sub(frag, tail);
3052 					skb->data_len -= tail;
3053 				}
3054 				return skb;
3055 			}
3056 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3057 
3058 			/* Don't unmap yet.  If we're unable to allocate a new
3059 			 * page, we need to recycle the page and the DMA addr.
3060 			 */
3061 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3062 			if (i == pages - 1)
3063 				frag_len -= 4;
3064 
3065 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3066 			rx_pg->page = NULL;
3067 
3068 			err = bnx2_alloc_rx_page(bp, rxr,
3069 						 BNX2_RX_PG_RING_IDX(pg_prod),
3070 						 GFP_ATOMIC);
3071 			if (unlikely(err)) {
3072 				rxr->rx_pg_cons = pg_cons;
3073 				rxr->rx_pg_prod = pg_prod;
3074 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3075 							pages - i);
3076 				return NULL;
3077 			}
3078 
3079 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3080 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3081 
3082 			frag_size -= frag_len;
3083 			skb->data_len += frag_len;
3084 			skb->truesize += PAGE_SIZE;
3085 			skb->len += frag_len;
3086 
3087 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3088 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3089 		}
3090 		rxr->rx_pg_prod = pg_prod;
3091 		rxr->rx_pg_cons = pg_cons;
3092 	}
3093 	return skb;
3094 }
3095 
3096 static inline u16
3097 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3098 {
3099 	u16 cons;
3100 
3101 	/* Tell compiler that status block fields can change. */
3102 	barrier();
3103 	cons = *bnapi->hw_rx_cons_ptr;
3104 	barrier();
3105 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3106 		cons++;
3107 	return cons;
3108 }
3109 
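/* Main receive path: process completed RX descriptors up to the NAPI
 * budget.  Small packets are copied into a freshly allocated skb and
 * the data buffer recycled; larger packets are handed to bnx2_rx_skb().
 * The ring indices and buffer sequence are written back to the
 * hardware at the end.
 */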
3110 static int
3111 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3112 {
3113 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3114 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3115 	struct l2_fhdr *rx_hdr;
3116 	int rx_pkt = 0, pg_ring_used = 0;
3117 
3118 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3119 	sw_cons = rxr->rx_cons;
3120 	sw_prod = rxr->rx_prod;
3121 
3122 	/* Memory barrier necessary as speculative reads of the rx
3123 	 * buffer can be ahead of the index in the status block
3124 	 */
3125 	rmb();
3126 	while (sw_cons != hw_cons) {
3127 		unsigned int len, hdr_len;
3128 		u32 status;
3129 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3130 		struct sk_buff *skb;
3131 		dma_addr_t dma_addr;
3132 		u8 *data;
3133 		u16 next_ring_idx;
3134 
3135 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3136 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3137 
3138 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3139 		data = rx_buf->data;
3140 		rx_buf->data = NULL;
3141 
3142 		rx_hdr = get_l2_fhdr(data);
3143 		prefetch(rx_hdr);
3144 
3145 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3146 
3147 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3148 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3149 			PCI_DMA_FROMDEVICE);
3150 
3151 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3152 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3153 		prefetch(get_l2_fhdr(next_rx_buf->data));
3154 
3155 		len = rx_hdr->l2_fhdr_pkt_len;
3156 		status = rx_hdr->l2_fhdr_status;
3157 
3158 		hdr_len = 0;
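		/* For split packets, the header length is carried in the
		 * l2_fhdr_ip_xsum field of the frame header.
		 */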
3159 		if (status & L2_FHDR_STATUS_SPLIT) {
3160 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3161 			pg_ring_used = 1;
3162 		} else if (len > bp->rx_jumbo_thresh) {
3163 			hdr_len = bp->rx_jumbo_thresh;
3164 			pg_ring_used = 1;
3165 		}
3166 
3167 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3168 				       L2_FHDR_ERRORS_PHY_DECODE |
3169 				       L2_FHDR_ERRORS_ALIGNMENT |
3170 				       L2_FHDR_ERRORS_TOO_SHORT |
3171 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3172 
3173 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3174 					  sw_ring_prod);
3175 			if (pg_ring_used) {
3176 				int pages;
3177 
3178 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3179 
3180 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3181 			}
3182 			goto next_rx;
3183 		}
3184 
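		/* The packet length from the frame header includes the
		 * 4-byte frame CRC; trim it off.
		 */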
3185 		len -= 4;
3186 
3187 		if (len <= bp->rx_copy_thresh) {
3188 			skb = netdev_alloc_skb(bp->dev, len + 6);
3189 			if (skb == NULL) {
3190 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3191 						  sw_ring_prod);
3192 				goto next_rx;
3193 			}
3194 
3195 			/* aligned copy */
3196 			memcpy(skb->data,
3197 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3198 			       len + 6);
3199 			skb_reserve(skb, 6);
3200 			skb_put(skb, len);
3201 
3202 			bnx2_reuse_rx_data(bp, rxr, data,
3203 				sw_ring_cons, sw_ring_prod);
3204 
3205 		} else {
3206 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3207 					  (sw_ring_cons << 16) | sw_ring_prod);
3208 			if (!skb)
3209 				goto next_rx;
3210 		}
3211 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3212 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3214 
3215 		skb->protocol = eth_type_trans(skb, bp->dev);
3216 
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != ETH_P_8021Q)) {
3219 
3220 			dev_kfree_skb(skb);
3221 			goto next_rx;
3222 
3223 		}
3224 
3225 		skb_checksum_none_assert(skb);
3226 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3227 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3229 
3230 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3232 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3233 		}
3234 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3235 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236 		     L2_FHDR_STATUS_USE_RXHASH))
3237 			skb->rxhash = rx_hdr->l2_fhdr_hash;
3238 
3239 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3240 		napi_gro_receive(&bnapi->napi, skb);
3241 		rx_pkt++;
3242 
3243 next_rx:
3244 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3245 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3246 
		if (rx_pkt == budget)
3248 			break;
3249 
3250 		/* Refresh hw_cons to see if there is new work */
3251 		if (sw_cons == hw_cons) {
3252 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3253 			rmb();
3254 		}
3255 	}
3256 	rxr->rx_cons = sw_cons;
3257 	rxr->rx_prod = sw_prod;
3258 
3259 	if (pg_ring_used)
3260 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3261 
3262 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3263 
3264 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3265 
3266 	mmiowb();
3267 
	return rx_pkt;
}
3271 
3272 /* MSI ISR - The only difference between this and the INTx ISR
3273  * is that the MSI interrupt is always serviced.
3274  */
3275 static irqreturn_t
3276 bnx2_msi(int irq, void *dev_instance)
3277 {
3278 	struct bnx2_napi *bnapi = dev_instance;
3279 	struct bnx2 *bp = bnapi->bp;
3280 
3281 	prefetch(bnapi->status_blk.msi);
3282 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3283 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3284 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3285 
3286 	/* Return here if interrupt is disabled. */
3287 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3288 		return IRQ_HANDLED;
3289 
3290 	napi_schedule(&bnapi->napi);
3291 
3292 	return IRQ_HANDLED;
3293 }
3294 
3295 static irqreturn_t
3296 bnx2_msi_1shot(int irq, void *dev_instance)
3297 {
3298 	struct bnx2_napi *bnapi = dev_instance;
3299 	struct bnx2 *bp = bnapi->bp;
3300 
3301 	prefetch(bnapi->status_blk.msi);
3302 
3303 	/* Return here if interrupt is disabled. */
3304 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3305 		return IRQ_HANDLED;
3306 
3307 	napi_schedule(&bnapi->napi);
3308 
3309 	return IRQ_HANDLED;
3310 }
3311 
3312 static irqreturn_t
3313 bnx2_interrupt(int irq, void *dev_instance)
3314 {
3315 	struct bnx2_napi *bnapi = dev_instance;
3316 	struct bnx2 *bp = bnapi->bp;
3317 	struct status_block *sblk = bnapi->status_blk.msi;
3318 
	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block write posted prior to the
	 * interrupt has reached host memory.  Reading a register will
	 * flush the status block.  When using MSI, the MSI message will
	 * always complete after the status block write.
	 */
3325 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3326 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3327 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3328 		return IRQ_NONE;
3329 
3330 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3333 
3334 	/* Read back to deassert IRQ immediately to avoid too many
3335 	 * spurious interrupts.
3336 	 */
3337 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3338 
3339 	/* Return here if interrupt is shared and is disabled. */
3340 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3341 		return IRQ_HANDLED;
3342 
3343 	if (napi_schedule_prep(&bnapi->napi)) {
3344 		bnapi->last_status_idx = sblk->status_idx;
3345 		__napi_schedule(&bnapi->napi);
3346 	}
3347 
3348 	return IRQ_HANDLED;
3349 }
3350 
3351 static inline int
3352 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3353 {
3354 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3355 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3356 
3357 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3358 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3359 		return 1;
3360 	return 0;
3361 }
3362 
3363 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3364 				 STATUS_ATTN_BITS_TIMER_ABORT)
3365 
3366 static inline int
3367 bnx2_has_work(struct bnx2_napi *bnapi)
3368 {
3369 	struct status_block *sblk = bnapi->status_blk.msi;
3370 
3371 	if (bnx2_has_fast_work(bnapi))
3372 		return 1;
3373 
3374 #ifdef BCM_CNIC
3375 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3376 		return 1;
3377 #endif
3378 
3379 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3380 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3381 		return 1;
3382 
3383 	return 0;
3384 }
3385 
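/* Work around the possibility of a missed MSI: if there is work
 * pending but the status index has not moved since the last check,
 * momentarily disable and re-enable MSI and invoke the handler
 * directly.
 */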
3386 static void
3387 bnx2_chk_missed_msi(struct bnx2 *bp)
3388 {
3389 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3390 	u32 msi_ctrl;
3391 
3392 	if (bnx2_has_work(bnapi)) {
3393 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3394 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3395 			return;
3396 
3397 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3398 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3399 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3400 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3401 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3402 		}
3403 	}
3404 
3405 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3406 }
3407 
3408 #ifdef BCM_CNIC
3409 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3410 {
3411 	struct cnic_ops *c_ops;
3412 
3413 	if (!bnapi->cnic_present)
3414 		return;
3415 
3416 	rcu_read_lock();
3417 	c_ops = rcu_dereference(bp->cnic_ops);
3418 	if (c_ops)
3419 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3420 						      bnapi->status_blk.msi);
3421 	rcu_read_unlock();
3422 }
3423 #endif
3424 
3425 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3426 {
3427 	struct status_block *sblk = bnapi->status_blk.msi;
3428 	u32 status_attn_bits = sblk->status_attn_bits;
3429 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3430 
3431 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3432 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3433 
3434 		bnx2_phy_int(bp, bnapi);
3435 
3436 		/* This is needed to take care of transient status
3437 		 * during link changes.
3438 		 */
3439 		BNX2_WR(bp, BNX2_HC_COMMAND,
3440 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3441 		BNX2_RD(bp, BNX2_HC_COMMAND);
3442 	}
3443 }
3444 
3445 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446 			  int work_done, int budget)
3447 {
3448 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3450 
3451 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3452 		bnx2_tx_int(bp, bnapi, 0);
3453 
3454 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3455 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3456 
3457 	return work_done;
3458 }
3459 
3460 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3461 {
3462 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3463 	struct bnx2 *bp = bnapi->bp;
3464 	int work_done = 0;
3465 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3466 
3467 	while (1) {
3468 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3469 		if (unlikely(work_done >= budget))
3470 			break;
3471 
3472 		bnapi->last_status_idx = sblk->status_idx;
3473 		/* status idx must be read before checking for more work. */
3474 		rmb();
3475 		if (likely(!bnx2_has_fast_work(bnapi))) {
3476 
3477 			napi_complete(napi);
3478 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3479 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3480 				bnapi->last_status_idx);
3481 			break;
3482 		}
3483 	}
3484 	return work_done;
3485 }
3486 
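/* NAPI poll handler for the INTx/MSI case: service link events, TX and
 * RX work, and any CNIC work, then re-enable interrupts once no work
 * remains within the budget.
 */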
3487 static int bnx2_poll(struct napi_struct *napi, int budget)
3488 {
3489 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3490 	struct bnx2 *bp = bnapi->bp;
3491 	int work_done = 0;
3492 	struct status_block *sblk = bnapi->status_blk.msi;
3493 
3494 	while (1) {
3495 		bnx2_poll_link(bp, bnapi);
3496 
3497 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3498 
3499 #ifdef BCM_CNIC
3500 		bnx2_poll_cnic(bp, bnapi);
3501 #endif
3502 
3503 		/* bnapi->last_status_idx is used below to tell the hw how
3504 		 * much work has been processed, so we must read it before
3505 		 * checking for more work.
3506 		 */
3507 		bnapi->last_status_idx = sblk->status_idx;
3508 
3509 		if (unlikely(work_done >= budget))
3510 			break;
3511 
3512 		rmb();
3513 		if (likely(!bnx2_has_work(bnapi))) {
3514 			napi_complete(napi);
3515 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3516 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3517 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3518 					bnapi->last_status_idx);
3519 				break;
3520 			}
3521 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3522 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3523 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3524 				bnapi->last_status_idx);
3525 
3526 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3527 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3528 				bnapi->last_status_idx);
3529 			break;
3530 		}
3531 	}
3532 
3533 	return work_done;
3534 }
3535 
3536 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3537  * from set_multicast.
3538  */
3539 static void
3540 bnx2_set_rx_mode(struct net_device *dev)
3541 {
3542 	struct bnx2 *bp = netdev_priv(dev);
3543 	u32 rx_mode, sort_mode;
3544 	struct netdev_hw_addr *ha;
3545 	int i;
3546 
3547 	if (!netif_running(dev))
3548 		return;
3549 
3550 	spin_lock_bh(&bp->phy_lock);
3551 
3552 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3553 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3554 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3555 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3556 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3557 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3558 	if (dev->flags & IFF_PROMISC) {
3559 		/* Promiscuous mode. */
3560 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3561 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3562 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3563 	}
3564 	else if (dev->flags & IFF_ALLMULTI) {
3565 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 				0xffffffff);
		}
3569 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3570 	}
3571 	else {
3572 		/* Accept one or more multicast(s). */
3573 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3574 		u32 regidx;
3575 		u32 bit;
3576 		u32 crc;
3577 
3578 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3579 
3580 		netdev_for_each_mc_addr(ha, dev) {
3581 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3582 			bit = crc & 0xff;
3583 			regidx = (bit & 0xe0) >> 5;
3584 			bit &= 0x1f;
3585 			mc_filter[regidx] |= (1 << bit);
3586 		}
3587 
3588 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590 				mc_filter[i]);
3591 		}
3592 
3593 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3594 	}
3595 
3596 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3597 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3598 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3599 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3600 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3602 		i = 0;
3603 		netdev_for_each_uc_addr(ha, dev) {
3604 			bnx2_set_mac_addr(bp, ha->addr,
3605 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3606 			sort_mode |= (1 <<
3607 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3608 			i++;
3609 		}
3610 
3611 	}
3612 
3613 	if (rx_mode != bp->rx_mode) {
3614 		bp->rx_mode = rx_mode;
3615 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3616 	}
3617 
3618 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3619 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3620 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3621 
3622 	spin_unlock_bh(&bp->phy_lock);
3623 }
3624 
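/* Validate one section of a firmware file: the offset and length must
 * fall within the file, meet the required alignment, and be non-zero
 * when the section is mandatory.
 */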
3625 static int
3626 check_fw_section(const struct firmware *fw,
3627 		 const struct bnx2_fw_file_section *section,
3628 		 u32 alignment, bool non_empty)
3629 {
3630 	u32 offset = be32_to_cpu(section->offset);
3631 	u32 len = be32_to_cpu(section->len);
3632 
3633 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3634 		return -EINVAL;
3635 	if ((non_empty && len == 0) || len > fw->size - offset ||
3636 	    len & (alignment - 1))
3637 		return -EINVAL;
3638 	return 0;
3639 }
3640 
3641 static int
3642 check_mips_fw_entry(const struct firmware *fw,
3643 		    const struct bnx2_mips_fw_file_entry *entry)
3644 {
3645 	if (check_fw_section(fw, &entry->text, 4, true) ||
3646 	    check_fw_section(fw, &entry->data, 4, false) ||
3647 	    check_fw_section(fw, &entry->rodata, 4, false))
3648 		return -EINVAL;
3649 	return 0;
3650 }
3651 
3652 static void bnx2_release_firmware(struct bnx2 *bp)
3653 {
3654 	if (bp->rv2p_firmware) {
3655 		release_firmware(bp->mips_firmware);
3656 		release_firmware(bp->rv2p_firmware);
3657 		bp->rv2p_firmware = NULL;
3658 	}
3659 }
3660 
3661 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3662 {
3663 	const char *mips_fw_file, *rv2p_fw_file;
3664 	const struct bnx2_mips_fw_file *mips_fw;
3665 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3666 	int rc;
3667 
3668 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3669 		mips_fw_file = FW_MIPS_FILE_09;
3670 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3671 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3672 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3673 		else
3674 			rv2p_fw_file = FW_RV2P_FILE_09;
3675 	} else {
3676 		mips_fw_file = FW_MIPS_FILE_06;
3677 		rv2p_fw_file = FW_RV2P_FILE_06;
3678 	}
3679 
3680 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3681 	if (rc) {
3682 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3683 		goto out;
3684 	}
3685 
3686 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3687 	if (rc) {
3688 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3689 		goto err_release_mips_firmware;
3690 	}
3691 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3692 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3693 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3694 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3695 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3696 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3697 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3698 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3699 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3700 		rc = -EINVAL;
3701 		goto err_release_firmware;
3702 	}
3703 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3704 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3705 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3706 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3707 		rc = -EINVAL;
3708 		goto err_release_firmware;
3709 	}
3710 out:
3711 	return rc;
3712 
3713 err_release_firmware:
3714 	release_firmware(bp->rv2p_firmware);
3715 	bp->rv2p_firmware = NULL;
3716 err_release_mips_firmware:
3717 	release_firmware(bp->mips_firmware);
3718 	goto out;
3719 }
3720 
3721 static int bnx2_request_firmware(struct bnx2 *bp)
3722 {
3723 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3724 }
3725 
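/* Patch an RV2P instruction before it is loaded; currently only the
 * BD page size field needs to be fixed up.
 */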
3726 static u32
3727 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3728 {
3729 	switch (idx) {
3730 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3731 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3732 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3733 		break;
3734 	}
3735 	return rv2p_code;
3736 }
3737 
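/* Load an RV2P processor image: write instruction pairs through the
 * INSTR_HIGH/INSTR_LOW registers, apply any fixups from the firmware
 * file, then reset the processor (it is un-stalled later).
 */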
3738 static int
3739 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3740 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3741 {
3742 	u32 rv2p_code_len, file_offset;
3743 	__be32 *rv2p_code;
3744 	int i;
3745 	u32 val, cmd, addr;
3746 
3747 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3748 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3749 
3750 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3751 
3752 	if (rv2p_proc == RV2P_PROC1) {
3753 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3754 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3755 	} else {
3756 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3757 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3758 	}
3759 
3760 	for (i = 0; i < rv2p_code_len; i += 8) {
3761 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3762 		rv2p_code++;
3763 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3764 		rv2p_code++;
3765 
3766 		val = (i / 8) | cmd;
3767 		BNX2_WR(bp, addr, val);
3768 	}
3769 
3770 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3771 	for (i = 0; i < 8; i++) {
3772 		u32 loc, code;
3773 
3774 		loc = be32_to_cpu(fw_entry->fixup[i]);
3775 		if (loc && ((loc * 4) < rv2p_code_len)) {
3776 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3777 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3778 			code = be32_to_cpu(*(rv2p_code + loc));
3779 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3780 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3781 
3782 			val = (loc / 2) | cmd;
3783 			BNX2_WR(bp, addr, val);
3784 		}
3785 	}
3786 
	/* Reset the processor; the un-stall is done later. */
3788 	if (rv2p_proc == RV2P_PROC1) {
3789 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3790 	}
3791 	else {
3792 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3793 	}
3794 
3795 	return 0;
3796 }
3797 
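/* Load firmware into one of the on-chip MIPS CPUs: halt the CPU, copy
 * the text, data and read-only sections into its scratchpad through
 * indirect register writes, set the program counter to the entry
 * point, and take the CPU out of halt.
 */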
3798 static int
3799 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3800 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3801 {
3802 	u32 addr, len, file_offset;
3803 	__be32 *data;
3804 	u32 offset;
3805 	u32 val;
3806 
3807 	/* Halt the CPU. */
3808 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3809 	val |= cpu_reg->mode_value_halt;
3810 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3811 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3812 
3813 	/* Load the Text area. */
3814 	addr = be32_to_cpu(fw_entry->text.addr);
3815 	len = be32_to_cpu(fw_entry->text.len);
3816 	file_offset = be32_to_cpu(fw_entry->text.offset);
3817 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3818 
3819 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3820 	if (len) {
3821 		int j;
3822 
3823 		for (j = 0; j < (len / 4); j++, offset += 4)
3824 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3825 	}
3826 
3827 	/* Load the Data area. */
3828 	addr = be32_to_cpu(fw_entry->data.addr);
3829 	len = be32_to_cpu(fw_entry->data.len);
3830 	file_offset = be32_to_cpu(fw_entry->data.offset);
3831 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3832 
3833 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3834 	if (len) {
3835 		int j;
3836 
3837 		for (j = 0; j < (len / 4); j++, offset += 4)
3838 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3839 	}
3840 
3841 	/* Load the Read-Only area. */
3842 	addr = be32_to_cpu(fw_entry->rodata.addr);
3843 	len = be32_to_cpu(fw_entry->rodata.len);
3844 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3845 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3846 
3847 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3848 	if (len) {
3849 		int j;
3850 
3851 		for (j = 0; j < (len / 4); j++, offset += 4)
3852 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3853 	}
3854 
3855 	/* Clear the pre-fetch instruction. */
3856 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3857 
3858 	val = be32_to_cpu(fw_entry->start_addr);
3859 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3860 
3861 	/* Start the CPU. */
3862 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3863 	val &= ~cpu_reg->mode_value_halt;
3864 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3865 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3866 
3867 	return 0;
3868 }
3869 
3870 static int
3871 bnx2_init_cpus(struct bnx2 *bp)
3872 {
3873 	const struct bnx2_mips_fw_file *mips_fw =
3874 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3875 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3876 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3877 	int rc;
3878 
3879 	/* Initialize the RV2P processor. */
3880 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3881 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3882 
3883 	/* Initialize the RX Processor. */
3884 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3885 	if (rc)
3886 		goto init_cpu_err;
3887 
3888 	/* Initialize the TX Processor. */
3889 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3890 	if (rc)
3891 		goto init_cpu_err;
3892 
3893 	/* Initialize the TX Patch-up Processor. */
3894 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3895 	if (rc)
3896 		goto init_cpu_err;
3897 
3898 	/* Initialize the Completion Processor. */
3899 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3900 	if (rc)
3901 		goto init_cpu_err;
3902 
3903 	/* Initialize the Command Processor. */
3904 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3905 
3906 init_cpu_err:
3907 	return rc;
3908 }
3909 
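/* Prepare the MAC for Wake-on-LAN: on copper ports, renegotiate down to
 * 10/100 (presumably because the standby supply cannot power a gigabit
 * link), enable magic-packet and ACPI packet detection, accept all
 * multicast, and then tell the firmware whether to suspend with or
 * without WoL armed.
 */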
3910 static void
3911 bnx2_setup_wol(struct bnx2 *bp)
3912 {
3913 	int i;
3914 	u32 val, wol_msg;
3915 
3916 	if (bp->wol) {
3917 		u32 advertising;
3918 		u8 autoneg;
3919 
3920 		autoneg = bp->autoneg;
3921 		advertising = bp->advertising;
3922 
3923 		if (bp->phy_port == PORT_TP) {
3924 			bp->autoneg = AUTONEG_SPEED;
3925 			bp->advertising = ADVERTISED_10baseT_Half |
3926 				ADVERTISED_10baseT_Full |
3927 				ADVERTISED_100baseT_Half |
3928 				ADVERTISED_100baseT_Full |
3929 				ADVERTISED_Autoneg;
3930 		}
3931 
3932 		spin_lock_bh(&bp->phy_lock);
3933 		bnx2_setup_phy(bp, bp->phy_port);
3934 		spin_unlock_bh(&bp->phy_lock);
3935 
3936 		bp->autoneg = autoneg;
3937 		bp->advertising = advertising;
3938 
3939 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3940 
3941 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3942 
3943 		/* Enable port mode. */
3944 		val &= ~BNX2_EMAC_MODE_PORT;
3945 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3946 		       BNX2_EMAC_MODE_ACPI_RCVD |
3947 		       BNX2_EMAC_MODE_MPKT;
3948 		if (bp->phy_port == PORT_TP) {
3949 			val |= BNX2_EMAC_MODE_PORT_MII;
3950 		} else {
3951 			val |= BNX2_EMAC_MODE_PORT_GMII;
3952 			if (bp->line_speed == SPEED_2500)
3953 				val |= BNX2_EMAC_MODE_25G_MODE;
3954 		}
3955 
3956 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3957 
3958 		/* receive all multicast */
3959 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3960 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3961 				0xffffffff);
3962 		}
3963 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3964 
3965 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3966 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3967 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3968 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3969 
3970 		/* Need to enable EMAC and RPM for WOL. */
3971 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3972 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3973 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3974 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3975 
3976 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3977 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3978 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3979 
3980 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3981 	} else {
3982 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3983 	}
3984 
3985 	if (!(bp->flags & BNX2_FLAG_NO_WOL))
3986 		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
3987 
3988 }
3989 
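/* Move the chip between D0 and D3hot.  Note that on the 5706 A0/A1 the
 * device is only placed in D3hot when WoL is enabled -- presumably a
 * workaround for those early steppings.
 */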
3990 static int
3991 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3992 {
3993 	switch (state) {
3994 	case PCI_D0: {
3995 		u32 val;
3996 
3997 		pci_enable_wake(bp->pdev, PCI_D0, false);
3998 		pci_set_power_state(bp->pdev, PCI_D0);
3999 
4000 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4001 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4002 		val &= ~BNX2_EMAC_MODE_MPKT;
4003 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4004 
4005 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4006 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4007 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4008 		break;
4009 	}
4010 	case PCI_D3hot: {
4011 		bnx2_setup_wol(bp);
4012 		pci_wake_from_d3(bp->pdev, bp->wol);
4013 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4014 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4015 
4016 			if (bp->wol)
4017 				pci_set_power_state(bp->pdev, PCI_D3hot);
4018 		} else {
4019 			pci_set_power_state(bp->pdev, PCI_D3hot);
4020 		}
4021 
4022 		/* No more memory access after this point until
4023 		 * device is brought back to D0.
4024 		 */
4025 		break;
4026 	}
4027 	default:
4028 		return -EINVAL;
4029 	}
4030 	return 0;
4031 }
4032 
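/* The NVRAM interface is shared with the management firmware, so it is
 * guarded by a hardware arbiter.  Request arbitration slot 2 and poll
 * (up to NVRAM_TIMEOUT_COUNT * 5 us) until the arbiter grants it.
 */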
4033 static int
4034 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4035 {
4036 	u32 val;
4037 	int j;
4038 
4039 	/* Request access to the flash interface. */
4040 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4041 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4042 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4043 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4044 			break;
4045 
4046 		udelay(5);
4047 	}
4048 
4049 	if (j >= NVRAM_TIMEOUT_COUNT)
4050 		return -EBUSY;
4051 
4052 	return 0;
4053 }
4054 
4055 static int
4056 bnx2_release_nvram_lock(struct bnx2 *bp)
4057 {
4058 	int j;
4059 	u32 val;
4060 
4061 	/* Relinquish nvram interface. */
4062 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4063 
4064 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4066 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4067 			break;
4068 
4069 		udelay(5);
4070 	}
4071 
4072 	if (j >= NVRAM_TIMEOUT_COUNT)
4073 		return -EBUSY;
4074 
4075 	return 0;
4076 }
4077 
4078 
4079 static int
4080 bnx2_enable_nvram_write(struct bnx2 *bp)
4081 {
4082 	u32 val;
4083 
4084 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4085 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4086 
4087 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4088 		int j;
4089 
4090 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4091 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4092 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4093 
4094 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4095 			udelay(5);
4096 
4097 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4098 			if (val & BNX2_NVM_COMMAND_DONE)
4099 				break;
4100 		}
4101 
4102 		if (j >= NVRAM_TIMEOUT_COUNT)
4103 			return -EBUSY;
4104 	}
4105 	return 0;
4106 }
4107 
4108 static void
4109 bnx2_disable_nvram_write(struct bnx2 *bp)
4110 {
4111 	u32 val;
4112 
4113 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4114 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4115 }
4116 
4117 
4118 static void
4119 bnx2_enable_nvram_access(struct bnx2 *bp)
4120 {
4121 	u32 val;
4122 
4123 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4124 	/* Enable both bits, even on read. */
4125 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4126 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4127 }
4128 
4129 static void
4130 bnx2_disable_nvram_access(struct bnx2 *bp)
4131 {
4132 	u32 val;
4133 
4134 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4135 	/* Disable both bits, even after read. */
4136 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4137 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4138 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4139 }
4140 
4141 static int
4142 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4143 {
4144 	u32 cmd;
4145 	int j;
4146 
4147 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4148 		/* Buffered flash, no erase needed */
4149 		return 0;
4150 
4151 	/* Build an erase command */
4152 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4153 	      BNX2_NVM_COMMAND_DOIT;
4154 
4155 	/* Need to clear DONE bit separately. */
4156 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4157 
4158 	/* Address of the NVRAM to erase. */
4159 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4160 
4161 	/* Issue an erase command. */
4162 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4163 
4164 	/* Wait for completion. */
4165 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4166 		u32 val;
4167 
4168 		udelay(5);
4169 
4170 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4171 		if (val & BNX2_NVM_COMMAND_DONE)
4172 			break;
4173 	}
4174 
4175 	if (j >= NVRAM_TIMEOUT_COUNT)
4176 		return -EBUSY;
4177 
4178 	return 0;
4179 }
4180 
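/* Read one 32-bit word from NVRAM.  The FIRST/LAST bits in cmd_flags
 * delimit a multi-word burst; DOIT kicks off the access and DONE is
 * polled for completion.  The result is stored big-endian so the byte
 * order of the flash contents is preserved in ret_val.
 */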
4181 static int
4182 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4183 {
4184 	u32 cmd;
4185 	int j;
4186 
4187 	/* Build the command word. */
4188 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4189 
4190 	/* Convert to the buffered flash's paged addressing (not needed on the 5709). */
4191 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4192 		offset = ((offset / bp->flash_info->page_size) <<
4193 			   bp->flash_info->page_bits) +
4194 			  (offset % bp->flash_info->page_size);
4195 	}
4196 
4197 	/* Need to clear DONE bit separately. */
4198 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4199 
4200 	/* Address of the NVRAM to read from. */
4201 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4202 
4203 	/* Issue a read command. */
4204 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4205 
4206 	/* Wait for completion. */
4207 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4208 		u32 val;
4209 
4210 		udelay(5);
4211 
4212 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4213 		if (val & BNX2_NVM_COMMAND_DONE) {
4214 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4215 			memcpy(ret_val, &v, 4);
4216 			break;
4217 		}
4218 	}
4219 	if (j >= NVRAM_TIMEOUT_COUNT)
4220 		return -EBUSY;
4221 
4222 	return 0;
4223 }
4224 
4225 
4226 static int
4227 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4228 {
4229 	u32 cmd;
4230 	__be32 val32;
4231 	int j;
4232 
4233 	/* Build the command word. */
4234 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4235 
4236 	/* Convert to the buffered flash's paged addressing (not needed on the 5709). */
4237 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238 		offset = ((offset / bp->flash_info->page_size) <<
4239 			  bp->flash_info->page_bits) +
4240 			 (offset % bp->flash_info->page_size);
4241 	}
4242 
4243 	/* Need to clear DONE bit separately. */
4244 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4245 
4246 	memcpy(&val32, val, 4);
4247 
4248 	/* Write the data. */
4249 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4250 
4251 	/* Address of the NVRAM to write to. */
4252 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4253 
4254 	/* Issue the write command. */
4255 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4256 
4257 	/* Wait for completion. */
4258 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4259 		udelay(5);
4260 
4261 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4262 			break;
4263 	}
4264 	if (j >= NVRAM_TIMEOUT_COUNT)
4265 		return -EBUSY;
4266 
4267 	return 0;
4268 }
4269 
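/* Identify the attached flash/EEPROM.  The 5709 always uses the
 * flash_5709 descriptor; older chips are matched against flash_table
 * using the strapping bits in NVM_CFG1.  Bit 0x40000000 indicates the
 * interface has already been reconfigured, in which case the backup
 * strap bits are compared instead.
 */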
4270 static int
4271 bnx2_init_nvram(struct bnx2 *bp)
4272 {
4273 	u32 val;
4274 	int j, entry_count, rc = 0;
4275 	const struct flash_spec *flash;
4276 
4277 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4278 		bp->flash_info = &flash_5709;
4279 		goto get_flash_size;
4280 	}
4281 
4282 	/* Determine the selected interface. */
4283 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4284 
4285 	entry_count = ARRAY_SIZE(flash_table);
4286 
4287 	if (val & 0x40000000) {
4288 
4289 		/* Flash interface has been reconfigured */
4290 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4291 		     j++, flash++) {
4292 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4293 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4294 				bp->flash_info = flash;
4295 				break;
4296 			}
4297 		}
4298 	} else {
4300 		u32 mask;
4301 		/* Flash interface has not been reconfigured yet */
4302 
4303 		if (val & (1 << 23))
4304 			mask = FLASH_BACKUP_STRAP_MASK;
4305 		else
4306 			mask = FLASH_STRAP_MASK;
4307 
4308 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4309 			j++, flash++) {
4310 
4311 			if ((val & mask) == (flash->strapping & mask)) {
4312 				bp->flash_info = flash;
4313 
4314 				/* Request access to the flash interface. */
4315 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4316 					return rc;
4317 
4318 				/* Enable access to flash interface */
4319 				bnx2_enable_nvram_access(bp);
4320 
4321 				/* Reconfigure the flash interface */
4322 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4323 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4324 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4325 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4326 
4327 				/* Disable access to flash interface */
4328 				bnx2_disable_nvram_access(bp);
4329 				bnx2_release_nvram_lock(bp);
4330 
4331 				break;
4332 			}
4333 		}
4334 	} /* if (val & 0x40000000) */
4335 
4336 	if (j == entry_count) {
4337 		bp->flash_info = NULL;
4338 		pr_alert("Unknown flash/EEPROM type\n");
4339 		return -ENODEV;
4340 	}
4341 
4342 get_flash_size:
4343 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4344 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4345 	if (val)
4346 		bp->flash_size = val;
4347 	else
4348 		bp->flash_size = bp->flash_info->total_size;
4349 
4350 	return rc;
4351 }
4352 
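/* Read an arbitrary byte range from NVRAM.  The hardware only reads
 * aligned 32-bit words, so a misaligned head or tail is read through a
 * 4-byte bounce buffer and the wanted bytes are copied out.  For
 * example, offset 6 / length 3 becomes: a FIRST read of the dword at
 * offset 4 (bytes 6-7 copied), then a LAST read of the dword at
 * offset 8 (byte 8 copied).
 */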
4353 static int
4354 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4355 		int buf_size)
4356 {
4357 	int rc = 0;
4358 	u32 cmd_flags, offset32, len32, extra;
4359 
4360 	if (buf_size == 0)
4361 		return 0;
4362 
4363 	/* Request access to the flash interface. */
4364 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4365 		return rc;
4366 
4367 	/* Enable access to flash interface */
4368 	bnx2_enable_nvram_access(bp);
4369 
4370 	len32 = buf_size;
4371 	offset32 = offset;
4372 	extra = 0;
4373 
4374 	cmd_flags = 0;
4375 
4376 	if (offset32 & 3) {
4377 		u8 buf[4];
4378 		u32 pre_len;
4379 
4380 		offset32 &= ~3;
4381 		pre_len = 4 - (offset & 3);
4382 
4383 		if (pre_len >= len32) {
4384 			pre_len = len32;
4385 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4386 				    BNX2_NVM_COMMAND_LAST;
4387 		} else {
4388 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4389 		}
4391 
4392 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393 
4394 		if (rc)
4395 			return rc;
4396 
4397 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4398 
4399 		offset32 += 4;
4400 		ret_buf += pre_len;
4401 		len32 -= pre_len;
4402 	}
4403 	if (len32 & 3) {
4404 		extra = 4 - (len32 & 3);
4405 		len32 = (len32 + 4) & ~3;
4406 	}
4407 
4408 	if (len32 == 4) {
4409 		u8 buf[4];
4410 
4411 		if (cmd_flags)
4412 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4413 		else
4414 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4415 				    BNX2_NVM_COMMAND_LAST;
4416 
4417 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4418 
4419 		memcpy(ret_buf, buf, 4 - extra);
4420 	} else if (len32 > 0) {
4422 		u8 buf[4];
4423 
4424 		/* Read the first word. */
4425 		if (cmd_flags)
4426 			cmd_flags = 0;
4427 		else
4428 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4429 
4430 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4431 
4432 		/* Advance to the next dword. */
4433 		offset32 += 4;
4434 		ret_buf += 4;
4435 		len32 -= 4;
4436 
4437 		while (len32 > 4 && rc == 0) {
4438 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4439 
4440 			/* Advance to the next dword. */
4441 			offset32 += 4;
4442 			ret_buf += 4;
4443 			len32 -= 4;
4444 		}
4445 
4446 		if (rc)
4447 			return rc;
4448 
4449 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4450 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4451 
4452 		memcpy(ret_buf, buf, 4 - extra);
4453 	}
4454 
4455 	/* Disable access to flash interface */
4456 	bnx2_disable_nvram_access(bp);
4457 
4458 	bnx2_release_nvram_lock(bp);
4459 
4460 	return rc;
4461 }
4462 
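/* Write an arbitrary byte range to NVRAM using read-modify-write.
 * Misaligned head/tail bytes are merged with the existing contents in
 * align_buf.  For non-buffered flash the affected page is read into
 * flash_buffer, erased, and rewritten in full so the bytes outside the
 * requested range survive the page erase.
 */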
4463 static int
4464 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4465 		int buf_size)
4466 {
4467 	u32 written, offset32, len32;
4468 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4469 	int rc = 0;
4470 	int align_start, align_end;
4471 
4472 	buf = data_buf;
4473 	offset32 = offset;
4474 	len32 = buf_size;
4475 	align_start = align_end = 0;
4476 
4477 	if ((align_start = (offset32 & 3))) {
4478 		offset32 &= ~3;
4479 		len32 += align_start;
4480 		if (len32 < 4)
4481 			len32 = 4;
4482 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4483 			return rc;
4484 	}
4485 
4486 	if (len32 & 3) {
4487 		align_end = 4 - (len32 & 3);
4488 		len32 += align_end;
4489 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4490 			return rc;
4491 	}
4492 
4493 	if (align_start || align_end) {
4494 		align_buf = kmalloc(len32, GFP_KERNEL);
4495 		if (align_buf == NULL)
4496 			return -ENOMEM;
4497 		if (align_start) {
4498 			memcpy(align_buf, start, 4);
4499 		}
4500 		if (align_end) {
4501 			memcpy(align_buf + len32 - 4, end, 4);
4502 		}
4503 		memcpy(align_buf + align_start, data_buf, buf_size);
4504 		buf = align_buf;
4505 	}
4506 
4507 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4508 		flash_buffer = kmalloc(264, GFP_KERNEL);
4509 		if (flash_buffer == NULL) {
4510 			rc = -ENOMEM;
4511 			goto nvram_write_end;
4512 		}
4513 	}
4514 
4515 	written = 0;
4516 	while ((written < len32) && (rc == 0)) {
4517 		u32 page_start, page_end, data_start, data_end;
4518 		u32 addr, cmd_flags;
4519 		int i;
4520 
4521 		/* Find the page_start addr */
4522 		page_start = offset32 + written;
4523 		page_start -= (page_start % bp->flash_info->page_size);
4524 		/* Find the page_end addr */
4525 		page_end = page_start + bp->flash_info->page_size;
4526 		/* Find the data_start addr */
4527 		data_start = (written == 0) ? offset32 : page_start;
4528 		/* Find the data_end addr */
4529 		data_end = (page_end > offset32 + len32) ?
4530 			(offset32 + len32) : page_end;
4531 
4532 		/* Request access to the flash interface. */
4533 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4534 			goto nvram_write_end;
4535 
4536 		/* Enable access to flash interface */
4537 		bnx2_enable_nvram_access(bp);
4538 
4539 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4540 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4541 			int j;
4542 
4543 			/* Read the whole page into the buffer
4544 			 * (non-buffered flash only) */
4545 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4546 				if (j == (bp->flash_info->page_size - 4)) {
4547 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4548 				}
4549 				rc = bnx2_nvram_read_dword(bp,
4550 					page_start + j,
4551 					&flash_buffer[j],
4552 					cmd_flags);
4553 
4554 				if (rc)
4555 					goto nvram_write_end;
4556 
4557 				cmd_flags = 0;
4558 			}
4559 		}
4560 
4561 		/* Enable writes to flash interface (unlock write-protect) */
4562 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4563 			goto nvram_write_end;
4564 
4565 		/* Loop to write back the buffer data from page_start to
4566 		 * data_start */
4567 		i = 0;
4568 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4569 			/* Erase the page */
4570 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4571 				goto nvram_write_end;
4572 
4573 			/* Re-enable the write again for the actual write */
4574 			bnx2_enable_nvram_write(bp);
4575 
4576 			for (addr = page_start; addr < data_start;
4577 				addr += 4, i += 4) {
4578 
4579 				rc = bnx2_nvram_write_dword(bp, addr,
4580 					&flash_buffer[i], cmd_flags);
4581 
4582 				if (rc != 0)
4583 					goto nvram_write_end;
4584 
4585 				cmd_flags = 0;
4586 			}
4587 		}
4588 
4589 		/* Loop to write the new data from data_start to data_end */
4590 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4591 			if ((addr == page_end - 4) ||
4592 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4593 				 (addr == data_end - 4))) {
4594 
4595 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4596 			}
4597 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4598 				cmd_flags);
4599 
4600 			if (rc != 0)
4601 				goto nvram_write_end;
4602 
4603 			cmd_flags = 0;
4604 			buf += 4;
4605 		}
4606 
4607 		/* Loop to write back the buffer data from data_end
4608 		 * to page_end */
4609 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4610 			for (addr = data_end; addr < page_end;
4611 				addr += 4, i += 4) {
4612 
4613 				if (addr == page_end - 4) {
4614 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4615 				}
4616 				rc = bnx2_nvram_write_dword(bp, addr,
4617 					&flash_buffer[i], cmd_flags);
4618 
4619 				if (rc != 0)
4620 					goto nvram_write_end;
4621 
4622 				cmd_flags = 0;
4623 			}
4624 		}
4625 
4626 		/* Disable writes to flash interface (lock write-protect) */
4627 		bnx2_disable_nvram_write(bp);
4628 
4629 		/* Disable access to flash interface */
4630 		bnx2_disable_nvram_access(bp);
4631 		bnx2_release_nvram_lock(bp);
4632 
4633 		/* Increment written */
4634 		written += data_end - data_start;
4635 	}
4636 
4637 nvram_write_end:
4638 	kfree(flash_buffer);
4639 	kfree(align_buf);
4640 	return rc;
4641 }
4642 
4643 static void
4644 bnx2_init_fw_cap(struct bnx2 *bp)
4645 {
4646 	u32 val, sig = 0;
4647 
4648 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4649 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4650 
4651 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4652 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4653 
4654 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4655 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4656 		return;
4657 
4658 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4659 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4660 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4661 	}
4662 
4663 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4664 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4665 		u32 link;
4666 
4667 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4668 
4669 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4670 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4671 			bp->phy_port = PORT_FIBRE;
4672 		else
4673 			bp->phy_port = PORT_TP;
4674 
4675 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4676 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4677 	}
4678 
4679 	if (netif_running(bp->dev) && sig)
4680 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4681 }
4682 
4683 static void
4684 bnx2_setup_msix_tbl(struct bnx2 *bp)
4685 {
4686 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4687 
4688 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4689 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4690 }
4691 
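/* Reset sequence: quiesce DMA (via MISC_ENABLE on the 5706/5708, via
 * the NEW_CORE_CTL DMA enable bit on the 5709), handshake with the
 * firmware (WAIT0), deposit the soft-reset signature, issue the core
 * reset, verify byte swapping with the SWAP_DIAG0 signature, and wait
 * for the firmware to finish re-initializing (WAIT1).
 */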
4692 static int
4693 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4694 {
4695 	u32 val;
4696 	int i, rc = 0;
4697 	u8 old_port;
4698 
4699 	/* Wait for the current PCI transaction to complete before
4700 	 * issuing a reset. */
4701 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4702 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4703 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4704 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4705 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4706 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4707 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4708 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4709 		udelay(5);
4710 	} else {  /* 5709 */
4711 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4712 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4713 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4714 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4715 
4716 		for (i = 0; i < 100; i++) {
4717 			msleep(1);
4718 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4719 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4720 				break;
4721 		}
4722 	}
4723 
4724 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4725 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4726 
4727 	/* Deposit a driver reset signature so the firmware knows that
4728 	 * this is a soft reset. */
4729 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4730 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4731 
4732 	/* Do a dummy read to force the chip to complete all current
4733 	 * transactions before we issue the reset. */
4734 	val = BNX2_RD(bp, BNX2_MISC_ID);
4735 
4736 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4737 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4738 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4739 		udelay(5);
4740 
4741 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4742 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4743 
4744 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4745 
4746 	} else {
4747 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4748 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4749 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4750 
4751 		/* Chip reset. */
4752 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4753 
4754 		/* Reading back any register after chip reset will hang the
4755 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4756 		 * of margin for write posting.
4757 		 */
4758 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4759 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4760 			msleep(20);
4761 
4762 		/* Reset takes approximately 30 usec */
4763 		for (i = 0; i < 10; i++) {
4764 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4765 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4766 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4767 				break;
4768 			udelay(10);
4769 		}
4770 
4771 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4772 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4773 			pr_err("Chip reset did not complete\n");
4774 			return -EBUSY;
4775 		}
4776 	}
4777 
4778 	/* Make sure byte swapping is properly configured. */
4779 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4780 	if (val != 0x01020304) {
4781 		pr_err("Chip not in correct endian mode\n");
4782 		return -ENODEV;
4783 	}
4784 
4785 	/* Wait for the firmware to finish its initialization. */
4786 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4787 	if (rc)
4788 		return rc;
4789 
4790 	spin_lock_bh(&bp->phy_lock);
4791 	old_port = bp->phy_port;
4792 	bnx2_init_fw_cap(bp);
4793 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4794 	    old_port != bp->phy_port)
4795 		bnx2_set_default_remote_link(bp);
4796 	spin_unlock_bh(&bp->phy_lock);
4797 
4798 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4799 		/* Adjust the voltage regulator two steps lower.  The default
4800 		 * value of this register is 0x0000000e. */
4801 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4802 
4803 		/* Remove bad rbuf memory from the free pool. */
4804 		rc = bnx2_alloc_bad_rbuf(bp);
4805 	}
4806 
4807 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4808 		bnx2_setup_msix_tbl(bp);
4809 		/* Prevent MSIX table reads and writes from timing out */
4810 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4811 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4812 	}
4813 
4814 	return rc;
4815 }
4816 
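/* Bring the chip from reset to an operational state: program the DMA
 * configuration, initialize the context memory, download the RV2P and
 * MIPS firmware, program the MTU and RX buffer sizes, set up the host
 * coalescing parameters for every vector, and finally enable all
 * blocks and sync with the firmware.
 */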
4817 static int
4818 bnx2_init_chip(struct bnx2 *bp)
4819 {
4820 	u32 val, mtu;
4821 	int rc, i;
4822 
4823 	/* Make sure the interrupt is not active. */
4824 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4825 
4826 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4827 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4828 #ifdef __BIG_ENDIAN
4829 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4830 #endif
4831 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4832 	      DMA_READ_CHANS << 12 |
4833 	      DMA_WRITE_CHANS << 16;
4834 
4835 	val |= (0x2 << 20) | (1 << 11);
4836 
4837 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4838 		val |= (1 << 23);
4839 
4840 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4841 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4842 	    !(bp->flags & BNX2_FLAG_PCIX))
4843 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4844 
4845 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4846 
4847 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4848 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4849 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4850 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4851 	}
4852 
4853 	if (bp->flags & BNX2_FLAG_PCIX) {
4854 		u16 val16;
4855 
4856 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4857 				     &val16);
4858 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4859 				      val16 & ~PCI_X_CMD_ERO);
4860 	}
4861 
4862 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4863 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4864 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4865 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4866 
4867 	/* Initialize context mapping and zero out the quick contexts.  The
4868 	 * context block must have already been enabled. */
4869 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4870 		rc = bnx2_init_5709_context(bp);
4871 		if (rc)
4872 			return rc;
4873 	} else
4874 		bnx2_init_context(bp);
4875 
4876 	if ((rc = bnx2_init_cpus(bp)) != 0)
4877 		return rc;
4878 
4879 	bnx2_init_nvram(bp);
4880 
4881 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4882 
4883 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4884 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4885 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4886 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4887 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4888 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4889 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4890 	}
4891 
4892 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4893 
4894 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4895 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4896 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4897 
4898 	val = (BNX2_PAGE_BITS - 8) << 24;
4899 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4900 
4901 	/* Configure page size. */
4902 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4903 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4904 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4905 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4906 
4907 	val = bp->mac_addr[0] +
4908 	      (bp->mac_addr[1] << 8) +
4909 	      (bp->mac_addr[2] << 16) +
4910 	      bp->mac_addr[3] +
4911 	      (bp->mac_addr[4] << 8) +
4912 	      (bp->mac_addr[5] << 16);
4913 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4914 
4915 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4916 	mtu = bp->dev->mtu;
4917 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4918 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4919 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4920 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4921 
4922 	if (mtu < 1500)
4923 		mtu = 1500;
4924 
4925 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4926 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4927 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4928 
4929 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4930 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4931 		bp->bnx2_napi[i].last_status_idx = 0;
4932 
4933 	bp->idle_chk_status_idx = 0xffff;
4934 
4935 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4936 
4937 	/* Set up how to generate a link change interrupt. */
4938 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4939 
4940 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4941 		(u64) bp->status_blk_mapping & 0xffffffff);
4942 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4943 
4944 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4945 		(u64) bp->stats_blk_mapping & 0xffffffff);
4946 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4947 		(u64) bp->stats_blk_mapping >> 32);
4948 
4949 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4950 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4951 
4952 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4953 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4954 
4955 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4956 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4957 
4958 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4959 
4960 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4961 
4962 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
4963 		(bp->com_ticks_int << 16) | bp->com_ticks);
4964 
4965 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4966 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4967 
4968 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4969 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4970 	else
4971 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4972 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4973 
4974 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
4975 		val = BNX2_HC_CONFIG_COLLECT_STATS;
4976 	} else {
4977 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4978 		      BNX2_HC_CONFIG_COLLECT_STATS;
4979 	}
4980 
4981 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4982 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4983 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
4984 
4985 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4986 	}
4987 
4988 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4989 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4990 
4991 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
4992 
4993 	if (bp->rx_ticks < 25)
4994 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4995 	else
4996 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4997 
4998 	for (i = 1; i < bp->irq_nvecs; i++) {
4999 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5000 			   BNX2_HC_SB_CONFIG_1;
5001 
5002 		BNX2_WR(bp, base,
5003 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5004 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5005 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5006 
5007 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5008 			(bp->tx_quick_cons_trip_int << 16) |
5009 			 bp->tx_quick_cons_trip);
5010 
5011 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5012 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5013 
5014 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5015 			(bp->rx_quick_cons_trip_int << 16) |
5016 			bp->rx_quick_cons_trip);
5017 
5018 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5019 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5020 	}
5021 
5022 	/* Clear internal stats counters. */
5023 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5024 
5025 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5026 
5027 	/* Initialize the receive filter. */
5028 	bnx2_set_rx_mode(bp->dev);
5029 
5030 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5031 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5032 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5033 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5034 	}
5035 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5036 			  1, 0);
5037 
5038 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5039 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5040 
5041 	udelay(20);
5042 
5043 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5044 
5045 	return rc;
5046 }
5047 
5048 static void
5049 bnx2_clear_ring_states(struct bnx2 *bp)
5050 {
5051 	struct bnx2_napi *bnapi;
5052 	struct bnx2_tx_ring_info *txr;
5053 	struct bnx2_rx_ring_info *rxr;
5054 	int i;
5055 
5056 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5057 		bnapi = &bp->bnx2_napi[i];
5058 		txr = &bnapi->tx_ring;
5059 		rxr = &bnapi->rx_ring;
5060 
5061 		txr->tx_cons = 0;
5062 		txr->hw_tx_cons = 0;
5063 		rxr->rx_prod_bseq = 0;
5064 		rxr->rx_prod = 0;
5065 		rxr->rx_cons = 0;
5066 		rxr->rx_pg_prod = 0;
5067 		rxr->rx_pg_cons = 0;
5068 	}
5069 }
5070 
5071 static void
5072 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5073 {
5074 	u32 val, offset0, offset1, offset2, offset3;
5075 	u32 cid_addr = GET_CID_ADDR(cid);
5076 
5077 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5078 		offset0 = BNX2_L2CTX_TYPE_XI;
5079 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5080 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5081 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5082 	} else {
5083 		offset0 = BNX2_L2CTX_TYPE;
5084 		offset1 = BNX2_L2CTX_CMD_TYPE;
5085 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5086 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5087 	}
5088 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5089 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5090 
5091 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5092 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5093 
5094 	val = (u64) txr->tx_desc_mapping >> 32;
5095 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5096 
5097 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5098 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5099 }
5100 
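/* Set up one TX ring.  The last descriptor in the ring is used as a
 * chain BD whose host address points back at the first descriptor,
 * making the ring circular from the hardware's point of view.
 */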
5101 static void
5102 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5103 {
5104 	struct bnx2_tx_bd *txbd;
5105 	u32 cid = TX_CID;
5106 	struct bnx2_napi *bnapi;
5107 	struct bnx2_tx_ring_info *txr;
5108 
5109 	bnapi = &bp->bnx2_napi[ring_num];
5110 	txr = &bnapi->tx_ring;
5111 
5112 	if (ring_num == 0)
5113 		cid = TX_CID;
5114 	else
5115 		cid = TX_TSS_CID + ring_num - 1;
5116 
5117 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5118 
5119 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5120 
5121 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5122 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5123 
5124 	txr->tx_prod = 0;
5125 	txr->tx_prod_bseq = 0;
5126 
5127 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5128 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5129 
5130 	bnx2_init_tx_context(bp, cid, txr);
5131 }
5132 
5133 static void
5134 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5135 		     u32 buf_size, int num_rings)
5136 {
5137 	int i;
5138 	struct bnx2_rx_bd *rxbd;
5139 
5140 	for (i = 0; i < num_rings; i++) {
5141 		int j;
5142 
5143 		rxbd = &rx_ring[i][0];
5144 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5145 			rxbd->rx_bd_len = buf_size;
5146 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5147 		}
5148 		if (i == (num_rings - 1))
5149 			j = 0;
5150 		else
5151 			j = i + 1;
5152 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5153 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5154 	}
5155 }
5156 
5157 static void
5158 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5159 {
5160 	int i;
5161 	u16 prod, ring_prod;
5162 	u32 cid, rx_cid_addr, val;
5163 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5164 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5165 
5166 	if (ring_num == 0)
5167 		cid = RX_CID;
5168 	else
5169 		cid = RX_RSS_CID + ring_num - 1;
5170 
5171 	rx_cid_addr = GET_CID_ADDR(cid);
5172 
5173 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5174 			     bp->rx_buf_use_size, bp->rx_max_ring);
5175 
5176 	bnx2_init_rx_context(bp, cid);
5177 
5178 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5179 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5180 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5181 	}
5182 
5183 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5184 	if (bp->rx_pg_ring_size) {
5185 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5186 				     rxr->rx_pg_desc_mapping,
5187 				     PAGE_SIZE, bp->rx_max_pg_ring);
5188 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5189 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5190 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5191 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5192 
5193 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5194 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5195 
5196 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5197 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5198 
5199 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5200 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5201 	}
5202 
5203 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5204 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5205 
5206 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5207 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5208 
5209 	ring_prod = prod = rxr->rx_pg_prod;
5210 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5211 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5212 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5213 				    ring_num, i, bp->rx_pg_ring_size);
5214 			break;
5215 		}
5216 		prod = BNX2_NEXT_RX_BD(prod);
5217 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5218 	}
5219 	rxr->rx_pg_prod = prod;
5220 
5221 	ring_prod = prod = rxr->rx_prod;
5222 	for (i = 0; i < bp->rx_ring_size; i++) {
5223 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5224 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5225 				    ring_num, i, bp->rx_ring_size);
5226 			break;
5227 		}
5228 		prod = BNX2_NEXT_RX_BD(prod);
5229 		ring_prod = BNX2_RX_RING_IDX(prod);
5230 	}
5231 	rxr->rx_prod = prod;
5232 
5233 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5234 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5235 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5236 
5237 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5238 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5239 
5240 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5241 }
5242 
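/* Initialize every TX and RX ring.  When more than one RX ring is in
 * use, the RSS indirection table is programmed four bits per entry,
 * eight entries per write to RLUP_RSS_DATA, spreading flows over the
 * num_rx_rings - 1 non-default rings.
 */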
5243 static void
5244 bnx2_init_all_rings(struct bnx2 *bp)
5245 {
5246 	int i;
5247 	u32 val;
5248 
5249 	bnx2_clear_ring_states(bp);
5250 
5251 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5252 	for (i = 0; i < bp->num_tx_rings; i++)
5253 		bnx2_init_tx_ring(bp, i);
5254 
5255 	if (bp->num_tx_rings > 1)
5256 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5257 			(TX_TSS_CID << 7));
5258 
5259 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5260 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5261 
5262 	for (i = 0; i < bp->num_rx_rings; i++)
5263 		bnx2_init_rx_ring(bp, i);
5264 
5265 	if (bp->num_rx_rings > 1) {
5266 		u32 tbl_32 = 0;
5267 
5268 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5269 			int shift = (i % 8) << 2;
5270 
5271 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5272 			if ((i % 8) == 7) {
5273 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5274 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5275 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5276 					BNX2_RLUP_RSS_COMMAND_WRITE |
5277 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5278 				tbl_32 = 0;
5279 			}
5280 		}
5281 
5282 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5283 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5284 
5285 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5286 
5287 	}
5288 }
5289 
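/* Compute the number of ring pages needed for ring_size descriptors,
 * rounded up to a power of 2 and capped at max_size.  For example
 * (assuming BNX2_MAX_RX_DESC_CNT is 255 and max_size is 8), a
 * ring_size of 600 needs 3 pages, which rounds up to 4.
 */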
5290 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5291 {
5292 	u32 max, num_rings = 1;
5293 
5294 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5295 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5296 		num_rings++;
5297 	}
5298 	/* round num_rings up to the next power of 2, capped at max_size */
5299 	max = max_size;
5300 	while ((max & num_rings) == 0)
5301 		max >>= 1;
5302 
5303 	if (num_rings != max)
5304 		max <<= 1;
5305 
5306 	return max;
5307 }
5308 
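/* Size the RX buffers from the current MTU.  If a full frame (plus
 * alignment and shared-info overhead) no longer fits in one page, the
 * page ring is enabled: the first buffer then only holds the header
 * portion (up to rx_jumbo_thresh) and the payload lands in pages.
 */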
5309 static void
5310 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5311 {
5312 	u32 rx_size, rx_space, jumbo_size;
5313 
5314 	/* 8 for CRC and VLAN */
5315 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5316 
5317 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5318 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5319 
5320 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5321 	bp->rx_pg_ring_size = 0;
5322 	bp->rx_max_pg_ring = 0;
5323 	bp->rx_max_pg_ring_idx = 0;
5324 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5325 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5326 
5327 		jumbo_size = size * pages;
5328 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5329 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5330 
5331 		bp->rx_pg_ring_size = jumbo_size;
5332 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5333 							BNX2_MAX_RX_PG_RINGS);
5334 		bp->rx_max_pg_ring_idx =
5335 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5336 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5337 		bp->rx_copy_thresh = 0;
5338 	}
5339 
5340 	bp->rx_buf_use_size = rx_size;
5341 	/* hw alignment + build_skb() overhead */
5342 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5343 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5344 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5345 	bp->rx_ring_size = size;
5346 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5347 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5348 }
5349 
5350 static void
5351 bnx2_free_tx_skbs(struct bnx2 *bp)
5352 {
5353 	int i;
5354 
5355 	for (i = 0; i < bp->num_tx_rings; i++) {
5356 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5357 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5358 		int j;
5359 
5360 		if (txr->tx_buf_ring == NULL)
5361 			continue;
5362 
5363 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5364 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5365 			struct sk_buff *skb = tx_buf->skb;
5366 			int k, last;
5367 
5368 			if (skb == NULL) {
5369 				j = BNX2_NEXT_TX_BD(j);
5370 				continue;
5371 			}
5372 
5373 			dma_unmap_single(&bp->pdev->dev,
5374 					 dma_unmap_addr(tx_buf, mapping),
5375 					 skb_headlen(skb),
5376 					 PCI_DMA_TODEVICE);
5377 
5378 			tx_buf->skb = NULL;
5379 
5380 			last = tx_buf->nr_frags;
5381 			j = BNX2_NEXT_TX_BD(j);
5382 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5383 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5384 				dma_unmap_page(&bp->pdev->dev,
5385 					dma_unmap_addr(tx_buf, mapping),
5386 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5387 					PCI_DMA_TODEVICE);
5388 			}
5389 			dev_kfree_skb(skb);
5390 		}
5391 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5392 	}
5393 }
5394 
5395 static void
5396 bnx2_free_rx_skbs(struct bnx2 *bp)
5397 {
5398 	int i;
5399 
5400 	for (i = 0; i < bp->num_rx_rings; i++) {
5401 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5402 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5403 		int j;
5404 
5405 		if (rxr->rx_buf_ring == NULL)
5406 			return;
5407 
5408 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5409 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5410 			u8 *data = rx_buf->data;
5411 
5412 			if (data == NULL)
5413 				continue;
5414 
5415 			dma_unmap_single(&bp->pdev->dev,
5416 					 dma_unmap_addr(rx_buf, mapping),
5417 					 bp->rx_buf_use_size,
5418 					 PCI_DMA_FROMDEVICE);
5419 
5420 			rx_buf->data = NULL;
5421 
5422 			kfree(data);
5423 		}
5424 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5425 			bnx2_free_rx_page(bp, rxr, j);
5426 	}
5427 }
5428 
5429 static void
5430 bnx2_free_skbs(struct bnx2 *bp)
5431 {
5432 	bnx2_free_tx_skbs(bp);
5433 	bnx2_free_rx_skbs(bp);
5434 }
5435 
5436 static int
5437 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5438 {
5439 	int rc;
5440 
5441 	rc = bnx2_reset_chip(bp, reset_code);
5442 	bnx2_free_skbs(bp);
5443 	if (rc)
5444 		return rc;
5445 
5446 	if ((rc = bnx2_init_chip(bp)) != 0)
5447 		return rc;
5448 
5449 	bnx2_init_all_rings(bp);
5450 	return 0;
5451 }
5452 
5453 static int
5454 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5455 {
5456 	int rc;
5457 
5458 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5459 		return rc;
5460 
5461 	spin_lock_bh(&bp->phy_lock);
5462 	bnx2_init_phy(bp, reset_phy);
5463 	bnx2_set_link(bp);
5464 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5465 		bnx2_remote_phy_event(bp);
5466 	spin_unlock_bh(&bp->phy_lock);
5467 	return 0;
5468 }
5469 
5470 static int
5471 bnx2_shutdown_chip(struct bnx2 *bp)
5472 {
5473 	u32 reset_code;
5474 
5475 	if (bp->flags & BNX2_FLAG_NO_WOL)
5476 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5477 	else if (bp->wol)
5478 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5479 	else
5480 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5481 
5482 	return bnx2_reset_chip(bp, reset_code);
5483 }
5484 
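/* Register self-test.  For each entry, rw_mask marks read-write bits
 * and ro_mask marks read-only bits.  Each register is written with 0
 * and then 0xffffffff; the read-write bits must follow the written
 * value while the read-only bits must retain their saved contents.
 * The original value is restored afterwards.
 */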
5485 static int
5486 bnx2_test_registers(struct bnx2 *bp)
5487 {
5488 	int ret;
5489 	int i, is_5709;
5490 	static const struct {
5491 		u16   offset;
5492 		u16   flags;
5493 #define BNX2_FL_NOT_5709	1
5494 		u32   rw_mask;
5495 		u32   ro_mask;
5496 	} reg_tbl[] = {
5497 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5498 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5499 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5500 
5501 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5502 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5503 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5504 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5505 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5506 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5507 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5508 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5509 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5510 
5511 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5512 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5513 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5514 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5515 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5516 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5517 
5518 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5519 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5520 		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5521 
5522 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5523 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5524 
5525 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5526 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5527 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5528 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5529 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5530 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5531 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5532 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5533 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5534 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5535 
5536 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5537 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5538 
5539 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5540 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5541 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5542 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5543 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5544 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5545 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5546 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5547 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5548 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5549 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5550 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5551 
5552 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5553 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5554 
5555 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5556 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5557 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5558 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5559 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5560 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5561 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5562 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5563 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5564 
5565 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5566 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5567 
5568 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5569 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5570 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5571 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5572 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5573 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5574 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5575 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5576 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5577 
5578 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5579 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5580 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5581 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5582 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5583 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5584 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5585 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5586 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5587 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5588 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5589 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5590 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5591 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5592 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5593 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5594 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5595 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5596 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5597 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5598 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5599 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5600 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5601 
5602 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5603 	};
5604 
5605 	ret = 0;
5606 	is_5709 = 0;
5607 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5608 		is_5709 = 1;
5609 
5610 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5611 		u32 offset, rw_mask, ro_mask, save_val, val;
5612 		u16 flags = reg_tbl[i].flags;
5613 
5614 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5615 			continue;
5616 
5617 		offset = (u32) reg_tbl[i].offset;
5618 		rw_mask = reg_tbl[i].rw_mask;
5619 		ro_mask = reg_tbl[i].ro_mask;
5620 
5621 		save_val = readl(bp->regview + offset);
5622 
5623 		writel(0, bp->regview + offset);
5624 
5625 		val = readl(bp->regview + offset);
5626 		if ((val & rw_mask) != 0) {
5627 			goto reg_test_err;
5628 		}
5629 
5630 		if ((val & ro_mask) != (save_val & ro_mask)) {
5631 			goto reg_test_err;
5632 		}
5633 
5634 		writel(0xffffffff, bp->regview + offset);
5635 
5636 		val = readl(bp->regview + offset);
5637 		if ((val & rw_mask) != rw_mask) {
5638 			goto reg_test_err;
5639 		}
5640 
5641 		if ((val & ro_mask) != (save_val & ro_mask)) {
5642 			goto reg_test_err;
5643 		}
5644 
5645 		writel(save_val, bp->regview + offset);
5646 		continue;
5647 
5648 reg_test_err:
5649 		writel(save_val, bp->regview + offset);
5650 		ret = -ENODEV;
5651 		break;
5652 	}
5653 	return ret;
5654 }
5655 
5656 static int
5657 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5658 {
5659 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5660 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5661 	int i;
5662 
5663 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5664 		u32 offset;
5665 
5666 		for (offset = 0; offset < size; offset += 4) {
5667 
5668 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5669 
5670 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5671 				test_pattern[i]) {
5672 				return -ENODEV;
5673 			}
5674 		}
5675 	}
5676 	return 0;
5677 }
5678 
5679 static int
5680 bnx2_test_memory(struct bnx2 *bp)
5681 {
5682 	int ret = 0;
5683 	int i;
5684 	static struct mem_entry {
5685 		u32   offset;
5686 		u32   len;
5687 	} mem_tbl_5706[] = {
5688 		{ 0x60000,  0x4000 },
5689 		{ 0xa0000,  0x3000 },
5690 		{ 0xe0000,  0x4000 },
5691 		{ 0x120000, 0x4000 },
5692 		{ 0x1a0000, 0x4000 },
5693 		{ 0x160000, 0x4000 },
5694 		{ 0xffffffff, 0    },
5695 	},
5696 	mem_tbl_5709[] = {
5697 		{ 0x60000,  0x4000 },
5698 		{ 0xa0000,  0x3000 },
5699 		{ 0xe0000,  0x4000 },
5700 		{ 0x120000, 0x4000 },
5701 		{ 0x1a0000, 0x4000 },
5702 		{ 0xffffffff, 0    },
5703 	};
5704 	struct mem_entry *mem_tbl;
5705 
5706 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5707 		mem_tbl = mem_tbl_5709;
5708 	else
5709 		mem_tbl = mem_tbl_5706;
5710 
5711 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
				       mem_tbl[i].len);
		if (ret != 0)
			return ret;
5716 	}
5717 
5718 	return ret;
5719 }
5720 
5721 #define BNX2_MAC_LOOPBACK	0
5722 #define BNX2_PHY_LOOPBACK	1
5723 
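/* Transmit one self-addressed frame in MAC or PHY loopback and check
 * that it is received on ring 0 with a clean l2_fhdr status and an
 * intact payload.  Interrupts are not used; completions are forced
 * with BNX2_HC_COMMAND_COAL_NOW_WO_INT plus short delays.
 */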
5724 static int
5725 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5726 {
5727 	unsigned int pkt_size, num_pkts, i;
5728 	struct sk_buff *skb;
5729 	u8 *data;
5730 	unsigned char *packet;
5731 	u16 rx_start_idx, rx_idx;
5732 	dma_addr_t map;
5733 	struct bnx2_tx_bd *txbd;
5734 	struct bnx2_sw_bd *rx_buf;
5735 	struct l2_fhdr *rx_hdr;
5736 	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi = bnapi;
	struct bnx2_tx_ring_info *txr = &tx_napi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

5745 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5746 		bp->loopback = MAC_LOOPBACK;
5747 		bnx2_set_mac_loopback(bp);
5748 	}
5749 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5750 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5751 			return 0;
5752 
5753 		bp->loopback = PHY_LOOPBACK;
5754 		bnx2_set_phy_loopback(bp);
5755 	}
5756 	else
5757 		return -EINVAL;
5758 
5759 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5760 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5761 	if (!skb)
5762 		return -ENOMEM;
5763 	packet = skb_put(skb, pkt_size);
5764 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5765 	memset(packet + ETH_ALEN, 0x0, 8);
5766 	for (i = 14; i < pkt_size; i++)
5767 		packet[i] = (unsigned char) (i & 0xff);
5768 
5769 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
5771 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5772 		dev_kfree_skb(skb);
5773 		return -EIO;
5774 	}
5775 
5776 	BNX2_WR(bp, BNX2_HC_COMMAND,
5777 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5778 
5779 	BNX2_RD(bp, BNX2_HC_COMMAND);
5780 
5781 	udelay(5);
5782 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5783 
5784 	num_pkts = 0;
5785 
5786 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5787 
5788 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5789 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5790 	txbd->tx_bd_mss_nbytes = pkt_size;
5791 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5792 
5793 	num_pkts++;
5794 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5795 	txr->tx_prod_bseq += pkt_size;
5796 
5797 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5798 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5799 
5800 	udelay(100);
5801 
5802 	BNX2_WR(bp, BNX2_HC_COMMAND,
5803 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5804 
5805 	BNX2_RD(bp, BNX2_HC_COMMAND);
5806 
5807 	udelay(5);
5808 
	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5810 	dev_kfree_skb(skb);
5811 
5812 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5813 		goto loopback_test_done;
5814 
5815 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5816 	if (rx_idx != rx_start_idx + num_pkts) {
5817 		goto loopback_test_done;
5818 	}
5819 
5820 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5821 	data = rx_buf->data;
5822 
5823 	rx_hdr = get_l2_fhdr(data);
5824 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5825 
5826 	dma_sync_single_for_cpu(&bp->pdev->dev,
5827 		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5829 
5830 	if (rx_hdr->l2_fhdr_status &
5831 		(L2_FHDR_ERRORS_BAD_CRC |
5832 		L2_FHDR_ERRORS_PHY_DECODE |
5833 		L2_FHDR_ERRORS_ALIGNMENT |
5834 		L2_FHDR_ERRORS_TOO_SHORT |
5835 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5836 
5837 		goto loopback_test_done;
5838 	}
5839 
5840 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5841 		goto loopback_test_done;
5842 	}
5843 
5844 	for (i = 14; i < pkt_size; i++) {
5845 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5846 			goto loopback_test_done;
5847 		}
5848 	}
5849 
5850 	ret = 0;
5851 
5852 loopback_test_done:
5853 	bp->loopback = 0;
5854 	return ret;
5855 }
5856 
5857 #define BNX2_MAC_LOOPBACK_FAILED	1
5858 #define BNX2_PHY_LOOPBACK_FAILED	2
5859 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5860 					 BNX2_PHY_LOOPBACK_FAILED)
5861 
5862 static int
5863 bnx2_test_loopback(struct bnx2 *bp)
5864 {
5865 	int rc = 0;
5866 
5867 	if (!netif_running(bp->dev))
5868 		return BNX2_LOOPBACK_FAILED;
5869 
5870 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5871 	spin_lock_bh(&bp->phy_lock);
5872 	bnx2_init_phy(bp, 1);
5873 	spin_unlock_bh(&bp->phy_lock);
5874 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5875 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5876 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5877 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5878 	return rc;
5879 }
5880 
5881 #define NVRAM_SIZE 0x200
5882 #define CRC32_RESIDUAL 0xdebb20e3
5883 
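/* Check the NVRAM magic value at offset 0, then verify the CRC32
 * residual over each 256-byte half of the 512-byte block starting at
 * offset 0x100.
 */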
5884 static int
5885 bnx2_test_nvram(struct bnx2 *bp)
5886 {
5887 	__be32 buf[NVRAM_SIZE / 4];
5888 	u8 *data = (u8 *) buf;
5889 	int rc = 0;
5890 	u32 magic, csum;
5891 
	rc = bnx2_nvram_read(bp, 0, data, 4);
	if (rc != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
5896 	if (magic != 0x669955aa) {
5897 		rc = -ENODEV;
5898 		goto test_nvram_done;
5899 	}
5900 
	rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE);
	if (rc != 0)
		goto test_nvram_done;
5903 
5904 	csum = ether_crc_le(0x100, data);
5905 	if (csum != CRC32_RESIDUAL) {
5906 		rc = -ENODEV;
5907 		goto test_nvram_done;
5908 	}
5909 
5910 	csum = ether_crc_le(0x100, data + 0x100);
5911 	if (csum != CRC32_RESIDUAL) {
5912 		rc = -ENODEV;
5913 	}
5914 
5915 test_nvram_done:
5916 	return rc;
5917 }
5918 
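/* Report link state: trust bp->link_up when a remote PHY is managing
 * the link, otherwise read BMSR1 twice (link status is latched) and
 * test BMSR_LSTATUS.
 */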
5919 static int
5920 bnx2_test_link(struct bnx2 *bp)
5921 {
5922 	u32 bmsr;
5923 
5924 	if (!netif_running(bp->dev))
5925 		return -ENODEV;
5926 
5927 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5928 		if (bp->link_up)
5929 			return 0;
5930 		return -ENODEV;
5931 	}
5932 	spin_lock_bh(&bp->phy_lock);
5933 	bnx2_enable_bmsr1(bp);
5934 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5935 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936 	bnx2_disable_bmsr1(bp);
5937 	spin_unlock_bh(&bp->phy_lock);
5938 
5939 	if (bmsr & BMSR_LSTATUS) {
5940 		return 0;
5941 	}
5942 	return -ENODEV;
5943 }
5944 
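/* Verify that the device can generate an interrupt: force one with
 * BNX2_HC_COMMAND_COAL_NOW and poll for up to ~100 ms for the status
 * index in BNX2_PCICFG_INT_ACK_CMD to advance.
 */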
5945 static int
5946 bnx2_test_intr(struct bnx2 *bp)
5947 {
5948 	int i;
5949 	u16 status_idx;
5950 
5951 	if (!netif_running(bp->dev))
5952 		return -ENODEV;
5953 
5954 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5955 
5956 	/* This register is not touched during run-time. */
5957 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5958 	BNX2_RD(bp, BNX2_HC_COMMAND);
5959 
5960 	for (i = 0; i < 10; i++) {
5961 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5962 			status_idx) {
5963 
5964 			break;
5965 		}
5966 
5967 		msleep_interruptible(10);
5968 	}
5969 	if (i < 10)
5970 		return 0;
5971 
5972 	return -ENODEV;
5973 }
5974 
/* Determine whether the 5706 SerDes link is up, for parallel detection. */
5976 static int
5977 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5978 {
5979 	u32 mode_ctl, an_dbg, exp;
5980 
5981 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5982 		return 0;
5983 
5984 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5985 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5986 
5987 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5988 		return 0;
5989 
5990 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5991 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5992 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5993 
5994 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5995 		return 0;
5996 
5997 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5998 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5999 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6000 
6001 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6002 		return 0;
6003 
6004 	return 1;
6005 }
6006 
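/* Periodic SerDes handling for the 5706.  If autoneg cannot bring the
 * link up but a partner is seen by parallel detection, force
 * 1000/full.  While parallel-detected, poll an undocumented shadow
 * register (0x17/0x15) and re-enable autoneg when bit 5 is set, and
 * force the link down if rx sync is lost.
 */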
6007 static void
6008 bnx2_5706_serdes_timer(struct bnx2 *bp)
6009 {
6010 	int check_link = 1;
6011 
6012 	spin_lock(&bp->phy_lock);
6013 	if (bp->serdes_an_pending) {
6014 		bp->serdes_an_pending--;
6015 		check_link = 0;
6016 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6017 		u32 bmcr;
6018 
6019 		bp->current_interval = BNX2_TIMER_INTERVAL;
6020 
6021 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6022 
6023 		if (bmcr & BMCR_ANENABLE) {
6024 			if (bnx2_5706_serdes_has_link(bp)) {
6025 				bmcr &= ~BMCR_ANENABLE;
6026 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6027 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6028 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6029 			}
6030 		}
6031 	}
6032 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6033 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6034 		u32 phy2;
6035 
6036 		bnx2_write_phy(bp, 0x17, 0x0f01);
6037 		bnx2_read_phy(bp, 0x15, &phy2);
6038 		if (phy2 & 0x20) {
6039 			u32 bmcr;
6040 
6041 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6042 			bmcr |= BMCR_ANENABLE;
6043 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6044 
6045 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6046 		}
6047 	} else
6048 		bp->current_interval = BNX2_TIMER_INTERVAL;
6049 
6050 	if (check_link) {
6051 		u32 val;
6052 
6053 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6054 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6055 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6056 
6057 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6058 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6059 				bnx2_5706s_force_link_dn(bp, 1);
6060 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6061 			} else
6062 				bnx2_set_link(bp);
6063 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6064 			bnx2_set_link(bp);
6065 	}
6066 	spin_unlock(&bp->phy_lock);
6067 }
6068 
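/* Periodic SerDes handling for the 5708, needed only on 2.5G-capable
 * PHYs: while autoneg fails to bring the link up, alternate between
 * forced 2.5G and autoneg so a partner in either mode can be found.
 */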
6069 static void
6070 bnx2_5708_serdes_timer(struct bnx2 *bp)
6071 {
6072 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6073 		return;
6074 
6075 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6076 		bp->serdes_an_pending = 0;
6077 		return;
6078 	}
6079 
6080 	spin_lock(&bp->phy_lock);
6081 	if (bp->serdes_an_pending)
6082 		bp->serdes_an_pending--;
6083 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6084 		u32 bmcr;
6085 
6086 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6087 		if (bmcr & BMCR_ANENABLE) {
6088 			bnx2_enable_forced_2g5(bp);
6089 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6090 		} else {
6091 			bnx2_disable_forced_2g5(bp);
6092 			bp->serdes_an_pending = 2;
6093 			bp->current_interval = BNX2_TIMER_INTERVAL;
6094 		}
6095 
6096 	} else
6097 		bp->current_interval = BNX2_TIMER_INTERVAL;
6098 
6099 	spin_unlock(&bp->phy_lock);
6100 }
6101 
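/* Main driver timer: check for missed MSIs, send the management
 * heartbeat, refresh the firmware rx drop count, work around broken
 * statistics blocks, and run the SerDes state machines.  Re-arms
 * itself every bp->current_interval jiffies.
 */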
6102 static void
6103 bnx2_timer(unsigned long data)
6104 {
6105 	struct bnx2 *bp = (struct bnx2 *) data;
6106 
6107 	if (!netif_running(bp->dev))
6108 		return;
6109 
6110 	if (atomic_read(&bp->intr_sem) != 0)
6111 		goto bnx2_restart_timer;
6112 
6113 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6114 	     BNX2_FLAG_USING_MSI)
6115 		bnx2_chk_missed_msi(bp);
6116 
6117 	bnx2_send_heart_beat(bp);
6118 
6119 	bp->stats_blk->stat_FwRxDrop =
6120 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6121 
	/* Work around occasionally corrupted counters. */
6123 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6124 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6125 			BNX2_HC_COMMAND_STATS_NOW);
6126 
6127 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6128 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6129 			bnx2_5706_serdes_timer(bp);
6130 		else
6131 			bnx2_5708_serdes_timer(bp);
6132 	}
6133 
6134 bnx2_restart_timer:
6135 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6136 }
6137 
6138 static int
6139 bnx2_request_irq(struct bnx2 *bp)
6140 {
6141 	unsigned long flags;
6142 	struct bnx2_irq *irq;
6143 	int rc = 0, i;
6144 
6145 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6146 		flags = 0;
6147 	else
6148 		flags = IRQF_SHARED;
6149 
6150 	for (i = 0; i < bp->irq_nvecs; i++) {
6151 		irq = &bp->irq_tbl[i];
6152 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6153 				 &bp->bnx2_napi[i]);
6154 		if (rc)
6155 			break;
6156 		irq->requested = 1;
6157 	}
6158 	return rc;
6159 }
6160 
6161 static void
6162 __bnx2_free_irq(struct bnx2 *bp)
6163 {
6164 	struct bnx2_irq *irq;
6165 	int i;
6166 
6167 	for (i = 0; i < bp->irq_nvecs; i++) {
6168 		irq = &bp->irq_tbl[i];
6169 		if (irq->requested)
6170 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6171 		irq->requested = 0;
6172 	}
6173 }
6174 
6175 static void
6176 bnx2_free_irq(struct bnx2 *bp)
{
	__bnx2_free_irq(bp);
6180 	if (bp->flags & BNX2_FLAG_USING_MSI)
6181 		pci_disable_msi(bp->pdev);
6182 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6183 		pci_disable_msix(bp->pdev);
6184 
6185 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6186 }
6187 
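/* Try to switch to MSI-X.  pci_enable_msix() returns a positive count
 * when fewer vectors are available, so the request is retried with
 * that count until it succeeds or falls below BNX2_MIN_MSIX_VEC.  One
 * extra vector is reserved for CNIC when it is configured.
 */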
6188 static void
6189 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6190 {
6191 	int i, total_vecs, rc;
6192 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6193 	struct net_device *dev = bp->dev;
6194 	const int len = sizeof(bp->irq_tbl[0].name);
6195 
6196 	bnx2_setup_msix_tbl(bp);
6197 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6198 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6199 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6200 
	/* Flush the previous three writes to ensure that MSI-X is set
	 * up properly.
	 */
6203 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6204 
6205 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6206 		msix_ent[i].entry = i;
6207 		msix_ent[i].vector = 0;
6208 	}
6209 
6210 	total_vecs = msix_vecs;
6211 #ifdef BCM_CNIC
6212 	total_vecs++;
6213 #endif
	rc = -ENOSPC;
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		total_vecs = rc;
	}
6222 
6223 	if (rc != 0)
6224 		return;
6225 
6226 	msix_vecs = total_vecs;
6227 #ifdef BCM_CNIC
6228 	msix_vecs--;
6229 #endif
6230 	bp->irq_nvecs = msix_vecs;
6231 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6232 	for (i = 0; i < total_vecs; i++) {
6233 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6234 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6235 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6236 	}
6237 }
6238 
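/* Choose the interrupt mode and vector count: prefer MSI-X, sized
 * from the default RSS queue count and any user-requested ring
 * counts, then fall back to MSI (one-shot on the 5709), and finally
 * to shared INTx.
 */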
6239 static int
6240 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6241 {
6242 	int cpus = netif_get_num_default_rss_queues();
6243 	int msix_vecs;
6244 
6245 	if (!bp->num_req_rx_rings)
6246 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6247 	else if (!bp->num_req_tx_rings)
6248 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6249 	else
6250 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6251 
6252 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6253 
6254 	bp->irq_tbl[0].handler = bnx2_interrupt;
6255 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6256 	bp->irq_nvecs = 1;
6257 	bp->irq_tbl[0].vector = bp->pdev->irq;
6258 
6259 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6260 		bnx2_enable_msix(bp, msix_vecs);
6261 
6262 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6263 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6264 		if (pci_enable_msi(bp->pdev) == 0) {
6265 			bp->flags |= BNX2_FLAG_USING_MSI;
6266 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6267 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6268 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6269 			} else
6270 				bp->irq_tbl[0].handler = bnx2_msi;
6271 
6272 			bp->irq_tbl[0].vector = bp->pdev->irq;
6273 		}
6274 	}
6275 
6276 	if (!bp->num_req_tx_rings)
6277 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6278 	else
6279 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6280 
6281 	if (!bp->num_req_rx_rings)
6282 		bp->num_rx_rings = bp->irq_nvecs;
6283 	else
6284 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6285 
6286 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6287 
6288 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6289 }
6290 
6291 /* Called with rtnl_lock */
6292 static int
6293 bnx2_open(struct net_device *dev)
6294 {
6295 	struct bnx2 *bp = netdev_priv(dev);
6296 	int rc;
6297 
6298 	rc = bnx2_request_firmware(bp);
6299 	if (rc < 0)
6300 		goto out;
6301 
6302 	netif_carrier_off(dev);
6303 
6304 	bnx2_disable_int(bp);
6305 
6306 	rc = bnx2_setup_int_mode(bp, disable_msi);
6307 	if (rc)
6308 		goto open_err;
6309 	bnx2_init_napi(bp);
6310 	bnx2_napi_enable(bp);
6311 	rc = bnx2_alloc_mem(bp);
6312 	if (rc)
6313 		goto open_err;
6314 
6315 	rc = bnx2_request_irq(bp);
6316 	if (rc)
6317 		goto open_err;
6318 
6319 	rc = bnx2_init_nic(bp, 1);
6320 	if (rc)
6321 		goto open_err;
6322 
6323 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6324 
6325 	atomic_set(&bp->intr_sem, 0);
6326 
6327 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6328 
6329 	bnx2_enable_int(bp);
6330 
6331 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6332 		/* Test MSI to make sure it is working
6333 		 * If MSI test fails, go back to INTx mode
6334 		 */
6335 		if (bnx2_test_intr(bp) != 0) {
6336 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6337 
6338 			bnx2_disable_int(bp);
6339 			bnx2_free_irq(bp);
6340 
6341 			bnx2_setup_int_mode(bp, 1);
6342 
6343 			rc = bnx2_init_nic(bp, 0);
6344 
6345 			if (!rc)
6346 				rc = bnx2_request_irq(bp);
6347 
6348 			if (rc) {
6349 				del_timer_sync(&bp->timer);
6350 				goto open_err;
6351 			}
6352 			bnx2_enable_int(bp);
6353 		}
6354 	}
6355 	if (bp->flags & BNX2_FLAG_USING_MSI)
6356 		netdev_info(dev, "using MSI\n");
6357 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6358 		netdev_info(dev, "using MSIX\n");
6359 
6360 	netif_tx_start_all_queues(dev);
6361 out:
6362 	return rc;
6363 
6364 open_err:
6365 	bnx2_napi_disable(bp);
6366 	bnx2_free_skbs(bp);
6367 	bnx2_free_irq(bp);
6368 	bnx2_free_mem(bp);
6369 	bnx2_del_napi(bp);
6370 	bnx2_release_firmware(bp);
6371 	goto out;
6372 }
6373 
6374 static void
6375 bnx2_reset_task(struct work_struct *work)
6376 {
6377 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6378 	int rc;
6379 	u16 pcicmd;
6380 
6381 	rtnl_lock();
6382 	if (!netif_running(bp->dev)) {
6383 		rtnl_unlock();
6384 		return;
6385 	}
6386 
6387 	bnx2_netif_stop(bp, true);
6388 
6389 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6390 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case the PCI block has been reset */
6392 		pci_restore_state(bp->pdev);
6393 		pci_save_state(bp->pdev);
6394 	}
6395 	rc = bnx2_init_nic(bp, 1);
6396 	if (rc) {
6397 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6398 		bnx2_napi_enable(bp);
6399 		dev_close(bp->dev);
6400 		rtnl_unlock();
6401 		return;
6402 	}
6403 
6404 	atomic_set(&bp->intr_sem, 1);
6405 	bnx2_netif_start(bp, true);
6406 	rtnl_unlock();
6407 }
6408 
6409 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6410 
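/* Debug dump on tx timeout: print the flow-through queue control
 * registers, the internal CPU states (the program counter is read
 * twice to show whether it is advancing), and the 32-entry TBDC CAM.
 */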
6411 static void
6412 bnx2_dump_ftq(struct bnx2 *bp)
6413 {
6414 	int i;
6415 	u32 reg, bdidx, cid, valid;
6416 	struct net_device *dev = bp->dev;
6417 	static const struct ftq_reg {
6418 		char *name;
6419 		u32 off;
6420 	} ftq_arr[] = {
6421 		BNX2_FTQ_ENTRY(RV2P_P),
6422 		BNX2_FTQ_ENTRY(RV2P_T),
6423 		BNX2_FTQ_ENTRY(RV2P_M),
6424 		BNX2_FTQ_ENTRY(TBDR_),
6425 		BNX2_FTQ_ENTRY(TDMA_),
6426 		BNX2_FTQ_ENTRY(TXP_),
6427 		BNX2_FTQ_ENTRY(TXP_),
6428 		BNX2_FTQ_ENTRY(TPAT_),
6429 		BNX2_FTQ_ENTRY(RXP_C),
6430 		BNX2_FTQ_ENTRY(RXP_),
6431 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6432 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6433 		BNX2_FTQ_ENTRY(COM_COMQ_),
6434 		BNX2_FTQ_ENTRY(CP_CPQ_),
6435 	};
6436 
6437 	netdev_err(dev, "<--- start FTQ dump --->\n");
6438 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6439 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6440 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6441 
6442 	netdev_err(dev, "CPU states:\n");
6443 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6444 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6445 			   reg, bnx2_reg_rd_ind(bp, reg),
6446 			   bnx2_reg_rd_ind(bp, reg + 4),
6447 			   bnx2_reg_rd_ind(bp, reg + 8),
6448 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6449 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6450 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6451 
6452 	netdev_err(dev, "<--- end FTQ dump --->\n");
6453 	netdev_err(dev, "<--- start TBDC dump --->\n");
6454 	netdev_err(dev, "TBDC free cnt: %ld\n",
6455 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6456 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6457 	for (i = 0; i < 0x20; i++) {
6458 		int j = 0;
6459 
6460 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6461 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6462 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6463 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6464 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6465 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6466 			j++;
6467 
6468 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6469 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6470 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6471 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6472 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6473 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6474 	}
6475 	netdev_err(dev, "<--- end TBDC dump --->\n");
6476 }
6477 
6478 static void
6479 bnx2_dump_state(struct bnx2 *bp)
6480 {
6481 	struct net_device *dev = bp->dev;
6482 	u32 val1, val2;
6483 
6484 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6485 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6486 		   atomic_read(&bp->intr_sem), val1);
6487 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6488 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6489 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6490 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6491 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6492 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6493 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6494 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6495 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6496 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6497 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6498 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6499 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6500 }
6501 
6502 static void
6503 bnx2_tx_timeout(struct net_device *dev)
6504 {
6505 	struct bnx2 *bp = netdev_priv(dev);
6506 
6507 	bnx2_dump_ftq(bp);
6508 	bnx2_dump_state(bp);
6509 	bnx2_dump_mcp_state(bp);
6510 
	/* This allows the netif to be shut down gracefully before resetting */
6512 	schedule_work(&bp->reset_task);
6513 }
6514 
6515 /* Called with netif_tx_lock.
6516  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6517  * netif_wake_queue().
6518  */
6519 static netdev_tx_t
6520 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6521 {
6522 	struct bnx2 *bp = netdev_priv(dev);
6523 	dma_addr_t mapping;
6524 	struct bnx2_tx_bd *txbd;
6525 	struct bnx2_sw_tx_bd *tx_buf;
6526 	u32 len, vlan_tag_flags, last_frag, mss;
6527 	u16 prod, ring_prod;
6528 	int i;
6529 	struct bnx2_napi *bnapi;
6530 	struct bnx2_tx_ring_info *txr;
6531 	struct netdev_queue *txq;
6532 
	/* Determine which tx ring this skb will be placed on */
6534 	i = skb_get_queue_mapping(skb);
6535 	bnapi = &bp->bnx2_napi[i];
6536 	txr = &bnapi->tx_ring;
6537 	txq = netdev_get_tx_queue(dev, i);
6538 
6539 	if (unlikely(bnx2_tx_avail(bp, txr) <
6540 	    (skb_shinfo(skb)->nr_frags + 1))) {
6541 		netif_tx_stop_queue(txq);
6542 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6543 
6544 		return NETDEV_TX_BUSY;
6545 	}
6546 	len = skb_headlen(skb);
6547 	prod = txr->tx_prod;
6548 	ring_prod = BNX2_TX_RING_IDX(prod);
6549 
6550 	vlan_tag_flags = 0;
6551 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6552 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6553 	}
6554 
6555 	if (vlan_tx_tag_present(skb)) {
6556 		vlan_tag_flags |=
6557 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6558 	}
6559 
6560 	if ((mss = skb_shinfo(skb)->gso_size)) {
6561 		u32 tcp_opt_len;
6562 		struct iphdr *iph;
6563 
6564 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6565 
6566 		tcp_opt_len = tcp_optlen(skb);
6567 
6568 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6569 			u32 tcp_off = skb_transport_offset(skb) -
6570 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6571 
6572 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6573 					  TX_BD_FLAGS_SW_FLAGS;
6574 			if (likely(tcp_off == 0))
6575 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6576 			else {
6577 				tcp_off >>= 3;
6578 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6579 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6580 						  ((tcp_off & 0x10) <<
6581 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6582 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6583 			}
6584 		} else {
6585 			iph = ip_hdr(skb);
6586 			if (tcp_opt_len || (iph->ihl > 5)) {
6587 				vlan_tag_flags |= ((iph->ihl - 5) +
6588 						   (tcp_opt_len >> 2)) << 8;
6589 			}
6590 		}
6591 	} else
6592 		mss = 0;
6593 
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
6595 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6596 		dev_kfree_skb(skb);
6597 		return NETDEV_TX_OK;
6598 	}
6599 
6600 	tx_buf = &txr->tx_buf_ring[ring_prod];
6601 	tx_buf->skb = skb;
6602 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6603 
6604 	txbd = &txr->tx_desc_ring[ring_prod];
6605 
6606 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6607 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6608 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6609 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6610 
6611 	last_frag = skb_shinfo(skb)->nr_frags;
6612 	tx_buf->nr_frags = last_frag;
6613 	tx_buf->is_gso = skb_is_gso(skb);
6614 
6615 	for (i = 0; i < last_frag; i++) {
6616 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6617 
6618 		prod = BNX2_NEXT_TX_BD(prod);
6619 		ring_prod = BNX2_TX_RING_IDX(prod);
6620 		txbd = &txr->tx_desc_ring[ring_prod];
6621 
6622 		len = skb_frag_size(frag);
6623 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6624 					   DMA_TO_DEVICE);
6625 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6626 			goto dma_error;
6627 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6628 				   mapping);
6629 
6630 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6631 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6632 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
6636 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6637 
6638 	/* Sync BD data before updating TX mailbox */
6639 	wmb();
6640 
6641 	netdev_tx_sent_queue(txq, skb->len);
6642 
6643 	prod = BNX2_NEXT_TX_BD(prod);
6644 	txr->tx_prod_bseq += skb->len;
6645 
6646 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6647 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6648 
6649 	mmiowb();
6650 
6651 	txr->tx_prod = prod;
6652 
6653 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6654 		netif_tx_stop_queue(txq);
6655 
6656 		/* netif_tx_stop_queue() must be done before checking
6657 		 * tx index in bnx2_tx_avail() below, because in
6658 		 * bnx2_tx_int(), we update tx index before checking for
6659 		 * netif_tx_queue_stopped().
6660 		 */
6661 		smp_mb();
6662 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6663 			netif_tx_wake_queue(txq);
6664 	}
6665 
6666 	return NETDEV_TX_OK;
6667 dma_error:
6668 	/* save value of frag that failed */
6669 	last_frag = i;
6670 
6671 	/* start back at beginning and unmap skb */
6672 	prod = txr->tx_prod;
6673 	ring_prod = BNX2_TX_RING_IDX(prod);
6674 	tx_buf = &txr->tx_buf_ring[ring_prod];
6675 	tx_buf->skb = NULL;
6676 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
6678 
6679 	/* unmap remaining mapped pages */
6680 	for (i = 0; i < last_frag; i++) {
6681 		prod = BNX2_NEXT_TX_BD(prod);
6682 		ring_prod = BNX2_TX_RING_IDX(prod);
6683 		tx_buf = &txr->tx_buf_ring[ring_prod];
6684 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6685 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
6687 	}
6688 
6689 	dev_kfree_skb(skb);
6690 	return NETDEV_TX_OK;
6691 }
6692 
6693 /* Called with rtnl_lock */
6694 static int
6695 bnx2_close(struct net_device *dev)
6696 {
6697 	struct bnx2 *bp = netdev_priv(dev);
6698 
6699 	bnx2_disable_int_sync(bp);
6700 	bnx2_napi_disable(bp);
6701 	netif_tx_disable(dev);
6702 	del_timer_sync(&bp->timer);
6703 	bnx2_shutdown_chip(bp);
6704 	bnx2_free_irq(bp);
6705 	bnx2_free_skbs(bp);
6706 	bnx2_free_mem(bp);
6707 	bnx2_del_napi(bp);
6708 	bp->link_up = 0;
6709 	netif_carrier_off(bp->dev);
6710 	return 0;
6711 }
6712 
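/* Accumulate the current hardware counters into temp_stats_blk so the
 * totals survive a chip reset, which clears the hardware statistics
 * block.
 */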
6713 static void
6714 bnx2_save_stats(struct bnx2 *bp)
6715 {
6716 	u32 *hw_stats = (u32 *) bp->stats_blk;
6717 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6718 	int i;
6719 
	/* The first 10 counters are 64-bit, stored as hi/lo u32 pairs
	 * (20 u32 words); the carry is propagated by hand.
	 */
6721 	for (i = 0; i < 20; i += 2) {
6722 		u32 hi;
6723 		u64 lo;
6724 
6725 		hi = temp_stats[i] + hw_stats[i];
6726 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6727 		if (lo > 0xffffffff)
6728 			hi++;
6729 		temp_stats[i] = hi;
6730 		temp_stats[i + 1] = lo & 0xffffffff;
6731 	}
6732 
6733 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6734 		temp_stats[i] += hw_stats[i];
6735 }
6736 
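/* Helpers that combine the hi/lo halves of a 64-bit hardware counter
 * and add the totals saved in temp_stats_blk to the live block.
 */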
6737 #define GET_64BIT_NET_STATS64(ctr)		\
6738 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6739 
6740 #define GET_64BIT_NET_STATS(ctr)				\
6741 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6742 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6743 
6744 #define GET_32BIT_NET_STATS(ctr)				\
6745 	(unsigned long) (bp->stats_blk->ctr +			\
6746 			 bp->temp_stats_blk->ctr)
6747 
6748 static struct rtnl_link_stats64 *
6749 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6750 {
6751 	struct bnx2 *bp = netdev_priv(dev);
6752 
6753 	if (bp->stats_blk == NULL)
6754 		return net_stats;
6755 
6756 	net_stats->rx_packets =
6757 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6758 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6759 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6760 
6761 	net_stats->tx_packets =
6762 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6763 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6764 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6765 
6766 	net_stats->rx_bytes =
6767 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6768 
6769 	net_stats->tx_bytes =
6770 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6771 
6772 	net_stats->multicast =
6773 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6774 
6775 	net_stats->collisions =
6776 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6777 
6778 	net_stats->rx_length_errors =
6779 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6780 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6781 
6782 	net_stats->rx_over_errors =
6783 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6784 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6785 
6786 	net_stats->rx_frame_errors =
6787 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6788 
6789 	net_stats->rx_crc_errors =
6790 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6791 
6792 	net_stats->rx_errors = net_stats->rx_length_errors +
6793 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6794 		net_stats->rx_crc_errors;
6795 
6796 	net_stats->tx_aborted_errors =
6797 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6798 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6799 
6800 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6801 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6802 		net_stats->tx_carrier_errors = 0;
6803 	else {
6804 		net_stats->tx_carrier_errors =
6805 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6806 	}
6807 
6808 	net_stats->tx_errors =
6809 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6810 		net_stats->tx_aborted_errors +
6811 		net_stats->tx_carrier_errors;
6812 
6813 	net_stats->rx_missed_errors =
6814 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6815 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6816 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6817 
6818 	return net_stats;
6819 }
6820 
6821 /* All ethtool functions called with rtnl_lock */
6822 
6823 static int
6824 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6825 {
6826 	struct bnx2 *bp = netdev_priv(dev);
6827 	int support_serdes = 0, support_copper = 0;
6828 
6829 	cmd->supported = SUPPORTED_Autoneg;
6830 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6831 		support_serdes = 1;
6832 		support_copper = 1;
6833 	} else if (bp->phy_port == PORT_FIBRE)
6834 		support_serdes = 1;
6835 	else
6836 		support_copper = 1;
6837 
6838 	if (support_serdes) {
6839 		cmd->supported |= SUPPORTED_1000baseT_Full |
6840 			SUPPORTED_FIBRE;
6841 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
6845 	if (support_copper) {
6846 		cmd->supported |= SUPPORTED_10baseT_Half |
6847 			SUPPORTED_10baseT_Full |
6848 			SUPPORTED_100baseT_Half |
6849 			SUPPORTED_100baseT_Full |
6850 			SUPPORTED_1000baseT_Full |
6851 			SUPPORTED_TP;
	}
6854 
6855 	spin_lock_bh(&bp->phy_lock);
6856 	cmd->port = bp->phy_port;
6857 	cmd->advertising = bp->advertising;
6858 
6859 	if (bp->autoneg & AUTONEG_SPEED) {
6860 		cmd->autoneg = AUTONEG_ENABLE;
6861 	} else {
6862 		cmd->autoneg = AUTONEG_DISABLE;
6863 	}
6864 
6865 	if (netif_carrier_ok(dev)) {
6866 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6867 		cmd->duplex = bp->duplex;
6868 	}
6869 	else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
6872 	}
6873 	spin_unlock_bh(&bp->phy_lock);
6874 
6875 	cmd->transceiver = XCVR_INTERNAL;
6876 	cmd->phy_address = bp->phy_addr;
6877 
6878 	return 0;
6879 }
6880 
6881 static int
6882 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6883 {
6884 	struct bnx2 *bp = netdev_priv(dev);
6885 	u8 autoneg = bp->autoneg;
6886 	u8 req_duplex = bp->req_duplex;
6887 	u16 req_line_speed = bp->req_line_speed;
6888 	u32 advertising = bp->advertising;
6889 	int err = -EINVAL;
6890 
6891 	spin_lock_bh(&bp->phy_lock);
6892 
6893 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6894 		goto err_out_unlock;
6895 
6896 	if (cmd->port != bp->phy_port &&
6897 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6898 		goto err_out_unlock;
6899 
6900 	/* If device is down, we can store the settings only if the user
6901 	 * is setting the currently active port.
6902 	 */
6903 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6904 		goto err_out_unlock;
6905 
6906 	if (cmd->autoneg == AUTONEG_ENABLE) {
6907 		autoneg |= AUTONEG_SPEED;
6908 
6909 		advertising = cmd->advertising;
6910 		if (cmd->port == PORT_TP) {
6911 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6912 			if (!advertising)
6913 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6914 		} else {
6915 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6916 			if (!advertising)
6917 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6918 		}
6919 		advertising |= ADVERTISED_Autoneg;
6920 	}
6921 	else {
6922 		u32 speed = ethtool_cmd_speed(cmd);
6923 		if (cmd->port == PORT_FIBRE) {
6924 			if ((speed != SPEED_1000 &&
6925 			     speed != SPEED_2500) ||
6926 			    (cmd->duplex != DUPLEX_FULL))
6927 				goto err_out_unlock;
6928 
6929 			if (speed == SPEED_2500 &&
6930 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6931 				goto err_out_unlock;
6932 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6933 			goto err_out_unlock;
6934 
6935 		autoneg &= ~AUTONEG_SPEED;
6936 		req_line_speed = speed;
6937 		req_duplex = cmd->duplex;
6938 		advertising = 0;
6939 	}
6940 
6941 	bp->autoneg = autoneg;
6942 	bp->advertising = advertising;
6943 	bp->req_line_speed = req_line_speed;
6944 	bp->req_duplex = req_duplex;
6945 
6946 	err = 0;
6947 	/* If device is down, the new settings will be picked up when it is
6948 	 * brought up.
6949 	 */
6950 	if (netif_running(dev))
6951 		err = bnx2_setup_phy(bp, cmd->port);
6952 
6953 err_out_unlock:
6954 	spin_unlock_bh(&bp->phy_lock);
6955 
6956 	return err;
6957 }
6958 
6959 static void
6960 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6961 {
6962 	struct bnx2 *bp = netdev_priv(dev);
6963 
6964 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6965 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6966 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6967 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6968 }
6969 
6970 #define BNX2_REGDUMP_LEN		(32 * 1024)
6971 
6972 static int
6973 bnx2_get_regs_len(struct net_device *dev)
6974 {
6975 	return BNX2_REGDUMP_LEN;
6976 }
6977 
6978 static void
6979 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6980 {
6981 	u32 *p = _p, i, offset;
6982 	u8 *orig_p = _p;
6983 	struct bnx2 *bp = netdev_priv(dev);
6984 	static const u32 reg_boundaries[] = {
6985 		0x0000, 0x0098, 0x0400, 0x045c,
6986 		0x0800, 0x0880, 0x0c00, 0x0c10,
6987 		0x0c30, 0x0d08, 0x1000, 0x101c,
6988 		0x1040, 0x1048, 0x1080, 0x10a4,
6989 		0x1400, 0x1490, 0x1498, 0x14f0,
6990 		0x1500, 0x155c, 0x1580, 0x15dc,
6991 		0x1600, 0x1658, 0x1680, 0x16d8,
6992 		0x1800, 0x1820, 0x1840, 0x1854,
6993 		0x1880, 0x1894, 0x1900, 0x1984,
6994 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6995 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6996 		0x2000, 0x2030, 0x23c0, 0x2400,
6997 		0x2800, 0x2820, 0x2830, 0x2850,
6998 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
6999 		0x3c00, 0x3c94, 0x4000, 0x4010,
7000 		0x4080, 0x4090, 0x43c0, 0x4458,
7001 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7002 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7003 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7004 		0x5fc0, 0x6000, 0x6400, 0x6428,
7005 		0x6800, 0x6848, 0x684c, 0x6860,
7006 		0x6888, 0x6910, 0x8000
7007 	};
7008 
7009 	regs->version = 0;
7010 
7011 	memset(p, 0, BNX2_REGDUMP_LEN);
7012 
7013 	if (!netif_running(bp->dev))
7014 		return;
7015 
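	/* reg_boundaries[] holds alternating start/end offsets of the
	 * readable register windows; the gaps in between are left as
	 * zeros in the dump buffer.
	 */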
7016 	i = 0;
7017 	offset = reg_boundaries[0];
7018 	p += offset;
7019 	while (offset < BNX2_REGDUMP_LEN) {
7020 		*p++ = BNX2_RD(bp, offset);
7021 		offset += 4;
7022 		if (offset == reg_boundaries[i + 1]) {
7023 			offset = reg_boundaries[i + 2];
7024 			p = (u32 *) (orig_p + offset);
7025 			i += 2;
7026 		}
7027 	}
7028 }
7029 
7030 static void
7031 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7032 {
7033 	struct bnx2 *bp = netdev_priv(dev);
7034 
7035 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7036 		wol->supported = 0;
7037 		wol->wolopts = 0;
7038 	}
7039 	else {
7040 		wol->supported = WAKE_MAGIC;
7041 		if (bp->wol)
7042 			wol->wolopts = WAKE_MAGIC;
7043 		else
7044 			wol->wolopts = 0;
7045 	}
7046 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7047 }
7048 
7049 static int
7050 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7051 {
7052 	struct bnx2 *bp = netdev_priv(dev);
7053 
7054 	if (wol->wolopts & ~WAKE_MAGIC)
7055 		return -EINVAL;
7056 
7057 	if (wol->wolopts & WAKE_MAGIC) {
7058 		if (bp->flags & BNX2_FLAG_NO_WOL)
7059 			return -EINVAL;
7060 
7061 		bp->wol = 1;
7062 	}
7063 	else {
7064 		bp->wol = 0;
7065 	}
7066 
7067 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7068 
7069 	return 0;
7070 }
7071 
7072 static int
7073 bnx2_nway_reset(struct net_device *dev)
7074 {
7075 	struct bnx2 *bp = netdev_priv(dev);
7076 	u32 bmcr;
7077 
7078 	if (!netif_running(dev))
7079 		return -EAGAIN;
7080 
7081 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7082 		return -EINVAL;
7083 	}
7084 
7085 	spin_lock_bh(&bp->phy_lock);
7086 
7087 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7088 		int rc;
7089 
7090 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7091 		spin_unlock_bh(&bp->phy_lock);
7092 		return rc;
7093 	}
7094 
	/* Force a link-down event that is visible on the other side */
7096 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7097 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7098 		spin_unlock_bh(&bp->phy_lock);
7099 
7100 		msleep(20);
7101 
7102 		spin_lock_bh(&bp->phy_lock);
7103 
7104 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7105 		bp->serdes_an_pending = 1;
7106 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7107 	}
7108 
7109 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7110 	bmcr &= ~BMCR_LOOPBACK;
7111 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7112 
7113 	spin_unlock_bh(&bp->phy_lock);
7114 
7115 	return 0;
7116 }
7117 
7118 static u32
7119 bnx2_get_link(struct net_device *dev)
7120 {
7121 	struct bnx2 *bp = netdev_priv(dev);
7122 
7123 	return bp->link_up;
7124 }
7125 
7126 static int
7127 bnx2_get_eeprom_len(struct net_device *dev)
7128 {
7129 	struct bnx2 *bp = netdev_priv(dev);
7130 
7131 	if (bp->flash_info == NULL)
7132 		return 0;
7133 
7134 	return (int) bp->flash_size;
7135 }
7136 
7137 static int
7138 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7139 		u8 *eebuf)
7140 {
7141 	struct bnx2 *bp = netdev_priv(dev);
7142 	int rc;
7143 
7144 	/* parameters already validated in ethtool_get_eeprom */
7145 
7146 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7147 
7148 	return rc;
7149 }
7150 
7151 static int
7152 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7153 		u8 *eebuf)
7154 {
7155 	struct bnx2 *bp = netdev_priv(dev);
7156 	int rc;
7157 
7158 	/* parameters already validated in ethtool_set_eeprom */
7159 
7160 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7161 
7162 	return rc;
7163 }
7164 
7165 static int
7166 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7167 {
7168 	struct bnx2 *bp = netdev_priv(dev);
7169 
7170 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7171 
7172 	coal->rx_coalesce_usecs = bp->rx_ticks;
7173 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7174 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7175 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7176 
7177 	coal->tx_coalesce_usecs = bp->tx_ticks;
7178 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7179 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7180 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7181 
7182 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7183 
7184 	return 0;
7185 }
7186 
7187 static int
7188 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7189 {
7190 	struct bnx2 *bp = netdev_priv(dev);
7191 
7192 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7193 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7194 
7195 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7196 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7197 
7198 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7199 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7200 
7201 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7202 	if (bp->rx_quick_cons_trip_int > 0xff)
7203 		bp->rx_quick_cons_trip_int = 0xff;
7204 
7205 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7206 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7207 
7208 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7209 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7210 
7211 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7212 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7213 
7214 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
7217 
7218 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7219 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7220 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7221 			bp->stats_ticks = USEC_PER_SEC;
7222 	}
7223 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7224 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7225 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7226 
7227 	if (netif_running(bp->dev)) {
7228 		bnx2_netif_stop(bp, true);
7229 		bnx2_init_nic(bp, 0);
7230 		bnx2_netif_start(bp, true);
7231 	}
7232 
7233 	return 0;
7234 }
7235 
7236 static void
7237 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7238 {
7239 	struct bnx2 *bp = netdev_priv(dev);
7240 
7241 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7242 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7243 
7244 	ering->rx_pending = bp->rx_ring_size;
7245 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7246 
7247 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7248 	ering->tx_pending = bp->tx_ring_size;
7249 }
7250 
7251 static int
7252 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7253 {
7254 	if (netif_running(bp->dev)) {
7255 		/* Reset will erase chipset stats; save them */
7256 		bnx2_save_stats(bp);
7257 
7258 		bnx2_netif_stop(bp, true);
7259 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7260 		if (reset_irq) {
7261 			bnx2_free_irq(bp);
7262 			bnx2_del_napi(bp);
7263 		} else {
7264 			__bnx2_free_irq(bp);
7265 		}
7266 		bnx2_free_skbs(bp);
7267 		bnx2_free_mem(bp);
7268 	}
7269 
7270 	bnx2_set_rx_ring_size(bp, rx);
7271 	bp->tx_ring_size = tx;
7272 
7273 	if (netif_running(bp->dev)) {
7274 		int rc = 0;
7275 
7276 		if (reset_irq) {
7277 			rc = bnx2_setup_int_mode(bp, disable_msi);
7278 			bnx2_init_napi(bp);
7279 		}
7280 
7281 		if (!rc)
7282 			rc = bnx2_alloc_mem(bp);
7283 
7284 		if (!rc)
7285 			rc = bnx2_request_irq(bp);
7286 
7287 		if (!rc)
7288 			rc = bnx2_init_nic(bp, 0);
7289 
7290 		if (rc) {
7291 			bnx2_napi_enable(bp);
7292 			dev_close(bp->dev);
7293 			return rc;
7294 		}
7295 #ifdef BCM_CNIC
7296 		mutex_lock(&bp->cnic_lock);
7297 		/* Let cnic know about the new status block. */
7298 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7299 			bnx2_setup_cnic_irq_info(bp);
7300 		mutex_unlock(&bp->cnic_lock);
7301 #endif
7302 		bnx2_netif_start(bp, true);
7303 	}
7304 	return 0;
7305 }
7306 
7307 static int
7308 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7309 {
7310 	struct bnx2 *bp = netdev_priv(dev);
7311 	int rc;
7312 
7313 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7314 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7315 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7316 
7317 		return -EINVAL;
7318 	}
7319 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7320 				   false);
7321 	return rc;
7322 }
7323 
7324 static void
7325 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7326 {
7327 	struct bnx2 *bp = netdev_priv(dev);
7328 
7329 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7330 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7331 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7332 }
7333 
7334 static int
7335 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7336 {
7337 	struct bnx2 *bp = netdev_priv(dev);
7338 
7339 	bp->req_flow_ctrl = 0;
7340 	if (epause->rx_pause)
7341 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7342 	if (epause->tx_pause)
7343 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7344 
7345 	if (epause->autoneg) {
7346 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7347 	}
7348 	else {
7349 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7350 	}
7351 
7352 	if (netif_running(dev)) {
7353 		spin_lock_bh(&bp->phy_lock);
7354 		bnx2_setup_phy(bp, bp->phy_port);
7355 		spin_unlock_bh(&bp->phy_lock);
7356 	}
7357 
7358 	return 0;
7359 }
7360 
7361 static struct {
7362 	char string[ETH_GSTRING_LEN];
7363 } bnx2_stats_str_arr[] = {
7364 	{ "rx_bytes" },
7365 	{ "rx_error_bytes" },
7366 	{ "tx_bytes" },
7367 	{ "tx_error_bytes" },
7368 	{ "rx_ucast_packets" },
7369 	{ "rx_mcast_packets" },
7370 	{ "rx_bcast_packets" },
7371 	{ "tx_ucast_packets" },
7372 	{ "tx_mcast_packets" },
7373 	{ "tx_bcast_packets" },
7374 	{ "tx_mac_errors" },
7375 	{ "tx_carrier_errors" },
7376 	{ "rx_crc_errors" },
7377 	{ "rx_align_errors" },
7378 	{ "tx_single_collisions" },
7379 	{ "tx_multi_collisions" },
7380 	{ "tx_deferred" },
7381 	{ "tx_excess_collisions" },
7382 	{ "tx_late_collisions" },
7383 	{ "tx_total_collisions" },
7384 	{ "rx_fragments" },
7385 	{ "rx_jabbers" },
7386 	{ "rx_undersize_packets" },
7387 	{ "rx_oversize_packets" },
7388 	{ "rx_64_byte_packets" },
7389 	{ "rx_65_to_127_byte_packets" },
7390 	{ "rx_128_to_255_byte_packets" },
7391 	{ "rx_256_to_511_byte_packets" },
7392 	{ "rx_512_to_1023_byte_packets" },
7393 	{ "rx_1024_to_1522_byte_packets" },
7394 	{ "rx_1523_to_9022_byte_packets" },
7395 	{ "tx_64_byte_packets" },
7396 	{ "tx_65_to_127_byte_packets" },
7397 	{ "tx_128_to_255_byte_packets" },
7398 	{ "tx_256_to_511_byte_packets" },
7399 	{ "tx_512_to_1023_byte_packets" },
7400 	{ "tx_1024_to_1522_byte_packets" },
7401 	{ "tx_1523_to_9022_byte_packets" },
7402 	{ "rx_xon_frames" },
7403 	{ "rx_xoff_frames" },
7404 	{ "tx_xon_frames" },
7405 	{ "tx_xoff_frames" },
7406 	{ "rx_mac_ctrl_frames" },
7407 	{ "rx_filtered_packets" },
7408 	{ "rx_ftq_discards" },
7409 	{ "rx_discards" },
7410 	{ "rx_fw_discards" },
7411 };
7412 
7413 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7414 
7415 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7416 
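/* Offset, in u32 words, of each ethtool counter within
 * struct statistics_block; 64-bit counters point at their _hi word.
 */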
7417 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7418     STATS_OFFSET32(stat_IfHCInOctets_hi),
7419     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7420     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7421     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7422     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7423     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7424     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7425     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7426     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7427     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7428     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7429     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7430     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7431     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7432     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7433     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7434     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7435     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7436     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7437     STATS_OFFSET32(stat_EtherStatsCollisions),
7438     STATS_OFFSET32(stat_EtherStatsFragments),
7439     STATS_OFFSET32(stat_EtherStatsJabbers),
7440     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7441     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7442     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7443     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7444     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7445     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7446     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7447     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7448     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7449     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7450     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7451     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7452     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7453     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7454     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7455     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7456     STATS_OFFSET32(stat_XonPauseFramesReceived),
7457     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7458     STATS_OFFSET32(stat_OutXonSent),
7459     STATS_OFFSET32(stat_OutXoffSent),
7460     STATS_OFFSET32(stat_MacControlFramesReceived),
7461     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7462     STATS_OFFSET32(stat_IfInFTQDiscards),
7463     STATS_OFFSET32(stat_IfInMBUFDiscards),
7464     STATS_OFFSET32(stat_FwRxDrop),
7465 };
7466 
/* stat_IfHCInBadOctets is skipped on all chips, and
 * stat_Dot3StatsCarrierSenseErrors is additionally skipped on the
 * 5706 and 5708 A0, because of errata; see the zero entries below.
 */
7470 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7471 	8,0,8,8,8,8,8,8,8,8,
7472 	4,0,4,4,4,4,4,4,4,4,
7473 	4,4,4,4,4,4,4,4,4,4,
7474 	4,4,4,4,4,4,4,4,4,4,
7475 	4,4,4,4,4,4,4,
7476 };
7477 
7478 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7479 	8,0,8,8,8,8,8,8,8,8,
7480 	4,4,4,4,4,4,4,4,4,4,
7481 	4,4,4,4,4,4,4,4,4,4,
7482 	4,4,4,4,4,4,4,4,4,4,
7483 	4,4,4,4,4,4,4,
7484 };
7485 
7486 #define BNX2_NUM_TESTS 6
7487 
7488 static struct {
7489 	char string[ETH_GSTRING_LEN];
7490 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7491 	{ "register_test (offline)" },
7492 	{ "memory_test (offline)" },
7493 	{ "loopback_test (offline)" },
7494 	{ "nvram_test (online)" },
7495 	{ "interrupt_test (online)" },
7496 	{ "link_test (online)" },
7497 };
7498 
7499 static int
7500 bnx2_get_sset_count(struct net_device *dev, int sset)
7501 {
7502 	switch (sset) {
7503 	case ETH_SS_TEST:
7504 		return BNX2_NUM_TESTS;
7505 	case ETH_SS_STATS:
7506 		return BNX2_NUM_STATS;
7507 	default:
7508 		return -EOPNOTSUPP;
7509 	}
7510 }
7511 
7512 static void
7513 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7514 {
7515 	struct bnx2 *bp = netdev_priv(dev);
7516 
7517 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7518 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7519 		int i;
7520 
7521 		bnx2_netif_stop(bp, true);
7522 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7523 		bnx2_free_skbs(bp);
7524 
7525 		if (bnx2_test_registers(bp) != 0) {
7526 			buf[0] = 1;
7527 			etest->flags |= ETH_TEST_FL_FAILED;
7528 		}
7529 		if (bnx2_test_memory(bp) != 0) {
7530 			buf[1] = 1;
7531 			etest->flags |= ETH_TEST_FL_FAILED;
7532 		}
		buf[2] = bnx2_test_loopback(bp);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
7535 
7536 		if (!netif_running(bp->dev))
7537 			bnx2_shutdown_chip(bp);
7538 		else {
7539 			bnx2_init_nic(bp, 1);
7540 			bnx2_netif_start(bp, true);
7541 		}
7542 
7543 		/* wait for link up */
7544 		for (i = 0; i < 7; i++) {
7545 			if (bp->link_up)
7546 				break;
7547 			msleep_interruptible(1000);
7548 		}
7549 	}
7550 
7551 	if (bnx2_test_nvram(bp) != 0) {
7552 		buf[3] = 1;
7553 		etest->flags |= ETH_TEST_FL_FAILED;
7554 	}
7555 	if (bnx2_test_intr(bp) != 0) {
7556 		buf[4] = 1;
7557 		etest->flags |= ETH_TEST_FL_FAILED;
7558 	}
7559 
7560 	if (bnx2_test_link(bp) != 0) {
7561 		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
7565 }
7566 
7567 static void
7568 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7569 {
7570 	switch (stringset) {
7571 	case ETH_SS_STATS:
7572 		memcpy(buf, bnx2_stats_str_arr,
7573 			sizeof(bnx2_stats_str_arr));
7574 		break;
7575 	case ETH_SS_TEST:
7576 		memcpy(buf, bnx2_tests_str_arr,
7577 			sizeof(bnx2_tests_str_arr));
7578 		break;
7579 	}
7580 }
7581 
7582 static void
7583 bnx2_get_ethtool_stats(struct net_device *dev,
7584 		struct ethtool_stats *stats, u64 *buf)
7585 {
7586 	struct bnx2 *bp = netdev_priv(dev);
7587 	int i;
7588 	u32 *hw_stats = (u32 *) bp->stats_blk;
7589 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7590 	u8 *stats_len_arr = NULL;
7591 
7592 	if (hw_stats == NULL) {
7593 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7594 		return;
7595 	}
7596 
7597 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7598 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7599 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7600 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7601 		stats_len_arr = bnx2_5706_stats_len_arr;
7602 	else
7603 		stats_len_arr = bnx2_5708_stats_len_arr;
7604 
7605 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7606 		unsigned long offset;
7607 
7608 		if (stats_len_arr[i] == 0) {
7609 			/* skip this counter */
7610 			buf[i] = 0;
7611 			continue;
7612 		}
7613 
7614 		offset = bnx2_stats_offset_arr[i];
7615 		if (stats_len_arr[i] == 4) {
7616 			/* 4-byte counter */
7617 			buf[i] = (u64) *(hw_stats + offset) +
7618 				 *(temp_stats + offset);
7619 			continue;
7620 		}
7621 		/* 8-byte counter */
7622 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7623 			 *(hw_stats + offset + 1) +
7624 			 (((u64) *(temp_stats + offset)) << 32) +
7625 			 *(temp_stats + offset + 1);
7626 	}
7627 }
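
/* Worked example with illustrative values: for a 64-bit counter whose
 * two hardware words are hw_stats[offset] = 0x00000001 and
 * hw_stats[offset + 1] = 0x23456789, the loop above yields
 * buf[i] = 0x0000000123456789, plus the matching words from
 * temp_stats, which hold the totals saved across chip resets.
 */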
7628 
7629 static int
7630 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7631 {
7632 	struct bnx2 *bp = netdev_priv(dev);
7633 
7634 	switch (state) {
7635 	case ETHTOOL_ID_ACTIVE:
7636 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7637 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7638 		return 1;	/* cycle on/off once per second */
7639 
7640 	case ETHTOOL_ID_ON:
7641 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7642 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7643 			BNX2_EMAC_LED_100MB_OVERRIDE |
7644 			BNX2_EMAC_LED_10MB_OVERRIDE |
7645 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7646 			BNX2_EMAC_LED_TRAFFIC);
7647 		break;
7648 
7649 	case ETHTOOL_ID_OFF:
7650 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7651 		break;
7652 
7653 	case ETHTOOL_ID_INACTIVE:
7654 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7655 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7656 		break;
7657 	}
7658 
7659 	return 0;
7660 }
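
/* Note: a positive return from the ETHTOOL_ID_ACTIVE case gives the
 * blink frequency in cycles per second, and the ethtool core then
 * drives the ON/OFF cases at that rate.  For example, "ethtool -p
 * eth0 5" would blink the LED for five seconds ("eth0" and the
 * duration are illustrative arguments).
 */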
7661 
7662 static netdev_features_t
7663 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7664 {
7665 	struct bnx2 *bp = netdev_priv(dev);
7666 
7667 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7668 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7669 
7670 	return features;
7671 }
7672 
7673 static int
7674 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7675 {
7676 	struct bnx2 *bp = netdev_priv(dev);
7677 
7678 	/* TSO with VLAN tag won't work with current firmware */
7679 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7680 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7681 	else
7682 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7683 
7684 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7685 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7686 	    netif_running(dev)) {
7687 		bnx2_netif_stop(bp, false);
7688 		dev->features = features;
7689 		bnx2_set_rx_mode(dev);
7690 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7691 		bnx2_netif_start(bp, false);
7692 		return 1;
7693 	}
7694 
7695 	return 0;
7696 }
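
/* Note: returning 1 (rather than 0) from an ndo_set_features handler
 * tells the core that the driver has already applied the new feature
 * set itself, which is why dev->features is written directly in the
 * reconfiguration path above.
 */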
7697 
7698 static void bnx2_get_channels(struct net_device *dev,
7699 			      struct ethtool_channels *channels)
7700 {
7701 	struct bnx2 *bp = netdev_priv(dev);
7702 	u32 max_rx_rings = 1;
7703 	u32 max_tx_rings = 1;
7704 
7705 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7706 		max_rx_rings = RX_MAX_RINGS;
7707 		max_tx_rings = TX_MAX_RINGS;
7708 	}
7709 
7710 	channels->max_rx = max_rx_rings;
7711 	channels->max_tx = max_tx_rings;
7712 	channels->max_other = 0;
7713 	channels->max_combined = 0;
7714 	channels->rx_count = bp->num_rx_rings;
7715 	channels->tx_count = bp->num_tx_rings;
7716 	channels->other_count = 0;
7717 	channels->combined_count = 0;
7718 }
7719 
7720 static int bnx2_set_channels(struct net_device *dev,
7721 			      struct ethtool_channels *channels)
7722 {
7723 	struct bnx2 *bp = netdev_priv(dev);
7724 	u32 max_rx_rings = 1;
7725 	u32 max_tx_rings = 1;
7726 	int rc = 0;
7727 
7728 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7729 		max_rx_rings = RX_MAX_RINGS;
7730 		max_tx_rings = TX_MAX_RINGS;
7731 	}
7732 	if (channels->rx_count > max_rx_rings ||
7733 	    channels->tx_count > max_tx_rings)
7734 		return -EINVAL;
7735 
7736 	bp->num_req_rx_rings = channels->rx_count;
7737 	bp->num_req_tx_rings = channels->tx_count;
7738 
7739 	if (netif_running(dev))
7740 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7741 					   bp->tx_ring_size, true);
7742 
7743 	return rc;
7744 }
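
/* These two handlers back "ethtool -l" and "ethtool -L", e.g.
 * "ethtool -L eth0 rx 4 tx 4" (illustrative device name and ring
 * counts); requests above max_rx/max_tx are rejected with -EINVAL.
 */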
7745 
7746 static const struct ethtool_ops bnx2_ethtool_ops = {
7747 	.get_settings		= bnx2_get_settings,
7748 	.set_settings		= bnx2_set_settings,
7749 	.get_drvinfo		= bnx2_get_drvinfo,
7750 	.get_regs_len		= bnx2_get_regs_len,
7751 	.get_regs		= bnx2_get_regs,
7752 	.get_wol		= bnx2_get_wol,
7753 	.set_wol		= bnx2_set_wol,
7754 	.nway_reset		= bnx2_nway_reset,
7755 	.get_link		= bnx2_get_link,
7756 	.get_eeprom_len		= bnx2_get_eeprom_len,
7757 	.get_eeprom		= bnx2_get_eeprom,
7758 	.set_eeprom		= bnx2_set_eeprom,
7759 	.get_coalesce		= bnx2_get_coalesce,
7760 	.set_coalesce		= bnx2_set_coalesce,
7761 	.get_ringparam		= bnx2_get_ringparam,
7762 	.set_ringparam		= bnx2_set_ringparam,
7763 	.get_pauseparam		= bnx2_get_pauseparam,
7764 	.set_pauseparam		= bnx2_set_pauseparam,
7765 	.self_test		= bnx2_self_test,
7766 	.get_strings		= bnx2_get_strings,
7767 	.set_phys_id		= bnx2_set_phys_id,
7768 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7769 	.get_sset_count		= bnx2_get_sset_count,
7770 	.get_channels		= bnx2_get_channels,
7771 	.set_channels		= bnx2_set_channels,
7772 };
7773 
7774 /* Called with rtnl_lock */
7775 static int
7776 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7777 {
7778 	struct mii_ioctl_data *data = if_mii(ifr);
7779 	struct bnx2 *bp = netdev_priv(dev);
7780 	int err;
7781 
	switch (cmd) {
7783 	case SIOCGMIIPHY:
7784 		data->phy_id = bp->phy_addr;
7785 
7786 		/* fallthru */
7787 	case SIOCGMIIREG: {
7788 		u32 mii_regval;
7789 
7790 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7791 			return -EOPNOTSUPP;
7792 
7793 		if (!netif_running(dev))
7794 			return -EAGAIN;
7795 
7796 		spin_lock_bh(&bp->phy_lock);
7797 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7798 		spin_unlock_bh(&bp->phy_lock);
7799 
7800 		data->val_out = mii_regval;
7801 
7802 		return err;
7803 	}
7804 
7805 	case SIOCSMIIREG:
7806 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7807 			return -EOPNOTSUPP;
7808 
7809 		if (!netif_running(dev))
7810 			return -EAGAIN;
7811 
7812 		spin_lock_bh(&bp->phy_lock);
7813 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7814 		spin_unlock_bh(&bp->phy_lock);
7815 
7816 		return err;
7817 
7818 	default:
7819 		/* do nothing */
7820 		break;
7821 	}
7822 	return -EOPNOTSUPP;
7823 }
7824 
7825 /* Called with rtnl_lock */
7826 static int
7827 bnx2_change_mac_addr(struct net_device *dev, void *p)
7828 {
7829 	struct sockaddr *addr = p;
7830 	struct bnx2 *bp = netdev_priv(dev);
7831 
7832 	if (!is_valid_ether_addr(addr->sa_data))
7833 		return -EADDRNOTAVAIL;
7834 
7835 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7836 	if (netif_running(dev))
7837 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7838 
7839 	return 0;
7840 }
7841 
7842 /* Called with rtnl_lock */
7843 static int
7844 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7845 {
7846 	struct bnx2 *bp = netdev_priv(dev);
7847 
7848 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7849 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7850 		return -EINVAL;
7851 
7852 	dev->mtu = new_mtu;
7853 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7854 				     false);
7855 }
7856 
7857 #ifdef CONFIG_NET_POLL_CONTROLLER
7858 static void
7859 poll_bnx2(struct net_device *dev)
7860 {
7861 	struct bnx2 *bp = netdev_priv(dev);
7862 	int i;
7863 
7864 	for (i = 0; i < bp->irq_nvecs; i++) {
7865 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7866 
7867 		disable_irq(irq->vector);
7868 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7869 		enable_irq(irq->vector);
7870 	}
7871 }
7872 #endif
7873 
7874 static void
7875 bnx2_get_5709_media(struct bnx2 *bp)
7876 {
7877 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7878 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7879 	u32 strap;
7880 
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}
7887 
7888 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7889 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7890 	else
7891 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7892 
7893 	if (bp->func == 0) {
7894 		switch (strap) {
7895 		case 0x4:
7896 		case 0x5:
7897 		case 0x6:
7898 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7899 			return;
7900 		}
7901 	} else {
7902 		switch (strap) {
7903 		case 0x1:
7904 		case 0x2:
7905 		case 0x4:
7906 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7907 			return;
7908 		}
7909 	}
7910 }
7911 
7912 static void
7913 bnx2_get_pci_speed(struct bnx2 *bp)
7914 {
7915 	u32 reg;
7916 
7917 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7918 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7919 		u32 clkreg;
7920 
7921 		bp->flags |= BNX2_FLAG_PCIX;
7922 
7923 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7924 
7925 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7926 		switch (clkreg) {
7927 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7928 			bp->bus_speed_mhz = 133;
7929 			break;
7930 
7931 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7932 			bp->bus_speed_mhz = 100;
7933 			break;
7934 
7935 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7936 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7937 			bp->bus_speed_mhz = 66;
7938 			break;
7939 
7940 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7941 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7942 			bp->bus_speed_mhz = 50;
7943 			break;
7944 
7945 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7946 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7947 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7948 			bp->bus_speed_mhz = 33;
7949 			break;
7950 		}
	} else {
7953 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7954 			bp->bus_speed_mhz = 66;
7955 		else
7956 			bp->bus_speed_mhz = 33;
7957 	}
7958 
7959 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7960 		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
7963 
7964 static void
7965 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7966 {
7967 	int rc, i, j;
7968 	u8 *data;
7969 	unsigned int block_end, rosize, len;
7970 
7971 #define BNX2_VPD_NVRAM_OFFSET	0x300
7972 #define BNX2_VPD_LEN		128
7973 #define BNX2_MAX_VER_SLEN	30
7974 
7975 	data = kmalloc(256, GFP_KERNEL);
7976 	if (!data)
7977 		return;
7978 
7979 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7980 			     BNX2_VPD_LEN);
7981 	if (rc)
7982 		goto vpd_done;
7983 
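	/* NVRAM stores the VPD image as 32-bit words in the opposite byte
	 * order from the PCI VPD byte stream; swap each 4-byte group while
	 * copying from the raw upper half of the buffer to the lower half.
	 */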
7984 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7985 		data[i] = data[i + BNX2_VPD_LEN + 3];
7986 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7987 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7988 		data[i + 3] = data[i + BNX2_VPD_LEN];
7989 	}
7990 
7991 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7992 	if (i < 0)
7993 		goto vpd_done;
7994 
7995 	rosize = pci_vpd_lrdt_size(&data[i]);
7996 	i += PCI_VPD_LRDT_TAG_SIZE;
7997 	block_end = i + rosize;
7998 
7999 	if (block_end > BNX2_VPD_LEN)
8000 		goto vpd_done;
8001 
8002 	j = pci_vpd_find_info_keyword(data, i, rosize,
8003 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8004 	if (j < 0)
8005 		goto vpd_done;
8006 
8007 	len = pci_vpd_info_field_size(&data[j]);
8008 
8009 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
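	/* "1028" is Dell's PCI vendor ID (0x1028) rendered as ASCII; the
	 * VENDOR0 firmware-version keyword below is only read on boards
	 * whose VPD carries that manufacturer ID.
	 */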
8010 	if (j + len > block_end || len != 4 ||
8011 	    memcmp(&data[j], "1028", 4))
8012 		goto vpd_done;
8013 
8014 	j = pci_vpd_find_info_keyword(data, i, rosize,
8015 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8016 	if (j < 0)
8017 		goto vpd_done;
8018 
8019 	len = pci_vpd_info_field_size(&data[j]);
8020 
8021 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8022 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8023 		goto vpd_done;
8024 
8025 	memcpy(bp->fw_version, &data[j], len);
8026 	bp->fw_version[len] = ' ';
8027 
8028 vpd_done:
8029 	kfree(data);
8030 }
8031 
8032 static int
8033 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8034 {
8035 	struct bnx2 *bp;
8036 	int rc, i, j;
8037 	u32 reg;
8038 	u64 dma_mask, persist_dma_mask;
8039 	int err;
8040 
8041 	SET_NETDEV_DEV(dev, &pdev->dev);
8042 	bp = netdev_priv(dev);
8043 
8044 	bp->flags = 0;
8045 	bp->phy_flags = 0;
8046 
8047 	bp->temp_stats_blk =
8048 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8049 
	if (!bp->temp_stats_blk) {
8051 		rc = -ENOMEM;
8052 		goto err_out;
8053 	}
8054 
8055 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8056 	rc = pci_enable_device(pdev);
8057 	if (rc) {
8058 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8059 		goto err_out;
8060 	}
8061 
8062 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8063 		dev_err(&pdev->dev,
8064 			"Cannot find PCI device base address, aborting\n");
8065 		rc = -ENODEV;
8066 		goto err_out_disable;
8067 	}
8068 
8069 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8070 	if (rc) {
8071 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8072 		goto err_out_disable;
8073 	}
8074 
8075 	pci_set_master(pdev);
8076 
8077 	bp->pm_cap = pdev->pm_cap;
8078 	if (bp->pm_cap == 0) {
8079 		dev_err(&pdev->dev,
8080 			"Cannot find power management capability, aborting\n");
8081 		rc = -EIO;
8082 		goto err_out_release;
8083 	}
8084 
8085 	bp->dev = dev;
8086 	bp->pdev = pdev;
8087 
8088 	spin_lock_init(&bp->phy_lock);
8089 	spin_lock_init(&bp->indirect_lock);
8090 #ifdef BCM_CNIC
8091 	mutex_init(&bp->cnic_lock);
8092 #endif
8093 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8094 
8095 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8096 							 TX_MAX_TSS_RINGS + 1));
8097 	if (!bp->regview) {
8098 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8099 		rc = -ENOMEM;
8100 		goto err_out_release;
8101 	}
8102 
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big-endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
8107 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8108 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8109 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8110 
8111 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8112 
8113 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8114 		if (!pci_is_pcie(pdev)) {
8115 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8116 			rc = -EIO;
8117 			goto err_out_unmap;
8118 		}
8119 		bp->flags |= BNX2_FLAG_PCIE;
8120 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8121 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8122 
8123 		/* AER (Advanced Error Reporting) hooks */
8124 		err = pci_enable_pcie_error_reporting(pdev);
8125 		if (!err)
8126 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8127 
8128 	} else {
8129 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8130 		if (bp->pcix_cap == 0) {
8131 			dev_err(&pdev->dev,
8132 				"Cannot find PCIX capability, aborting\n");
8133 			rc = -EIO;
8134 			goto err_out_unmap;
8135 		}
8136 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8137 	}
8138 
8139 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8140 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8141 		if (pdev->msix_cap)
8142 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8143 	}
8144 
8145 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8146 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8147 		if (pdev->msi_cap)
8148 			bp->flags |= BNX2_FLAG_MSI_CAP;
8149 	}
8150 
8151 	/* 5708 cannot support DMA addresses > 40-bit.  */
8152 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8153 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8154 	else
8155 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8156 
8157 	/* Configure DMA attributes. */
8158 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8159 		dev->features |= NETIF_F_HIGHDMA;
8160 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8161 		if (rc) {
8162 			dev_err(&pdev->dev,
8163 				"pci_set_consistent_dma_mask failed, aborting\n");
8164 			goto err_out_unmap;
8165 		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"System does not support DMA, aborting\n");
			goto err_out_unmap;
		}
	}
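	/* Net effect of the two paths above: prefer the wide DMA mask
	 * (64-bit, or 40-bit on the 5708) and advertise NETIF_F_HIGHDMA,
	 * otherwise fall back to 32-bit addressing before giving up.
	 */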
8170 
8171 	if (!(bp->flags & BNX2_FLAG_PCIE))
8172 		bnx2_get_pci_speed(bp);
8173 
8174 	/* 5706A0 may falsely detect SERR and PERR. */
8175 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8176 		reg = BNX2_RD(bp, PCI_COMMAND);
8177 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8178 		BNX2_WR(bp, PCI_COMMAND, reg);
8179 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8180 		!(bp->flags & BNX2_FLAG_PCIX)) {
8181 
8182 		dev_err(&pdev->dev,
8183 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8184 		goto err_out_unmap;
8185 	}
8186 
8187 	bnx2_init_nvram(bp);
8188 
8189 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8190 
8191 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8192 		bp->func = 1;
8193 
8194 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8195 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8196 		u32 off = bp->func << 2;
8197 
8198 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else {
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	}
8201 
8202 	/* Get the permanent MAC address.  First we need to make sure the
8203 	 * firmware is actually running.
8204 	 */
8205 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8206 
8207 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8208 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8209 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8210 		rc = -ENODEV;
8211 		goto err_out_unmap;
8212 	}
8213 
8214 	bnx2_read_vpd_fw_ver(bp);
8215 
8216 	j = strlen(bp->fw_version);
8217 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8218 	for (i = 0; i < 3 && j < 24; i++) {
8219 		u8 num, k, skip0;
8220 
8221 		if (i == 0) {
8222 			bp->fw_version[j++] = 'b';
8223 			bp->fw_version[j++] = 'c';
8224 			bp->fw_version[j++] = ' ';
8225 		}
8226 		num = (u8) (reg >> (24 - (i * 8)));
8227 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8228 			if (num >= k || !skip0 || k == 1) {
8229 				bp->fw_version[j++] = (num / k) + '0';
8230 				skip0 = 0;
8231 			}
8232 		}
8233 		if (i != 2)
8234 			bp->fw_version[j++] = '.';
8235 	}
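	/* Worked example with an illustrative register value: for
	 * BNX2_DEV_INFO_BC_REV = 0x01020300, the loop above takes the
	 * three high bytes (0x01, 0x02, 0x03), suppresses leading zeros,
	 * and appends "bc 1.2.3" to bp->fw_version.
	 */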
8236 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8237 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8238 		bp->wol = 1;
8239 
8240 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8241 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8242 
8243 		for (i = 0; i < 30; i++) {
8244 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8245 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8246 				break;
8247 			msleep(10);
8248 		}
8249 	}
8250 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8251 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8252 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8253 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8254 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8255 
8256 		if (j < 32)
8257 			bp->fw_version[j++] = ' ';
8258 		for (i = 0; i < 3 && j < 28; i++) {
8259 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8260 			reg = be32_to_cpu(reg);
8261 			memcpy(&bp->fw_version[j], &reg, 4);
8262 			j += 4;
8263 		}
8264 	}
8265 
8266 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8267 	bp->mac_addr[0] = (u8) (reg >> 8);
8268 	bp->mac_addr[1] = (u8) reg;
8269 
8270 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8271 	bp->mac_addr[2] = (u8) (reg >> 24);
8272 	bp->mac_addr[3] = (u8) (reg >> 16);
8273 	bp->mac_addr[4] = (u8) (reg >> 8);
8274 	bp->mac_addr[5] = (u8) reg;
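	/* Example with illustrative values: MAC_UPPER = 0x00001234 and
	 * MAC_LOWER = 0x56789abc decode to the station address
	 * 12:34:56:78:9a:bc.
	 */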
8275 
8276 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8277 	bnx2_set_rx_ring_size(bp, 255);
8278 
8279 	bp->tx_quick_cons_trip_int = 2;
8280 	bp->tx_quick_cons_trip = 20;
8281 	bp->tx_ticks_int = 18;
8282 	bp->tx_ticks = 80;
8283 
8284 	bp->rx_quick_cons_trip_int = 2;
8285 	bp->rx_quick_cons_trip = 12;
8286 	bp->rx_ticks_int = 18;
8287 	bp->rx_ticks = 18;
8288 
8289 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8290 
8291 	bp->current_interval = BNX2_TIMER_INTERVAL;
8292 
8293 	bp->phy_addr = 1;
8294 
	/* Determine the media type.  WOL support is disabled below for
	 * SERDES chips that cannot keep the link up on VAUX power.
	 */
8296 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8297 		bnx2_get_5709_media(bp);
8298 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8299 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8300 
8301 	bp->phy_port = PORT_TP;
8302 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8303 		bp->phy_port = PORT_FIBRE;
8304 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8305 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8306 			bp->flags |= BNX2_FLAG_NO_WOL;
8307 			bp->wol = 0;
8308 		}
8309 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8310 			/* Don't do parallel detect on this board because of
8311 			 * some board problems.  The link will not go down
8312 			 * if we do parallel detect.
8313 			 */
8314 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8315 			    pdev->subsystem_device == 0x310c)
8316 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8317 		} else {
8318 			bp->phy_addr = 2;
8319 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8320 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8321 		}
8322 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8323 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8324 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8325 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8326 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8327 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8328 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8329 
8330 	bnx2_init_fw_cap(bp);
8331 
8332 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8333 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8334 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8335 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8336 		bp->flags |= BNX2_FLAG_NO_WOL;
8337 		bp->wol = 0;
8338 	}
8339 
8340 	if (bp->flags & BNX2_FLAG_NO_WOL)
8341 		device_set_wakeup_capable(&bp->pdev->dev, false);
8342 	else
8343 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8344 
8345 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8346 		bp->tx_quick_cons_trip_int =
8347 			bp->tx_quick_cons_trip;
8348 		bp->tx_ticks_int = bp->tx_ticks;
8349 		bp->rx_quick_cons_trip_int =
8350 			bp->rx_quick_cons_trip;
8351 		bp->rx_ticks_int = bp->rx_ticks;
8352 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8353 		bp->com_ticks_int = bp->com_ticks;
8354 		bp->cmd_ticks_int = bp->cmd_ticks;
8355 	}
8356 
8357 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8358 	 *
8359 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8360 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8361 	 * but causes problems on the AMD 8132 which will eventually stop
8362 	 * responding after a while.
8363 	 *
8364 	 * AMD believes this incompatibility is unique to the 5706, and
8365 	 * prefers to locally disable MSI rather than globally disabling it.
8366 	 */
8367 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8368 		struct pci_dev *amd_8132 = NULL;
8369 
8370 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8371 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8372 						  amd_8132))) {
8373 
8374 			if (amd_8132->revision >= 0x10 &&
8375 			    amd_8132->revision <= 0x13) {
8376 				disable_msi = 1;
8377 				pci_dev_put(amd_8132);
8378 				break;
8379 			}
8380 		}
8381 	}
8382 
8383 	bnx2_set_default_link(bp);
8384 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8385 
8386 	init_timer(&bp->timer);
8387 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8388 	bp->timer.data = (unsigned long) bp;
8389 	bp->timer.function = bnx2_timer;
8390 
8391 #ifdef BCM_CNIC
8392 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8393 		bp->cnic_eth_dev.max_iscsi_conn =
8394 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8395 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8396 	bp->cnic_probe = bnx2_cnic_probe;
8397 #endif
8398 	pci_save_state(pdev);
8399 
8400 	return 0;
8401 
8402 err_out_unmap:
8403 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8404 		pci_disable_pcie_error_reporting(pdev);
8405 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8406 	}
8407 
8408 	pci_iounmap(pdev, bp->regview);
8409 	bp->regview = NULL;
8410 
8411 err_out_release:
8412 	pci_release_regions(pdev);
8413 
8414 err_out_disable:
8415 	pci_disable_device(pdev);
8416 
8417 err_out:
8418 	return rc;
8419 }
8420 
8421 static char *
8422 bnx2_bus_string(struct bnx2 *bp, char *str)
8423 {
8424 	char *s = str;
8425 
8426 	if (bp->flags & BNX2_FLAG_PCIE) {
8427 		s += sprintf(s, "PCI Express");
8428 	} else {
8429 		s += sprintf(s, "PCI");
8430 		if (bp->flags & BNX2_FLAG_PCIX)
8431 			s += sprintf(s, "-X");
8432 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8433 			s += sprintf(s, " 32-bit");
8434 		else
8435 			s += sprintf(s, " 64-bit");
8436 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8437 	}
8438 	return str;
8439 }
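
/* Example outputs: "PCI Express" when BNX2_FLAG_PCIE is set, or e.g.
 * "PCI-X 64-bit 133MHz" otherwise, depending on the detected width
 * and clock.
 */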
8440 
8441 static void
8442 bnx2_del_napi(struct bnx2 *bp)
8443 {
8444 	int i;
8445 
8446 	for (i = 0; i < bp->irq_nvecs; i++)
8447 		netif_napi_del(&bp->bnx2_napi[i].napi);
8448 }
8449 
8450 static void
8451 bnx2_init_napi(struct bnx2 *bp)
8452 {
8453 	int i;
8454 
8455 	for (i = 0; i < bp->irq_nvecs; i++) {
8456 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8457 		int (*poll)(struct napi_struct *, int);
8458 
8459 		if (i == 0)
8460 			poll = bnx2_poll;
8461 		else
8462 			poll = bnx2_poll_msix;
8463 
8464 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8465 		bnapi->bp = bp;
8466 	}
8467 }
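
/* Vector 0 always polls with bnx2_poll(), which also services link and
 * other slow-path status-block events; the remaining MSI-X vectors use
 * the leaner bnx2_poll_msix().
 */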
8468 
8469 static const struct net_device_ops bnx2_netdev_ops = {
8470 	.ndo_open		= bnx2_open,
8471 	.ndo_start_xmit		= bnx2_start_xmit,
8472 	.ndo_stop		= bnx2_close,
8473 	.ndo_get_stats64	= bnx2_get_stats64,
8474 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8475 	.ndo_do_ioctl		= bnx2_ioctl,
8476 	.ndo_validate_addr	= eth_validate_addr,
8477 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8478 	.ndo_change_mtu		= bnx2_change_mtu,
8479 	.ndo_fix_features	= bnx2_fix_features,
8480 	.ndo_set_features	= bnx2_set_features,
8481 	.ndo_tx_timeout		= bnx2_tx_timeout,
8482 #ifdef CONFIG_NET_POLL_CONTROLLER
8483 	.ndo_poll_controller	= poll_bnx2,
8484 #endif
8485 };
8486 
8487 static int
8488 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8489 {
	static int version_printed;
8491 	struct net_device *dev;
8492 	struct bnx2 *bp;
8493 	int rc;
8494 	char str[40];
8495 
8496 	if (version_printed++ == 0)
8497 		pr_info("%s", version);
8498 
	/* dev is zeroed by alloc_etherdev_mq() */
8500 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8501 	if (!dev)
8502 		return -ENOMEM;
8503 
8504 	rc = bnx2_init_board(pdev, dev);
8505 	if (rc < 0)
8506 		goto err_free;
8507 
8508 	dev->netdev_ops = &bnx2_netdev_ops;
8509 	dev->watchdog_timeo = TX_TIMEOUT;
8510 	dev->ethtool_ops = &bnx2_ethtool_ops;
8511 
8512 	bp = netdev_priv(dev);
8513 
8514 	pci_set_drvdata(pdev, dev);
8515 
8516 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8517 
8518 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8519 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8520 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8521 
8522 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8523 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8524 
8525 	dev->vlan_features = dev->hw_features;
8526 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8527 	dev->features |= dev->hw_features;
8528 	dev->priv_flags |= IFF_UNICAST_FLT;
8529 
	rc = register_netdev(dev);
	if (rc) {
8531 		dev_err(&pdev->dev, "Cannot register net device\n");
8532 		goto error;
8533 	}
8534 
8535 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8536 		    "node addr %pM\n", board_info[ent->driver_data].name,
8537 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8538 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8539 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8540 		    pdev->irq, dev->dev_addr);
8541 
8542 	return 0;
8543 
8544 error:
8545 	pci_iounmap(pdev, bp->regview);
8546 	pci_release_regions(pdev);
8547 	pci_disable_device(pdev);
8548 err_free:
8549 	free_netdev(dev);
8550 	return rc;
8551 }
8552 
8553 static void
8554 bnx2_remove_one(struct pci_dev *pdev)
8555 {
8556 	struct net_device *dev = pci_get_drvdata(pdev);
8557 	struct bnx2 *bp = netdev_priv(dev);
8558 
8559 	unregister_netdev(dev);
8560 
8561 	del_timer_sync(&bp->timer);
8562 	cancel_work_sync(&bp->reset_task);
8563 
8564 	pci_iounmap(bp->pdev, bp->regview);
8565 
8566 	kfree(bp->temp_stats_blk);
8567 
8568 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8569 		pci_disable_pcie_error_reporting(pdev);
8570 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8571 	}
8572 
8573 	bnx2_release_firmware(bp);
8574 
8575 	free_netdev(dev);
8576 
8577 	pci_release_regions(pdev);
8578 	pci_disable_device(pdev);
8579 }
8580 
8581 static int
8582 bnx2_suspend(struct device *device)
8583 {
8584 	struct pci_dev *pdev = to_pci_dev(device);
8585 	struct net_device *dev = pci_get_drvdata(pdev);
8586 	struct bnx2 *bp = netdev_priv(dev);
8587 
8588 	if (netif_running(dev)) {
8589 		cancel_work_sync(&bp->reset_task);
8590 		bnx2_netif_stop(bp, true);
8591 		netif_device_detach(dev);
8592 		del_timer_sync(&bp->timer);
8593 		bnx2_shutdown_chip(bp);
8594 		__bnx2_free_irq(bp);
8595 		bnx2_free_skbs(bp);
8596 	}
8597 	bnx2_setup_wol(bp);
8598 	return 0;
8599 }
8600 
8601 static int
8602 bnx2_resume(struct device *device)
8603 {
8604 	struct pci_dev *pdev = to_pci_dev(device);
8605 	struct net_device *dev = pci_get_drvdata(pdev);
8606 	struct bnx2 *bp = netdev_priv(dev);
8607 
8608 	if (!netif_running(dev))
8609 		return 0;
8610 
8611 	bnx2_set_power_state(bp, PCI_D0);
8612 	netif_device_attach(dev);
8613 	bnx2_request_irq(bp);
8614 	bnx2_init_nic(bp, 1);
8615 	bnx2_netif_start(bp, true);
8616 	return 0;
8617 }
8618 
8619 #ifdef CONFIG_PM_SLEEP
8620 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8621 #define BNX2_PM_OPS (&bnx2_pm_ops)
8622 
8623 #else
8624 
8625 #define BNX2_PM_OPS NULL
8626 
#endif /* CONFIG_PM_SLEEP */

/**
8629  * bnx2_io_error_detected - called when PCI error is detected
8630  * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
8632  *
8633  * This function is called after a PCI bus error affecting
8634  * this device has been detected.
8635  */
8636 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8637 					       pci_channel_state_t state)
8638 {
8639 	struct net_device *dev = pci_get_drvdata(pdev);
8640 	struct bnx2 *bp = netdev_priv(dev);
8641 
8642 	rtnl_lock();
8643 	netif_device_detach(dev);
8644 
8645 	if (state == pci_channel_io_perm_failure) {
8646 		rtnl_unlock();
8647 		return PCI_ERS_RESULT_DISCONNECT;
8648 	}
8649 
8650 	if (netif_running(dev)) {
8651 		bnx2_netif_stop(bp, true);
8652 		del_timer_sync(&bp->timer);
8653 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8654 	}
8655 
8656 	pci_disable_device(pdev);
8657 	rtnl_unlock();
8658 
	/* Request a slot reset. */
8660 	return PCI_ERS_RESULT_NEED_RESET;
8661 }
8662 
8663 /**
 * bnx2_io_slot_reset - called after the PCI bus has been reset.
8665  * @pdev: Pointer to PCI device
8666  *
8667  * Restart the card from scratch, as if from a cold-boot.
8668  */
8669 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8670 {
8671 	struct net_device *dev = pci_get_drvdata(pdev);
8672 	struct bnx2 *bp = netdev_priv(dev);
8673 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8674 	int err = 0;
8675 
8676 	rtnl_lock();
8677 	if (pci_enable_device(pdev)) {
8678 		dev_err(&pdev->dev,
8679 			"Cannot re-enable PCI device after reset\n");
8680 	} else {
8681 		pci_set_master(pdev);
8682 		pci_restore_state(pdev);
8683 		pci_save_state(pdev);
8684 
8685 		if (netif_running(dev))
8686 			err = bnx2_init_nic(bp, 1);
8687 
8688 		if (!err)
8689 			result = PCI_ERS_RESULT_RECOVERED;
8690 	}
8691 
8692 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8693 		bnx2_napi_enable(bp);
8694 		dev_close(dev);
8695 	}
8696 	rtnl_unlock();
8697 
8698 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8699 		return result;
8700 
8701 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8702 	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err); /* non-fatal, continue */
8706 	}
8707 
8708 	return result;
8709 }
8710 
8711 /**
8712  * bnx2_io_resume - called when traffic can start flowing again.
8713  * @pdev: Pointer to PCI device
8714  *
8715  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8717  */
8718 static void bnx2_io_resume(struct pci_dev *pdev)
8719 {
8720 	struct net_device *dev = pci_get_drvdata(pdev);
8721 	struct bnx2 *bp = netdev_priv(dev);
8722 
8723 	rtnl_lock();
8724 	if (netif_running(dev))
8725 		bnx2_netif_start(bp, true);
8726 
8727 	netif_device_attach(dev);
8728 	rtnl_unlock();
8729 }
8730 
8731 static void bnx2_shutdown(struct pci_dev *pdev)
8732 {
8733 	struct net_device *dev = pci_get_drvdata(pdev);
8734 	struct bnx2 *bp;
8735 
8736 	if (!dev)
8737 		return;
8738 
8739 	bp = netdev_priv(dev);
8740 	if (!bp)
8741 		return;
8742 
8743 	rtnl_lock();
8744 	if (netif_running(dev))
8745 		dev_close(bp->dev);
8746 
8747 	if (system_state == SYSTEM_POWER_OFF)
8748 		bnx2_set_power_state(bp, PCI_D3hot);
8749 
8750 	rtnl_unlock();
8751 }
8752 
8753 static const struct pci_error_handlers bnx2_err_handler = {
8754 	.error_detected	= bnx2_io_error_detected,
8755 	.slot_reset	= bnx2_io_slot_reset,
8756 	.resume		= bnx2_io_resume,
8757 };
8758 
8759 static struct pci_driver bnx2_pci_driver = {
8760 	.name		= DRV_MODULE_NAME,
8761 	.id_table	= bnx2_pci_tbl,
8762 	.probe		= bnx2_init_one,
8763 	.remove		= bnx2_remove_one,
8764 	.driver.pm	= BNX2_PM_OPS,
8765 	.err_handler	= &bnx2_err_handler,
8766 	.shutdown	= bnx2_shutdown,
8767 };
8768 
8769 module_pci_driver(bnx2_pci_driver);
8770