1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53 
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60 
61 #define DRV_MODULE_NAME		"bnx2"
62 #define DRV_MODULE_VERSION	"2.2.6"
63 #define DRV_MODULE_RELDATE	"January 29, 2014"
64 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
65 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
66 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
67 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
68 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
69 
70 #define RUN_AT(x) (jiffies + (x))
71 
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74 
75 static char version[] =
76 	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87 
88 static int disable_msi = 0;
89 
90 module_param(disable_msi, int, S_IRUGO);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 
93 typedef enum {
94 	BCM5706 = 0,
95 	NC370T,
96 	NC370I,
97 	BCM5706S,
98 	NC370F,
99 	BCM5708,
100 	BCM5708S,
101 	BCM5709,
102 	BCM5709S,
103 	BCM5716,
104 	BCM5716S,
105 } board_t;
106 
107 /* indexed by board_t, above */
108 static struct {
109 	char *name;
110 } board_info[] = {
111 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
112 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
113 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
121 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122 	};
123 
124 static const struct pci_device_id bnx2_pci_tbl[] = {
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
146 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147 	{ 0, }
148 };
149 
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
154 	/* Slow EEPROM */
155 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 	 "EEPROM - slow"},
159 	/* Expansion entry 0001 */
160 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 	 "Entry 0001"},
164 	/* Saifun SA25F010 (non-buffered flash) */
165 	/* strap, cfg1, & write1 need updates */
166 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169 	 "Non-buffered flash (128kB)"},
170 	/* Saifun SA25F020 (non-buffered flash) */
171 	/* strap, cfg1, & write1 need updates */
172 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175 	 "Non-buffered flash (256kB)"},
176 	/* Expansion entry 0100 */
177 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 	 "Entry 0100"},
181 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
186 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
187 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
191 	/* Saifun SA25F005 (non-buffered flash) */
192 	/* strap, cfg1, & write1 need updates */
193 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196 	 "Non-buffered flash (64kB)"},
197 	/* Fast EEPROM */
198 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 	 "EEPROM - fast"},
202 	/* Expansion entry 1001 */
203 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 	 "Entry 1001"},
207 	/* Expansion entry 1010 */
208 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 	 "Entry 1010"},
212 	/* ATMEL AT45DB011B (buffered flash) */
213 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216 	 "Buffered flash (128kB)"},
217 	/* Expansion entry 1100 */
218 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 	 "Entry 1100"},
222 	/* Expansion entry 1101 */
223 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 	 "Entry 1101"},
227 	/* Ateml Expansion entry 1110 */
228 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231 	 "Entry 1110 (Atmel)"},
232 	/* ATMEL AT45DB021B (buffered flash) */
233 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236 	 "Buffered flash (256kB)"},
237 };
238 
239 static const struct flash_spec flash_5709 = {
240 	.flags		= BNX2_NV_BUFFERED,
241 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
242 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
243 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
244 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
245 	.name		= "5709 Buffered flash (256kB)",
246 };
247 
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249 
250 static void bnx2_init_napi(struct bnx2 *bp);
251 static void bnx2_del_napi(struct bnx2 *bp);
252 
253 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
254 {
255 	u32 diff;
256 
257 	/* The ring uses 256 indices for 255 entries, one of them
258 	 * needs to be skipped.
259 	 */
260 	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
261 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
262 		diff &= 0xffff;
263 		if (diff == BNX2_TX_DESC_CNT)
264 			diff = BNX2_MAX_TX_DESC_CNT;
265 	}
266 	return bp->tx_ring_size - diff;
267 }
268 
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272 	unsigned long flags;
273 	u32 val;
274 
275 	spin_lock_irqsave(&bp->indirect_lock, flags);
276 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
279 	return val;
280 }
281 
282 static void
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 {
285 	unsigned long flags;
286 
287 	spin_lock_irqsave(&bp->indirect_lock, flags);
288 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
289 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
290 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
291 }
292 
293 static void
294 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
295 {
296 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
297 }
298 
299 static u32
300 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301 {
302 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
303 }
304 
305 static void
306 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 {
308 	unsigned long flags;
309 
310 	offset += cid_addr;
311 	spin_lock_irqsave(&bp->indirect_lock, flags);
312 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
313 		int i;
314 
315 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
316 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
317 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
318 		for (i = 0; i < 5; i++) {
319 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
320 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
321 				break;
322 			udelay(5);
323 		}
324 	} else {
325 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
326 		BNX2_WR(bp, BNX2_CTX_DATA, val);
327 	}
328 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
329 }
330 
331 #ifdef BCM_CNIC
332 static int
333 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
334 {
335 	struct bnx2 *bp = netdev_priv(dev);
336 	struct drv_ctl_io *io = &info->data.io;
337 
338 	switch (info->cmd) {
339 	case DRV_CTL_IO_WR_CMD:
340 		bnx2_reg_wr_ind(bp, io->offset, io->data);
341 		break;
342 	case DRV_CTL_IO_RD_CMD:
343 		io->data = bnx2_reg_rd_ind(bp, io->offset);
344 		break;
345 	case DRV_CTL_CTX_WR_CMD:
346 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
347 		break;
348 	default:
349 		return -EINVAL;
350 	}
351 	return 0;
352 }
353 
354 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
355 {
356 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
357 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
358 	int sb_id;
359 
360 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
361 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
362 		bnapi->cnic_present = 0;
363 		sb_id = bp->irq_nvecs;
364 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
365 	} else {
366 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
367 		bnapi->cnic_tag = bnapi->last_status_idx;
368 		bnapi->cnic_present = 1;
369 		sb_id = 0;
370 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
371 	}
372 
373 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
374 	cp->irq_arr[0].status_blk = (void *)
375 		((unsigned long) bnapi->status_blk.msi +
376 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
377 	cp->irq_arr[0].status_blk_num = sb_id;
378 	cp->num_irq = 1;
379 }
380 
381 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
382 			      void *data)
383 {
384 	struct bnx2 *bp = netdev_priv(dev);
385 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 
387 	if (ops == NULL)
388 		return -EINVAL;
389 
390 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
391 		return -EBUSY;
392 
393 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
394 		return -ENODEV;
395 
396 	bp->cnic_data = data;
397 	rcu_assign_pointer(bp->cnic_ops, ops);
398 
399 	cp->num_irq = 0;
400 	cp->drv_state = CNIC_DRV_STATE_REGD;
401 
402 	bnx2_setup_cnic_irq_info(bp);
403 
404 	return 0;
405 }
406 
407 static int bnx2_unregister_cnic(struct net_device *dev)
408 {
409 	struct bnx2 *bp = netdev_priv(dev);
410 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
411 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412 
413 	mutex_lock(&bp->cnic_lock);
414 	cp->drv_state = 0;
415 	bnapi->cnic_present = 0;
416 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
417 	mutex_unlock(&bp->cnic_lock);
418 	synchronize_rcu();
419 	return 0;
420 }
421 
422 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
423 {
424 	struct bnx2 *bp = netdev_priv(dev);
425 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
426 
427 	if (!cp->max_iscsi_conn)
428 		return NULL;
429 
430 	cp->drv_owner = THIS_MODULE;
431 	cp->chip_id = bp->chip_id;
432 	cp->pdev = bp->pdev;
433 	cp->io_base = bp->regview;
434 	cp->drv_ctl = bnx2_drv_ctl;
435 	cp->drv_register_cnic = bnx2_register_cnic;
436 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
437 
438 	return cp;
439 }
440 
441 static void
442 bnx2_cnic_stop(struct bnx2 *bp)
443 {
444 	struct cnic_ops *c_ops;
445 	struct cnic_ctl_info info;
446 
447 	mutex_lock(&bp->cnic_lock);
448 	c_ops = rcu_dereference_protected(bp->cnic_ops,
449 					  lockdep_is_held(&bp->cnic_lock));
450 	if (c_ops) {
451 		info.cmd = CNIC_CTL_STOP_CMD;
452 		c_ops->cnic_ctl(bp->cnic_data, &info);
453 	}
454 	mutex_unlock(&bp->cnic_lock);
455 }
456 
457 static void
458 bnx2_cnic_start(struct bnx2 *bp)
459 {
460 	struct cnic_ops *c_ops;
461 	struct cnic_ctl_info info;
462 
463 	mutex_lock(&bp->cnic_lock);
464 	c_ops = rcu_dereference_protected(bp->cnic_ops,
465 					  lockdep_is_held(&bp->cnic_lock));
466 	if (c_ops) {
467 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
468 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
469 
470 			bnapi->cnic_tag = bnapi->last_status_idx;
471 		}
472 		info.cmd = CNIC_CTL_START_CMD;
473 		c_ops->cnic_ctl(bp->cnic_data, &info);
474 	}
475 	mutex_unlock(&bp->cnic_lock);
476 }
477 
478 #else
479 
480 static void
481 bnx2_cnic_stop(struct bnx2 *bp)
482 {
483 }
484 
485 static void
486 bnx2_cnic_start(struct bnx2 *bp)
487 {
488 }
489 
490 #endif
491 
492 static int
493 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
494 {
495 	u32 val1;
496 	int i, ret;
497 
498 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
499 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
500 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
501 
502 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
503 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
504 
505 		udelay(40);
506 	}
507 
508 	val1 = (bp->phy_addr << 21) | (reg << 16) |
509 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
510 		BNX2_EMAC_MDIO_COMM_START_BUSY;
511 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
512 
513 	for (i = 0; i < 50; i++) {
514 		udelay(10);
515 
516 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
517 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
518 			udelay(5);
519 
520 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
521 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
522 
523 			break;
524 		}
525 	}
526 
527 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
528 		*val = 0x0;
529 		ret = -EBUSY;
530 	}
531 	else {
532 		*val = val1;
533 		ret = 0;
534 	}
535 
536 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
537 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
538 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
539 
540 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
541 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
542 
543 		udelay(40);
544 	}
545 
546 	return ret;
547 }
548 
549 static int
550 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
551 {
552 	u32 val1;
553 	int i, ret;
554 
555 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
556 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
557 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
558 
559 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
560 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
561 
562 		udelay(40);
563 	}
564 
565 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
566 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
567 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
568 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
569 
570 	for (i = 0; i < 50; i++) {
571 		udelay(10);
572 
573 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
574 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
575 			udelay(5);
576 			break;
577 		}
578 	}
579 
580 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
581         	ret = -EBUSY;
582 	else
583 		ret = 0;
584 
585 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
586 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
587 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
588 
589 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
590 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
591 
592 		udelay(40);
593 	}
594 
595 	return ret;
596 }
597 
598 static void
599 bnx2_disable_int(struct bnx2 *bp)
600 {
601 	int i;
602 	struct bnx2_napi *bnapi;
603 
604 	for (i = 0; i < bp->irq_nvecs; i++) {
605 		bnapi = &bp->bnx2_napi[i];
606 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
607 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
608 	}
609 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
610 }
611 
612 static void
613 bnx2_enable_int(struct bnx2 *bp)
614 {
615 	int i;
616 	struct bnx2_napi *bnapi;
617 
618 	for (i = 0; i < bp->irq_nvecs; i++) {
619 		bnapi = &bp->bnx2_napi[i];
620 
621 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
622 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
623 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
624 			bnapi->last_status_idx);
625 
626 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
627 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
628 			bnapi->last_status_idx);
629 	}
630 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
631 }
632 
633 static void
634 bnx2_disable_int_sync(struct bnx2 *bp)
635 {
636 	int i;
637 
638 	atomic_inc(&bp->intr_sem);
639 	if (!netif_running(bp->dev))
640 		return;
641 
642 	bnx2_disable_int(bp);
643 	for (i = 0; i < bp->irq_nvecs; i++)
644 		synchronize_irq(bp->irq_tbl[i].vector);
645 }
646 
647 static void
648 bnx2_napi_disable(struct bnx2 *bp)
649 {
650 	int i;
651 
652 	for (i = 0; i < bp->irq_nvecs; i++)
653 		napi_disable(&bp->bnx2_napi[i].napi);
654 }
655 
656 static void
657 bnx2_napi_enable(struct bnx2 *bp)
658 {
659 	int i;
660 
661 	for (i = 0; i < bp->irq_nvecs; i++)
662 		napi_enable(&bp->bnx2_napi[i].napi);
663 }
664 
665 static void
666 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
667 {
668 	if (stop_cnic)
669 		bnx2_cnic_stop(bp);
670 	if (netif_running(bp->dev)) {
671 		bnx2_napi_disable(bp);
672 		netif_tx_disable(bp->dev);
673 	}
674 	bnx2_disable_int_sync(bp);
675 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
676 }
677 
678 static void
679 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
680 {
681 	if (atomic_dec_and_test(&bp->intr_sem)) {
682 		if (netif_running(bp->dev)) {
683 			netif_tx_wake_all_queues(bp->dev);
684 			spin_lock_bh(&bp->phy_lock);
685 			if (bp->link_up)
686 				netif_carrier_on(bp->dev);
687 			spin_unlock_bh(&bp->phy_lock);
688 			bnx2_napi_enable(bp);
689 			bnx2_enable_int(bp);
690 			if (start_cnic)
691 				bnx2_cnic_start(bp);
692 		}
693 	}
694 }
695 
696 static void
697 bnx2_free_tx_mem(struct bnx2 *bp)
698 {
699 	int i;
700 
701 	for (i = 0; i < bp->num_tx_rings; i++) {
702 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
703 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
704 
705 		if (txr->tx_desc_ring) {
706 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
707 					  txr->tx_desc_ring,
708 					  txr->tx_desc_mapping);
709 			txr->tx_desc_ring = NULL;
710 		}
711 		kfree(txr->tx_buf_ring);
712 		txr->tx_buf_ring = NULL;
713 	}
714 }
715 
716 static void
717 bnx2_free_rx_mem(struct bnx2 *bp)
718 {
719 	int i;
720 
721 	for (i = 0; i < bp->num_rx_rings; i++) {
722 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
723 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
724 		int j;
725 
726 		for (j = 0; j < bp->rx_max_ring; j++) {
727 			if (rxr->rx_desc_ring[j])
728 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729 						  rxr->rx_desc_ring[j],
730 						  rxr->rx_desc_mapping[j]);
731 			rxr->rx_desc_ring[j] = NULL;
732 		}
733 		vfree(rxr->rx_buf_ring);
734 		rxr->rx_buf_ring = NULL;
735 
736 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
737 			if (rxr->rx_pg_desc_ring[j])
738 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
739 						  rxr->rx_pg_desc_ring[j],
740 						  rxr->rx_pg_desc_mapping[j]);
741 			rxr->rx_pg_desc_ring[j] = NULL;
742 		}
743 		vfree(rxr->rx_pg_ring);
744 		rxr->rx_pg_ring = NULL;
745 	}
746 }
747 
748 static int
749 bnx2_alloc_tx_mem(struct bnx2 *bp)
750 {
751 	int i;
752 
753 	for (i = 0; i < bp->num_tx_rings; i++) {
754 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
755 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
756 
757 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
758 		if (txr->tx_buf_ring == NULL)
759 			return -ENOMEM;
760 
761 		txr->tx_desc_ring =
762 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
763 					   &txr->tx_desc_mapping, GFP_KERNEL);
764 		if (txr->tx_desc_ring == NULL)
765 			return -ENOMEM;
766 	}
767 	return 0;
768 }
769 
770 static int
771 bnx2_alloc_rx_mem(struct bnx2 *bp)
772 {
773 	int i;
774 
775 	for (i = 0; i < bp->num_rx_rings; i++) {
776 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
777 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
778 		int j;
779 
780 		rxr->rx_buf_ring =
781 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
782 		if (rxr->rx_buf_ring == NULL)
783 			return -ENOMEM;
784 
785 		for (j = 0; j < bp->rx_max_ring; j++) {
786 			rxr->rx_desc_ring[j] =
787 				dma_alloc_coherent(&bp->pdev->dev,
788 						   RXBD_RING_SIZE,
789 						   &rxr->rx_desc_mapping[j],
790 						   GFP_KERNEL);
791 			if (rxr->rx_desc_ring[j] == NULL)
792 				return -ENOMEM;
793 
794 		}
795 
796 		if (bp->rx_pg_ring_size) {
797 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
798 						  bp->rx_max_pg_ring);
799 			if (rxr->rx_pg_ring == NULL)
800 				return -ENOMEM;
801 
802 		}
803 
804 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
805 			rxr->rx_pg_desc_ring[j] =
806 				dma_alloc_coherent(&bp->pdev->dev,
807 						   RXBD_RING_SIZE,
808 						   &rxr->rx_pg_desc_mapping[j],
809 						   GFP_KERNEL);
810 			if (rxr->rx_pg_desc_ring[j] == NULL)
811 				return -ENOMEM;
812 
813 		}
814 	}
815 	return 0;
816 }
817 
818 static void
819 bnx2_free_stats_blk(struct net_device *dev)
820 {
821 	struct bnx2 *bp = netdev_priv(dev);
822 
823 	if (bp->status_blk) {
824 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
825 				  bp->status_blk,
826 				  bp->status_blk_mapping);
827 		bp->status_blk = NULL;
828 		bp->stats_blk = NULL;
829 	}
830 }
831 
832 static int
833 bnx2_alloc_stats_blk(struct net_device *dev)
834 {
835 	int status_blk_size;
836 	void *status_blk;
837 	struct bnx2 *bp = netdev_priv(dev);
838 
839 	/* Combine status and statistics blocks into one allocation. */
840 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
841 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
842 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
843 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
844 	bp->status_stats_size = status_blk_size +
845 				sizeof(struct statistics_block);
846 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
847 					 &bp->status_blk_mapping, GFP_KERNEL);
848 	if (status_blk == NULL)
849 		return -ENOMEM;
850 
851 	bp->status_blk = status_blk;
852 	bp->stats_blk = status_blk + status_blk_size;
853 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
854 
855 	return 0;
856 }
857 
858 static void
859 bnx2_free_mem(struct bnx2 *bp)
860 {
861 	int i;
862 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
863 
864 	bnx2_free_tx_mem(bp);
865 	bnx2_free_rx_mem(bp);
866 
867 	for (i = 0; i < bp->ctx_pages; i++) {
868 		if (bp->ctx_blk[i]) {
869 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
870 					  bp->ctx_blk[i],
871 					  bp->ctx_blk_mapping[i]);
872 			bp->ctx_blk[i] = NULL;
873 		}
874 	}
875 
876 	if (bnapi->status_blk.msi)
877 		bnapi->status_blk.msi = NULL;
878 }
879 
880 static int
881 bnx2_alloc_mem(struct bnx2 *bp)
882 {
883 	int i, err;
884 	struct bnx2_napi *bnapi;
885 
886 	bnapi = &bp->bnx2_napi[0];
887 	bnapi->status_blk.msi = bp->status_blk;
888 	bnapi->hw_tx_cons_ptr =
889 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
890 	bnapi->hw_rx_cons_ptr =
891 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
892 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
893 		for (i = 1; i < bp->irq_nvecs; i++) {
894 			struct status_block_msix *sblk;
895 
896 			bnapi = &bp->bnx2_napi[i];
897 
898 			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
899 			bnapi->status_blk.msix = sblk;
900 			bnapi->hw_tx_cons_ptr =
901 				&sblk->status_tx_quick_consumer_index;
902 			bnapi->hw_rx_cons_ptr =
903 				&sblk->status_rx_quick_consumer_index;
904 			bnapi->int_num = i << 24;
905 		}
906 	}
907 
908 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
909 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
910 		if (bp->ctx_pages == 0)
911 			bp->ctx_pages = 1;
912 		for (i = 0; i < bp->ctx_pages; i++) {
913 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
914 						BNX2_PAGE_SIZE,
915 						&bp->ctx_blk_mapping[i],
916 						GFP_KERNEL);
917 			if (bp->ctx_blk[i] == NULL)
918 				goto alloc_mem_err;
919 		}
920 	}
921 
922 	err = bnx2_alloc_rx_mem(bp);
923 	if (err)
924 		goto alloc_mem_err;
925 
926 	err = bnx2_alloc_tx_mem(bp);
927 	if (err)
928 		goto alloc_mem_err;
929 
930 	return 0;
931 
932 alloc_mem_err:
933 	bnx2_free_mem(bp);
934 	return -ENOMEM;
935 }
936 
937 static void
938 bnx2_report_fw_link(struct bnx2 *bp)
939 {
940 	u32 fw_link_status = 0;
941 
942 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
943 		return;
944 
945 	if (bp->link_up) {
946 		u32 bmsr;
947 
948 		switch (bp->line_speed) {
949 		case SPEED_10:
950 			if (bp->duplex == DUPLEX_HALF)
951 				fw_link_status = BNX2_LINK_STATUS_10HALF;
952 			else
953 				fw_link_status = BNX2_LINK_STATUS_10FULL;
954 			break;
955 		case SPEED_100:
956 			if (bp->duplex == DUPLEX_HALF)
957 				fw_link_status = BNX2_LINK_STATUS_100HALF;
958 			else
959 				fw_link_status = BNX2_LINK_STATUS_100FULL;
960 			break;
961 		case SPEED_1000:
962 			if (bp->duplex == DUPLEX_HALF)
963 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
964 			else
965 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
966 			break;
967 		case SPEED_2500:
968 			if (bp->duplex == DUPLEX_HALF)
969 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
970 			else
971 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
972 			break;
973 		}
974 
975 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
976 
977 		if (bp->autoneg) {
978 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
979 
980 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
981 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
982 
983 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
984 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
985 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
986 			else
987 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
988 		}
989 	}
990 	else
991 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
992 
993 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
994 }
995 
996 static char *
997 bnx2_xceiver_str(struct bnx2 *bp)
998 {
999 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
1000 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1001 		 "Copper");
1002 }
1003 
1004 static void
1005 bnx2_report_link(struct bnx2 *bp)
1006 {
1007 	if (bp->link_up) {
1008 		netif_carrier_on(bp->dev);
1009 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1010 			    bnx2_xceiver_str(bp),
1011 			    bp->line_speed,
1012 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1013 
1014 		if (bp->flow_ctrl) {
1015 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1016 				pr_cont(", receive ");
1017 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1018 					pr_cont("& transmit ");
1019 			}
1020 			else {
1021 				pr_cont(", transmit ");
1022 			}
1023 			pr_cont("flow control ON");
1024 		}
1025 		pr_cont("\n");
1026 	} else {
1027 		netif_carrier_off(bp->dev);
1028 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1029 			   bnx2_xceiver_str(bp));
1030 	}
1031 
1032 	bnx2_report_fw_link(bp);
1033 }
1034 
1035 static void
1036 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1037 {
1038 	u32 local_adv, remote_adv;
1039 
1040 	bp->flow_ctrl = 0;
1041 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1042 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1043 
1044 		if (bp->duplex == DUPLEX_FULL) {
1045 			bp->flow_ctrl = bp->req_flow_ctrl;
1046 		}
1047 		return;
1048 	}
1049 
1050 	if (bp->duplex != DUPLEX_FULL) {
1051 		return;
1052 	}
1053 
1054 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1055 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1056 		u32 val;
1057 
1058 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1059 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1060 			bp->flow_ctrl |= FLOW_CTRL_TX;
1061 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1062 			bp->flow_ctrl |= FLOW_CTRL_RX;
1063 		return;
1064 	}
1065 
1066 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1067 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1068 
1069 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1070 		u32 new_local_adv = 0;
1071 		u32 new_remote_adv = 0;
1072 
1073 		if (local_adv & ADVERTISE_1000XPAUSE)
1074 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1075 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1076 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1077 		if (remote_adv & ADVERTISE_1000XPAUSE)
1078 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1079 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1080 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1081 
1082 		local_adv = new_local_adv;
1083 		remote_adv = new_remote_adv;
1084 	}
1085 
1086 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1087 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1088 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1089 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1090 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1091 			}
1092 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1093 				bp->flow_ctrl = FLOW_CTRL_RX;
1094 			}
1095 		}
1096 		else {
1097 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1098 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1099 			}
1100 		}
1101 	}
1102 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1103 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1104 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1105 
1106 			bp->flow_ctrl = FLOW_CTRL_TX;
1107 		}
1108 	}
1109 }
1110 
1111 static int
1112 bnx2_5709s_linkup(struct bnx2 *bp)
1113 {
1114 	u32 val, speed;
1115 
1116 	bp->link_up = 1;
1117 
1118 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1119 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1120 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1121 
1122 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1123 		bp->line_speed = bp->req_line_speed;
1124 		bp->duplex = bp->req_duplex;
1125 		return 0;
1126 	}
1127 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1128 	switch (speed) {
1129 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1130 			bp->line_speed = SPEED_10;
1131 			break;
1132 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1133 			bp->line_speed = SPEED_100;
1134 			break;
1135 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1136 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1137 			bp->line_speed = SPEED_1000;
1138 			break;
1139 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1140 			bp->line_speed = SPEED_2500;
1141 			break;
1142 	}
1143 	if (val & MII_BNX2_GP_TOP_AN_FD)
1144 		bp->duplex = DUPLEX_FULL;
1145 	else
1146 		bp->duplex = DUPLEX_HALF;
1147 	return 0;
1148 }
1149 
1150 static int
1151 bnx2_5708s_linkup(struct bnx2 *bp)
1152 {
1153 	u32 val;
1154 
1155 	bp->link_up = 1;
1156 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1157 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1158 		case BCM5708S_1000X_STAT1_SPEED_10:
1159 			bp->line_speed = SPEED_10;
1160 			break;
1161 		case BCM5708S_1000X_STAT1_SPEED_100:
1162 			bp->line_speed = SPEED_100;
1163 			break;
1164 		case BCM5708S_1000X_STAT1_SPEED_1G:
1165 			bp->line_speed = SPEED_1000;
1166 			break;
1167 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1168 			bp->line_speed = SPEED_2500;
1169 			break;
1170 	}
1171 	if (val & BCM5708S_1000X_STAT1_FD)
1172 		bp->duplex = DUPLEX_FULL;
1173 	else
1174 		bp->duplex = DUPLEX_HALF;
1175 
1176 	return 0;
1177 }
1178 
1179 static int
1180 bnx2_5706s_linkup(struct bnx2 *bp)
1181 {
1182 	u32 bmcr, local_adv, remote_adv, common;
1183 
1184 	bp->link_up = 1;
1185 	bp->line_speed = SPEED_1000;
1186 
1187 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1188 	if (bmcr & BMCR_FULLDPLX) {
1189 		bp->duplex = DUPLEX_FULL;
1190 	}
1191 	else {
1192 		bp->duplex = DUPLEX_HALF;
1193 	}
1194 
1195 	if (!(bmcr & BMCR_ANENABLE)) {
1196 		return 0;
1197 	}
1198 
1199 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1200 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1201 
1202 	common = local_adv & remote_adv;
1203 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1204 
1205 		if (common & ADVERTISE_1000XFULL) {
1206 			bp->duplex = DUPLEX_FULL;
1207 		}
1208 		else {
1209 			bp->duplex = DUPLEX_HALF;
1210 		}
1211 	}
1212 
1213 	return 0;
1214 }
1215 
1216 static int
1217 bnx2_copper_linkup(struct bnx2 *bp)
1218 {
1219 	u32 bmcr;
1220 
1221 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1222 
1223 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1224 	if (bmcr & BMCR_ANENABLE) {
1225 		u32 local_adv, remote_adv, common;
1226 
1227 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1228 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1229 
1230 		common = local_adv & (remote_adv >> 2);
1231 		if (common & ADVERTISE_1000FULL) {
1232 			bp->line_speed = SPEED_1000;
1233 			bp->duplex = DUPLEX_FULL;
1234 		}
1235 		else if (common & ADVERTISE_1000HALF) {
1236 			bp->line_speed = SPEED_1000;
1237 			bp->duplex = DUPLEX_HALF;
1238 		}
1239 		else {
1240 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1241 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1242 
1243 			common = local_adv & remote_adv;
1244 			if (common & ADVERTISE_100FULL) {
1245 				bp->line_speed = SPEED_100;
1246 				bp->duplex = DUPLEX_FULL;
1247 			}
1248 			else if (common & ADVERTISE_100HALF) {
1249 				bp->line_speed = SPEED_100;
1250 				bp->duplex = DUPLEX_HALF;
1251 			}
1252 			else if (common & ADVERTISE_10FULL) {
1253 				bp->line_speed = SPEED_10;
1254 				bp->duplex = DUPLEX_FULL;
1255 			}
1256 			else if (common & ADVERTISE_10HALF) {
1257 				bp->line_speed = SPEED_10;
1258 				bp->duplex = DUPLEX_HALF;
1259 			}
1260 			else {
1261 				bp->line_speed = 0;
1262 				bp->link_up = 0;
1263 			}
1264 		}
1265 	}
1266 	else {
1267 		if (bmcr & BMCR_SPEED100) {
1268 			bp->line_speed = SPEED_100;
1269 		}
1270 		else {
1271 			bp->line_speed = SPEED_10;
1272 		}
1273 		if (bmcr & BMCR_FULLDPLX) {
1274 			bp->duplex = DUPLEX_FULL;
1275 		}
1276 		else {
1277 			bp->duplex = DUPLEX_HALF;
1278 		}
1279 	}
1280 
1281 	if (bp->link_up) {
1282 		u32 ext_status;
1283 
1284 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1285 		if (ext_status & EXT_STATUS_MDIX)
1286 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static void
1293 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1294 {
1295 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1296 
1297 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1298 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1299 	val |= 0x02 << 8;
1300 
1301 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1302 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1303 
1304 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1305 }
1306 
1307 static void
1308 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1309 {
1310 	int i;
1311 	u32 cid;
1312 
1313 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1314 		if (i == 1)
1315 			cid = RX_RSS_CID;
1316 		bnx2_init_rx_context(bp, cid);
1317 	}
1318 }
1319 
1320 static void
1321 bnx2_set_mac_link(struct bnx2 *bp)
1322 {
1323 	u32 val;
1324 
1325 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1326 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1327 		(bp->duplex == DUPLEX_HALF)) {
1328 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1329 	}
1330 
1331 	/* Configure the EMAC mode register. */
1332 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1333 
1334 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1335 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1336 		BNX2_EMAC_MODE_25G_MODE);
1337 
1338 	if (bp->link_up) {
1339 		switch (bp->line_speed) {
1340 			case SPEED_10:
1341 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1342 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1343 					break;
1344 				}
1345 				/* fall through */
1346 			case SPEED_100:
1347 				val |= BNX2_EMAC_MODE_PORT_MII;
1348 				break;
1349 			case SPEED_2500:
1350 				val |= BNX2_EMAC_MODE_25G_MODE;
1351 				/* fall through */
1352 			case SPEED_1000:
1353 				val |= BNX2_EMAC_MODE_PORT_GMII;
1354 				break;
1355 		}
1356 	}
1357 	else {
1358 		val |= BNX2_EMAC_MODE_PORT_GMII;
1359 	}
1360 
1361 	/* Set the MAC to operate in the appropriate duplex mode. */
1362 	if (bp->duplex == DUPLEX_HALF)
1363 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1364 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1365 
1366 	/* Enable/disable rx PAUSE. */
1367 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1368 
1369 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1370 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1371 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1372 
1373 	/* Enable/disable tx PAUSE. */
1374 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1375 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1376 
1377 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1378 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1379 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1380 
1381 	/* Acknowledge the interrupt. */
1382 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1383 
1384 	bnx2_init_all_rx_contexts(bp);
1385 }
1386 
1387 static void
1388 bnx2_enable_bmsr1(struct bnx2 *bp)
1389 {
1390 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1391 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1392 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1393 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1394 }
1395 
1396 static void
1397 bnx2_disable_bmsr1(struct bnx2 *bp)
1398 {
1399 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1400 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1401 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1402 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1403 }
1404 
1405 static int
1406 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1407 {
1408 	u32 up1;
1409 	int ret = 1;
1410 
1411 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1412 		return 0;
1413 
1414 	if (bp->autoneg & AUTONEG_SPEED)
1415 		bp->advertising |= ADVERTISED_2500baseX_Full;
1416 
1417 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1418 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1419 
1420 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1421 	if (!(up1 & BCM5708S_UP1_2G5)) {
1422 		up1 |= BCM5708S_UP1_2G5;
1423 		bnx2_write_phy(bp, bp->mii_up1, up1);
1424 		ret = 0;
1425 	}
1426 
1427 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1428 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1429 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1430 
1431 	return ret;
1432 }
1433 
1434 static int
1435 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1436 {
1437 	u32 up1;
1438 	int ret = 0;
1439 
1440 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1441 		return 0;
1442 
1443 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1444 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1445 
1446 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1447 	if (up1 & BCM5708S_UP1_2G5) {
1448 		up1 &= ~BCM5708S_UP1_2G5;
1449 		bnx2_write_phy(bp, bp->mii_up1, up1);
1450 		ret = 1;
1451 	}
1452 
1453 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1454 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1455 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1456 
1457 	return ret;
1458 }
1459 
1460 static void
1461 bnx2_enable_forced_2g5(struct bnx2 *bp)
1462 {
1463 	u32 uninitialized_var(bmcr);
1464 	int err;
1465 
1466 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1467 		return;
1468 
1469 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1470 		u32 val;
1471 
1472 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1473 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1474 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1475 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1476 			val |= MII_BNX2_SD_MISC1_FORCE |
1477 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1478 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1479 		}
1480 
1481 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1482 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1483 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1484 
1485 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1486 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1487 		if (!err)
1488 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1489 	} else {
1490 		return;
1491 	}
1492 
1493 	if (err)
1494 		return;
1495 
1496 	if (bp->autoneg & AUTONEG_SPEED) {
1497 		bmcr &= ~BMCR_ANENABLE;
1498 		if (bp->req_duplex == DUPLEX_FULL)
1499 			bmcr |= BMCR_FULLDPLX;
1500 	}
1501 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1502 }
1503 
1504 static void
1505 bnx2_disable_forced_2g5(struct bnx2 *bp)
1506 {
1507 	u32 uninitialized_var(bmcr);
1508 	int err;
1509 
1510 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1511 		return;
1512 
1513 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1514 		u32 val;
1515 
1516 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1517 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1518 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1519 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1520 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1521 		}
1522 
1523 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1524 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1525 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1526 
1527 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1528 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1529 		if (!err)
1530 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1531 	} else {
1532 		return;
1533 	}
1534 
1535 	if (err)
1536 		return;
1537 
1538 	if (bp->autoneg & AUTONEG_SPEED)
1539 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1540 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1541 }
1542 
1543 static void
1544 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1545 {
1546 	u32 val;
1547 
1548 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1549 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1550 	if (start)
1551 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1552 	else
1553 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1554 }
1555 
1556 static int
1557 bnx2_set_link(struct bnx2 *bp)
1558 {
1559 	u32 bmsr;
1560 	u8 link_up;
1561 
1562 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1563 		bp->link_up = 1;
1564 		return 0;
1565 	}
1566 
1567 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1568 		return 0;
1569 
1570 	link_up = bp->link_up;
1571 
1572 	bnx2_enable_bmsr1(bp);
1573 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1574 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1575 	bnx2_disable_bmsr1(bp);
1576 
1577 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1578 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1579 		u32 val, an_dbg;
1580 
1581 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1582 			bnx2_5706s_force_link_dn(bp, 0);
1583 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1584 		}
1585 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1586 
1587 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1588 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1589 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1590 
1591 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1592 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1593 			bmsr |= BMSR_LSTATUS;
1594 		else
1595 			bmsr &= ~BMSR_LSTATUS;
1596 	}
1597 
1598 	if (bmsr & BMSR_LSTATUS) {
1599 		bp->link_up = 1;
1600 
1601 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1602 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1603 				bnx2_5706s_linkup(bp);
1604 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1605 				bnx2_5708s_linkup(bp);
1606 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1607 				bnx2_5709s_linkup(bp);
1608 		}
1609 		else {
1610 			bnx2_copper_linkup(bp);
1611 		}
1612 		bnx2_resolve_flow_ctrl(bp);
1613 	}
1614 	else {
1615 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1616 		    (bp->autoneg & AUTONEG_SPEED))
1617 			bnx2_disable_forced_2g5(bp);
1618 
1619 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1620 			u32 bmcr;
1621 
1622 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1623 			bmcr |= BMCR_ANENABLE;
1624 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1625 
1626 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1627 		}
1628 		bp->link_up = 0;
1629 	}
1630 
1631 	if (bp->link_up != link_up) {
1632 		bnx2_report_link(bp);
1633 	}
1634 
1635 	bnx2_set_mac_link(bp);
1636 
1637 	return 0;
1638 }
1639 
1640 static int
1641 bnx2_reset_phy(struct bnx2 *bp)
1642 {
1643 	int i;
1644 	u32 reg;
1645 
1646         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1647 
1648 #define PHY_RESET_MAX_WAIT 100
1649 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1650 		udelay(10);
1651 
1652 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1653 		if (!(reg & BMCR_RESET)) {
1654 			udelay(20);
1655 			break;
1656 		}
1657 	}
1658 	if (i == PHY_RESET_MAX_WAIT) {
1659 		return -EBUSY;
1660 	}
1661 	return 0;
1662 }
1663 
1664 static u32
1665 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1666 {
1667 	u32 adv = 0;
1668 
1669 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1670 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1671 
1672 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1673 			adv = ADVERTISE_1000XPAUSE;
1674 		}
1675 		else {
1676 			adv = ADVERTISE_PAUSE_CAP;
1677 		}
1678 	}
1679 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1680 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1681 			adv = ADVERTISE_1000XPSE_ASYM;
1682 		}
1683 		else {
1684 			adv = ADVERTISE_PAUSE_ASYM;
1685 		}
1686 	}
1687 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1688 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1689 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1690 		}
1691 		else {
1692 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1693 		}
1694 	}
1695 	return adv;
1696 }
1697 
1698 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1699 
1700 static int
1701 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1702 __releases(&bp->phy_lock)
1703 __acquires(&bp->phy_lock)
1704 {
1705 	u32 speed_arg = 0, pause_adv;
1706 
1707 	pause_adv = bnx2_phy_get_pause_adv(bp);
1708 
1709 	if (bp->autoneg & AUTONEG_SPEED) {
1710 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1711 		if (bp->advertising & ADVERTISED_10baseT_Half)
1712 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1713 		if (bp->advertising & ADVERTISED_10baseT_Full)
1714 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1715 		if (bp->advertising & ADVERTISED_100baseT_Half)
1716 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1717 		if (bp->advertising & ADVERTISED_100baseT_Full)
1718 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1719 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1720 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1721 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1722 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1723 	} else {
1724 		if (bp->req_line_speed == SPEED_2500)
1725 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1726 		else if (bp->req_line_speed == SPEED_1000)
1727 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1728 		else if (bp->req_line_speed == SPEED_100) {
1729 			if (bp->req_duplex == DUPLEX_FULL)
1730 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1731 			else
1732 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1733 		} else if (bp->req_line_speed == SPEED_10) {
1734 			if (bp->req_duplex == DUPLEX_FULL)
1735 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1736 			else
1737 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1738 		}
1739 	}
1740 
1741 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1742 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1743 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1744 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1745 
1746 	if (port == PORT_TP)
1747 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1748 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1749 
1750 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1751 
1752 	spin_unlock_bh(&bp->phy_lock);
1753 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1754 	spin_lock_bh(&bp->phy_lock);
1755 
1756 	return 0;
1757 }
1758 
1759 static int
1760 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1761 __releases(&bp->phy_lock)
1762 __acquires(&bp->phy_lock)
1763 {
1764 	u32 adv, bmcr;
1765 	u32 new_adv = 0;
1766 
1767 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1768 		return bnx2_setup_remote_phy(bp, port);
1769 
1770 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1771 		u32 new_bmcr;
1772 		int force_link_down = 0;
1773 
1774 		if (bp->req_line_speed == SPEED_2500) {
1775 			if (!bnx2_test_and_enable_2g5(bp))
1776 				force_link_down = 1;
1777 		} else if (bp->req_line_speed == SPEED_1000) {
1778 			if (bnx2_test_and_disable_2g5(bp))
1779 				force_link_down = 1;
1780 		}
1781 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1782 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1783 
1784 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1785 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1786 		new_bmcr |= BMCR_SPEED1000;
1787 
1788 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1789 			if (bp->req_line_speed == SPEED_2500)
1790 				bnx2_enable_forced_2g5(bp);
1791 			else if (bp->req_line_speed == SPEED_1000) {
1792 				bnx2_disable_forced_2g5(bp);
1793 				new_bmcr &= ~0x2000;
1794 			}
1795 
1796 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1797 			if (bp->req_line_speed == SPEED_2500)
1798 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1799 			else
1800 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1801 		}
1802 
1803 		if (bp->req_duplex == DUPLEX_FULL) {
1804 			adv |= ADVERTISE_1000XFULL;
1805 			new_bmcr |= BMCR_FULLDPLX;
1806 		}
1807 		else {
1808 			adv |= ADVERTISE_1000XHALF;
1809 			new_bmcr &= ~BMCR_FULLDPLX;
1810 		}
1811 		if ((new_bmcr != bmcr) || (force_link_down)) {
1812 			/* Force a link down visible on the other side */
1813 			if (bp->link_up) {
1814 				bnx2_write_phy(bp, bp->mii_adv, adv &
1815 					       ~(ADVERTISE_1000XFULL |
1816 						 ADVERTISE_1000XHALF));
1817 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1818 					BMCR_ANRESTART | BMCR_ANENABLE);
1819 
1820 				bp->link_up = 0;
1821 				netif_carrier_off(bp->dev);
1822 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1823 				bnx2_report_link(bp);
1824 			}
1825 			bnx2_write_phy(bp, bp->mii_adv, adv);
1826 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1827 		} else {
1828 			bnx2_resolve_flow_ctrl(bp);
1829 			bnx2_set_mac_link(bp);
1830 		}
1831 		return 0;
1832 	}
1833 
1834 	bnx2_test_and_enable_2g5(bp);
1835 
1836 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1837 		new_adv |= ADVERTISE_1000XFULL;
1838 
1839 	new_adv |= bnx2_phy_get_pause_adv(bp);
1840 
1841 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1842 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1843 
1844 	bp->serdes_an_pending = 0;
1845 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1846 		/* Force a link down visible on the other side */
1847 		if (bp->link_up) {
1848 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1849 			spin_unlock_bh(&bp->phy_lock);
1850 			msleep(20);
1851 			spin_lock_bh(&bp->phy_lock);
1852 		}
1853 
1854 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1855 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1856 			BMCR_ANENABLE);
1857 		/* Speed up link-up time when the link partner
1858 		 * does not autonegotiate which is very common
1859 		 * in blade servers. Some blade servers use
1860 		 * IPMI for kerboard input and it's important
1861 		 * to minimize link disruptions. Autoneg. involves
1862 		 * exchanging base pages plus 3 next pages and
1863 		 * normally completes in about 120 msec.
1864 		 */
1865 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1866 		bp->serdes_an_pending = 1;
1867 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1868 	} else {
1869 		bnx2_resolve_flow_ctrl(bp);
1870 		bnx2_set_mac_link(bp);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 #define ETHTOOL_ALL_FIBRE_SPEED						\
1877 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1878 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1879 		(ADVERTISED_1000baseT_Full)
1880 
1881 #define ETHTOOL_ALL_COPPER_SPEED					\
1882 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1883 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1884 	ADVERTISED_1000baseT_Full)
1885 
1886 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1887 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1888 
1889 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1890 
1891 static void
1892 bnx2_set_default_remote_link(struct bnx2 *bp)
1893 {
1894 	u32 link;
1895 
1896 	if (bp->phy_port == PORT_TP)
1897 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1898 	else
1899 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1900 
1901 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1902 		bp->req_line_speed = 0;
1903 		bp->autoneg |= AUTONEG_SPEED;
1904 		bp->advertising = ADVERTISED_Autoneg;
1905 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1906 			bp->advertising |= ADVERTISED_10baseT_Half;
1907 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1908 			bp->advertising |= ADVERTISED_10baseT_Full;
1909 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1910 			bp->advertising |= ADVERTISED_100baseT_Half;
1911 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1912 			bp->advertising |= ADVERTISED_100baseT_Full;
1913 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1914 			bp->advertising |= ADVERTISED_1000baseT_Full;
1915 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1916 			bp->advertising |= ADVERTISED_2500baseX_Full;
1917 	} else {
1918 		bp->autoneg = 0;
1919 		bp->advertising = 0;
1920 		bp->req_duplex = DUPLEX_FULL;
1921 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1922 			bp->req_line_speed = SPEED_10;
1923 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1924 				bp->req_duplex = DUPLEX_HALF;
1925 		}
1926 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1927 			bp->req_line_speed = SPEED_100;
1928 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1929 				bp->req_duplex = DUPLEX_HALF;
1930 		}
1931 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1932 			bp->req_line_speed = SPEED_1000;
1933 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1934 			bp->req_line_speed = SPEED_2500;
1935 	}
1936 }
1937 
1938 static void
1939 bnx2_set_default_link(struct bnx2 *bp)
1940 {
1941 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1942 		bnx2_set_default_remote_link(bp);
1943 		return;
1944 	}
1945 
1946 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1947 	bp->req_line_speed = 0;
1948 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1949 		u32 reg;
1950 
1951 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1952 
1953 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1954 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1955 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1956 			bp->autoneg = 0;
1957 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1958 			bp->req_duplex = DUPLEX_FULL;
1959 		}
1960 	} else
1961 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1962 }
1963 
1964 static void
1965 bnx2_send_heart_beat(struct bnx2 *bp)
1966 {
1967 	u32 msg;
1968 	u32 addr;
1969 
1970 	spin_lock(&bp->indirect_lock);
1971 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1972 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1973 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1974 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1975 	spin_unlock(&bp->indirect_lock);
1976 }
1977 
1978 static void
1979 bnx2_remote_phy_event(struct bnx2 *bp)
1980 {
1981 	u32 msg;
1982 	u8 link_up = bp->link_up;
1983 	u8 old_port;
1984 
1985 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1986 
1987 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1988 		bnx2_send_heart_beat(bp);
1989 
1990 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1991 
1992 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1993 		bp->link_up = 0;
1994 	else {
1995 		u32 speed;
1996 
1997 		bp->link_up = 1;
1998 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1999 		bp->duplex = DUPLEX_FULL;
2000 		switch (speed) {
2001 			case BNX2_LINK_STATUS_10HALF:
2002 				bp->duplex = DUPLEX_HALF;
2003 				/* fall through */
2004 			case BNX2_LINK_STATUS_10FULL:
2005 				bp->line_speed = SPEED_10;
2006 				break;
2007 			case BNX2_LINK_STATUS_100HALF:
2008 				bp->duplex = DUPLEX_HALF;
2009 				/* fall through */
2010 			case BNX2_LINK_STATUS_100BASE_T4:
2011 			case BNX2_LINK_STATUS_100FULL:
2012 				bp->line_speed = SPEED_100;
2013 				break;
2014 			case BNX2_LINK_STATUS_1000HALF:
2015 				bp->duplex = DUPLEX_HALF;
2016 				/* fall through */
2017 			case BNX2_LINK_STATUS_1000FULL:
2018 				bp->line_speed = SPEED_1000;
2019 				break;
2020 			case BNX2_LINK_STATUS_2500HALF:
2021 				bp->duplex = DUPLEX_HALF;
2022 				/* fall through */
2023 			case BNX2_LINK_STATUS_2500FULL:
2024 				bp->line_speed = SPEED_2500;
2025 				break;
2026 			default:
2027 				bp->line_speed = 0;
2028 				break;
2029 		}
2030 
2031 		bp->flow_ctrl = 0;
2032 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2033 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2034 			if (bp->duplex == DUPLEX_FULL)
2035 				bp->flow_ctrl = bp->req_flow_ctrl;
2036 		} else {
2037 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2038 				bp->flow_ctrl |= FLOW_CTRL_TX;
2039 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2040 				bp->flow_ctrl |= FLOW_CTRL_RX;
2041 		}
2042 
2043 		old_port = bp->phy_port;
2044 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2045 			bp->phy_port = PORT_FIBRE;
2046 		else
2047 			bp->phy_port = PORT_TP;
2048 
2049 		if (old_port != bp->phy_port)
2050 			bnx2_set_default_link(bp);
2051 
2052 	}
2053 	if (bp->link_up != link_up)
2054 		bnx2_report_link(bp);
2055 
2056 	bnx2_set_mac_link(bp);
2057 }
2058 
2059 static int
2060 bnx2_set_remote_link(struct bnx2 *bp)
2061 {
2062 	u32 evt_code;
2063 
2064 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2065 	switch (evt_code) {
2066 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2067 			bnx2_remote_phy_event(bp);
2068 			break;
2069 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2070 		default:
2071 			bnx2_send_heart_beat(bp);
2072 			break;
2073 	}
2074 	return 0;
2075 }
2076 
2077 static int
2078 bnx2_setup_copper_phy(struct bnx2 *bp)
2079 __releases(&bp->phy_lock)
2080 __acquires(&bp->phy_lock)
2081 {
2082 	u32 bmcr, adv_reg, new_adv = 0;
2083 	u32 new_bmcr;
2084 
2085 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2086 
2087 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2088 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2089 		    ADVERTISE_PAUSE_ASYM);
2090 
2091 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2092 
2093 	if (bp->autoneg & AUTONEG_SPEED) {
2094 		u32 adv1000_reg;
2095 		u32 new_adv1000 = 0;
2096 
2097 		new_adv |= bnx2_phy_get_pause_adv(bp);
2098 
2099 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2100 		adv1000_reg &= PHY_ALL_1000_SPEED;
2101 
2102 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2103 		if ((adv1000_reg != new_adv1000) ||
2104 			(adv_reg != new_adv) ||
2105 			((bmcr & BMCR_ANENABLE) == 0)) {
2106 
2107 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2108 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2109 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2110 				BMCR_ANENABLE);
2111 		}
2112 		else if (bp->link_up) {
2113 			/* Flow ctrl may have changed from auto to forced */
2114 			/* or vice-versa. */
2115 
2116 			bnx2_resolve_flow_ctrl(bp);
2117 			bnx2_set_mac_link(bp);
2118 		}
2119 		return 0;
2120 	}
2121 
2122 	/* advertise nothing when forcing speed */
2123 	if (adv_reg != new_adv)
2124 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2125 
2126 	new_bmcr = 0;
2127 	if (bp->req_line_speed == SPEED_100) {
2128 		new_bmcr |= BMCR_SPEED100;
2129 	}
2130 	if (bp->req_duplex == DUPLEX_FULL) {
2131 		new_bmcr |= BMCR_FULLDPLX;
2132 	}
2133 	if (new_bmcr != bmcr) {
2134 		u32 bmsr;
2135 
2136 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2137 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138 
2139 		if (bmsr & BMSR_LSTATUS) {
2140 			/* Force link down */
2141 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2142 			spin_unlock_bh(&bp->phy_lock);
2143 			msleep(50);
2144 			spin_lock_bh(&bp->phy_lock);
2145 
2146 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2147 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2148 		}
2149 
2150 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2151 
2152 		/* Normally, the new speed is setup after the link has
2153 		 * gone down and up again. In some cases, link will not go
2154 		 * down so we need to set up the new speed here.
2155 		 */
2156 		if (bmsr & BMSR_LSTATUS) {
2157 			bp->line_speed = bp->req_line_speed;
2158 			bp->duplex = bp->req_duplex;
2159 			bnx2_resolve_flow_ctrl(bp);
2160 			bnx2_set_mac_link(bp);
2161 		}
2162 	} else {
2163 		bnx2_resolve_flow_ctrl(bp);
2164 		bnx2_set_mac_link(bp);
2165 	}
2166 	return 0;
2167 }
2168 
2169 static int
2170 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2171 __releases(&bp->phy_lock)
2172 __acquires(&bp->phy_lock)
2173 {
2174 	if (bp->loopback == MAC_LOOPBACK)
2175 		return 0;
2176 
2177 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2178 		return bnx2_setup_serdes_phy(bp, port);
2179 	}
2180 	else {
2181 		return bnx2_setup_copper_phy(bp);
2182 	}
2183 }
2184 
2185 static int
2186 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2187 {
2188 	u32 val;
2189 
2190 	bp->mii_bmcr = MII_BMCR + 0x10;
2191 	bp->mii_bmsr = MII_BMSR + 0x10;
2192 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2193 	bp->mii_adv = MII_ADVERTISE + 0x10;
2194 	bp->mii_lpa = MII_LPA + 0x10;
2195 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2196 
2197 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2198 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2199 
2200 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2201 	if (reset_phy)
2202 		bnx2_reset_phy(bp);
2203 
2204 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2205 
2206 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2207 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2208 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2209 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2210 
2211 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2212 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2213 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2214 		val |= BCM5708S_UP1_2G5;
2215 	else
2216 		val &= ~BCM5708S_UP1_2G5;
2217 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2218 
2219 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2220 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2221 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2222 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2223 
2224 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2225 
2226 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2227 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2228 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2229 
2230 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2231 
2232 	return 0;
2233 }
2234 
2235 static int
2236 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2237 {
2238 	u32 val;
2239 
2240 	if (reset_phy)
2241 		bnx2_reset_phy(bp);
2242 
2243 	bp->mii_up1 = BCM5708S_UP1;
2244 
2245 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2246 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2247 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2248 
2249 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2250 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2251 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2252 
2253 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2254 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2255 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2256 
2257 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2258 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2259 		val |= BCM5708S_UP1_2G5;
2260 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2261 	}
2262 
2263 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2264 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2265 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2266 		/* increase tx signal amplitude */
2267 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2268 			       BCM5708S_BLK_ADDR_TX_MISC);
2269 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2270 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2271 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2272 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2273 	}
2274 
2275 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2276 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2277 
2278 	if (val) {
2279 		u32 is_backplane;
2280 
2281 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2282 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2283 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2284 				       BCM5708S_BLK_ADDR_TX_MISC);
2285 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2286 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2287 				       BCM5708S_BLK_ADDR_DIG);
2288 		}
2289 	}
2290 	return 0;
2291 }
2292 
2293 static int
2294 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2295 {
2296 	if (reset_phy)
2297 		bnx2_reset_phy(bp);
2298 
2299 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2300 
2301 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2302 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2303 
2304 	if (bp->dev->mtu > ETH_DATA_LEN) {
2305 		u32 val;
2306 
2307 		/* Set extended packet length bit */
2308 		bnx2_write_phy(bp, 0x18, 0x7);
2309 		bnx2_read_phy(bp, 0x18, &val);
2310 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2311 
2312 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2313 		bnx2_read_phy(bp, 0x1c, &val);
2314 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2315 	}
2316 	else {
2317 		u32 val;
2318 
2319 		bnx2_write_phy(bp, 0x18, 0x7);
2320 		bnx2_read_phy(bp, 0x18, &val);
2321 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2322 
2323 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2324 		bnx2_read_phy(bp, 0x1c, &val);
2325 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2326 	}
2327 
2328 	return 0;
2329 }
2330 
2331 static int
2332 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2333 {
2334 	u32 val;
2335 
2336 	if (reset_phy)
2337 		bnx2_reset_phy(bp);
2338 
2339 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2340 		bnx2_write_phy(bp, 0x18, 0x0c00);
2341 		bnx2_write_phy(bp, 0x17, 0x000a);
2342 		bnx2_write_phy(bp, 0x15, 0x310b);
2343 		bnx2_write_phy(bp, 0x17, 0x201f);
2344 		bnx2_write_phy(bp, 0x15, 0x9506);
2345 		bnx2_write_phy(bp, 0x17, 0x401f);
2346 		bnx2_write_phy(bp, 0x15, 0x14e2);
2347 		bnx2_write_phy(bp, 0x18, 0x0400);
2348 	}
2349 
2350 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2351 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2352 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2353 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2354 		val &= ~(1 << 8);
2355 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2356 	}
2357 
2358 	if (bp->dev->mtu > ETH_DATA_LEN) {
2359 		/* Set extended packet length bit */
2360 		bnx2_write_phy(bp, 0x18, 0x7);
2361 		bnx2_read_phy(bp, 0x18, &val);
2362 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2363 
2364 		bnx2_read_phy(bp, 0x10, &val);
2365 		bnx2_write_phy(bp, 0x10, val | 0x1);
2366 	}
2367 	else {
2368 		bnx2_write_phy(bp, 0x18, 0x7);
2369 		bnx2_read_phy(bp, 0x18, &val);
2370 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2371 
2372 		bnx2_read_phy(bp, 0x10, &val);
2373 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2374 	}
2375 
2376 	/* ethernet@wirespeed */
2377 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2378 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2379 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2380 
2381 	/* auto-mdix */
2382 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2383 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2384 
2385 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2386 	return 0;
2387 }
2388 
2389 
2390 static int
2391 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2392 __releases(&bp->phy_lock)
2393 __acquires(&bp->phy_lock)
2394 {
2395 	u32 val;
2396 	int rc = 0;
2397 
2398 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2399 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2400 
2401 	bp->mii_bmcr = MII_BMCR;
2402 	bp->mii_bmsr = MII_BMSR;
2403 	bp->mii_bmsr1 = MII_BMSR;
2404 	bp->mii_adv = MII_ADVERTISE;
2405 	bp->mii_lpa = MII_LPA;
2406 
2407 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2408 
2409 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2410 		goto setup_phy;
2411 
2412 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2413 	bp->phy_id = val << 16;
2414 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2415 	bp->phy_id |= val & 0xffff;
2416 
2417 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2418 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2419 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2420 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2421 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2422 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2423 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2424 	}
2425 	else {
2426 		rc = bnx2_init_copper_phy(bp, reset_phy);
2427 	}
2428 
2429 setup_phy:
2430 	if (!rc)
2431 		rc = bnx2_setup_phy(bp, bp->phy_port);
2432 
2433 	return rc;
2434 }
2435 
2436 static int
2437 bnx2_set_mac_loopback(struct bnx2 *bp)
2438 {
2439 	u32 mac_mode;
2440 
2441 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2442 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2443 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2444 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445 	bp->link_up = 1;
2446 	return 0;
2447 }
2448 
2449 static int bnx2_test_link(struct bnx2 *);
2450 
2451 static int
2452 bnx2_set_phy_loopback(struct bnx2 *bp)
2453 {
2454 	u32 mac_mode;
2455 	int rc, i;
2456 
2457 	spin_lock_bh(&bp->phy_lock);
2458 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2459 			    BMCR_SPEED1000);
2460 	spin_unlock_bh(&bp->phy_lock);
2461 	if (rc)
2462 		return rc;
2463 
2464 	for (i = 0; i < 10; i++) {
2465 		if (bnx2_test_link(bp) == 0)
2466 			break;
2467 		msleep(100);
2468 	}
2469 
2470 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2471 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2472 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2473 		      BNX2_EMAC_MODE_25G_MODE);
2474 
2475 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2476 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2477 	bp->link_up = 1;
2478 	return 0;
2479 }
2480 
2481 static void
2482 bnx2_dump_mcp_state(struct bnx2 *bp)
2483 {
2484 	struct net_device *dev = bp->dev;
2485 	u32 mcp_p0, mcp_p1;
2486 
2487 	netdev_err(dev, "<--- start MCP states dump --->\n");
2488 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2489 		mcp_p0 = BNX2_MCP_STATE_P0;
2490 		mcp_p1 = BNX2_MCP_STATE_P1;
2491 	} else {
2492 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2493 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2494 	}
2495 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2496 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2497 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2498 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2499 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2500 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2501 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2502 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2503 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2504 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2505 	netdev_err(dev, "DEBUG: shmem states:\n");
2506 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2507 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2508 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2509 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2510 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2511 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2512 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2513 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2514 	pr_cont(" condition[%08x]\n",
2515 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2516 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2517 	DP_SHMEM_LINE(bp, 0x3cc);
2518 	DP_SHMEM_LINE(bp, 0x3dc);
2519 	DP_SHMEM_LINE(bp, 0x3ec);
2520 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2521 	netdev_err(dev, "<--- end MCP states dump --->\n");
2522 }
2523 
2524 static int
2525 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2526 {
2527 	int i;
2528 	u32 val;
2529 
2530 	bp->fw_wr_seq++;
2531 	msg_data |= bp->fw_wr_seq;
2532 	bp->fw_last_msg = msg_data;
2533 
2534 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2535 
2536 	if (!ack)
2537 		return 0;
2538 
2539 	/* wait for an acknowledgement. */
2540 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2541 		msleep(10);
2542 
2543 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2544 
2545 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2546 			break;
2547 	}
2548 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2549 		return 0;
2550 
2551 	/* If we timed out, inform the firmware that this is the case. */
2552 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2553 		msg_data &= ~BNX2_DRV_MSG_CODE;
2554 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2555 
2556 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2557 		if (!silent) {
2558 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2559 			bnx2_dump_mcp_state(bp);
2560 		}
2561 
2562 		return -EBUSY;
2563 	}
2564 
2565 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2566 		return -EIO;
2567 
2568 	return 0;
2569 }
2570 
2571 static int
2572 bnx2_init_5709_context(struct bnx2 *bp)
2573 {
2574 	int i, ret = 0;
2575 	u32 val;
2576 
2577 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2578 	val |= (BNX2_PAGE_BITS - 8) << 16;
2579 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2580 	for (i = 0; i < 10; i++) {
2581 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2582 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2583 			break;
2584 		udelay(2);
2585 	}
2586 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2587 		return -EBUSY;
2588 
2589 	for (i = 0; i < bp->ctx_pages; i++) {
2590 		int j;
2591 
2592 		if (bp->ctx_blk[i])
2593 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2594 		else
2595 			return -ENOMEM;
2596 
2597 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2598 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2599 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2600 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2601 			(u64) bp->ctx_blk_mapping[i] >> 32);
2602 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2603 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2604 		for (j = 0; j < 10; j++) {
2605 
2606 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2607 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2608 				break;
2609 			udelay(5);
2610 		}
2611 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2612 			ret = -EBUSY;
2613 			break;
2614 		}
2615 	}
2616 	return ret;
2617 }
2618 
2619 static void
2620 bnx2_init_context(struct bnx2 *bp)
2621 {
2622 	u32 vcid;
2623 
2624 	vcid = 96;
2625 	while (vcid) {
2626 		u32 vcid_addr, pcid_addr, offset;
2627 		int i;
2628 
2629 		vcid--;
2630 
2631 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2632 			u32 new_vcid;
2633 
2634 			vcid_addr = GET_PCID_ADDR(vcid);
2635 			if (vcid & 0x8) {
2636 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2637 			}
2638 			else {
2639 				new_vcid = vcid;
2640 			}
2641 			pcid_addr = GET_PCID_ADDR(new_vcid);
2642 		}
2643 		else {
2644 	    		vcid_addr = GET_CID_ADDR(vcid);
2645 			pcid_addr = vcid_addr;
2646 		}
2647 
2648 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2649 			vcid_addr += (i << PHY_CTX_SHIFT);
2650 			pcid_addr += (i << PHY_CTX_SHIFT);
2651 
2652 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2653 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2654 
2655 			/* Zero out the context. */
2656 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2657 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2658 		}
2659 	}
2660 }
2661 
2662 static int
2663 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2664 {
2665 	u16 *good_mbuf;
2666 	u32 good_mbuf_cnt;
2667 	u32 val;
2668 
2669 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2670 	if (good_mbuf == NULL)
2671 		return -ENOMEM;
2672 
2673 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2674 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2675 
2676 	good_mbuf_cnt = 0;
2677 
2678 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2679 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2680 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2681 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2682 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2683 
2684 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2685 
2686 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2687 
2688 		/* The addresses with Bit 9 set are bad memory blocks. */
2689 		if (!(val & (1 << 9))) {
2690 			good_mbuf[good_mbuf_cnt] = (u16) val;
2691 			good_mbuf_cnt++;
2692 		}
2693 
2694 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2695 	}
2696 
2697 	/* Free the good ones back to the mbuf pool thus discarding
2698 	 * all the bad ones. */
2699 	while (good_mbuf_cnt) {
2700 		good_mbuf_cnt--;
2701 
2702 		val = good_mbuf[good_mbuf_cnt];
2703 		val = (val << 9) | val | 1;
2704 
2705 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2706 	}
2707 	kfree(good_mbuf);
2708 	return 0;
2709 }
2710 
2711 static void
2712 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2713 {
2714 	u32 val;
2715 
2716 	val = (mac_addr[0] << 8) | mac_addr[1];
2717 
2718 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2719 
2720 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2721 		(mac_addr[4] << 8) | mac_addr[5];
2722 
2723 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2724 }
2725 
2726 static inline int
2727 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2728 {
2729 	dma_addr_t mapping;
2730 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2731 	struct bnx2_rx_bd *rxbd =
2732 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2733 	struct page *page = alloc_page(gfp);
2734 
2735 	if (!page)
2736 		return -ENOMEM;
2737 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2738 			       PCI_DMA_FROMDEVICE);
2739 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2740 		__free_page(page);
2741 		return -EIO;
2742 	}
2743 
2744 	rx_pg->page = page;
2745 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2746 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2747 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2748 	return 0;
2749 }
2750 
2751 static void
2752 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2753 {
2754 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2755 	struct page *page = rx_pg->page;
2756 
2757 	if (!page)
2758 		return;
2759 
2760 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2761 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2762 
2763 	__free_page(page);
2764 	rx_pg->page = NULL;
2765 }
2766 
2767 static inline int
2768 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2769 {
2770 	u8 *data;
2771 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2772 	dma_addr_t mapping;
2773 	struct bnx2_rx_bd *rxbd =
2774 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2775 
2776 	data = kmalloc(bp->rx_buf_size, gfp);
2777 	if (!data)
2778 		return -ENOMEM;
2779 
2780 	mapping = dma_map_single(&bp->pdev->dev,
2781 				 get_l2_fhdr(data),
2782 				 bp->rx_buf_use_size,
2783 				 PCI_DMA_FROMDEVICE);
2784 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2785 		kfree(data);
2786 		return -EIO;
2787 	}
2788 
2789 	rx_buf->data = data;
2790 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2791 
2792 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2793 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2794 
2795 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2796 
2797 	return 0;
2798 }
2799 
2800 static int
2801 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2802 {
2803 	struct status_block *sblk = bnapi->status_blk.msi;
2804 	u32 new_link_state, old_link_state;
2805 	int is_set = 1;
2806 
2807 	new_link_state = sblk->status_attn_bits & event;
2808 	old_link_state = sblk->status_attn_bits_ack & event;
2809 	if (new_link_state != old_link_state) {
2810 		if (new_link_state)
2811 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2812 		else
2813 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2814 	} else
2815 		is_set = 0;
2816 
2817 	return is_set;
2818 }
2819 
2820 static void
2821 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2822 {
2823 	spin_lock(&bp->phy_lock);
2824 
2825 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2826 		bnx2_set_link(bp);
2827 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2828 		bnx2_set_remote_link(bp);
2829 
2830 	spin_unlock(&bp->phy_lock);
2831 
2832 }
2833 
2834 static inline u16
2835 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2836 {
2837 	u16 cons;
2838 
2839 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2840 
2841 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2842 		cons++;
2843 	return cons;
2844 }
2845 
2846 static int
2847 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2848 {
2849 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2850 	u16 hw_cons, sw_cons, sw_ring_cons;
2851 	int tx_pkt = 0, index;
2852 	unsigned int tx_bytes = 0;
2853 	struct netdev_queue *txq;
2854 
2855 	index = (bnapi - bp->bnx2_napi);
2856 	txq = netdev_get_tx_queue(bp->dev, index);
2857 
2858 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2859 	sw_cons = txr->tx_cons;
2860 
2861 	while (sw_cons != hw_cons) {
2862 		struct bnx2_sw_tx_bd *tx_buf;
2863 		struct sk_buff *skb;
2864 		int i, last;
2865 
2866 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2867 
2868 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2869 		skb = tx_buf->skb;
2870 
2871 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2872 		prefetch(&skb->end);
2873 
2874 		/* partial BD completions possible with TSO packets */
2875 		if (tx_buf->is_gso) {
2876 			u16 last_idx, last_ring_idx;
2877 
2878 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2879 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2880 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2881 				last_idx++;
2882 			}
2883 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2884 				break;
2885 			}
2886 		}
2887 
2888 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2889 			skb_headlen(skb), PCI_DMA_TODEVICE);
2890 
2891 		tx_buf->skb = NULL;
2892 		last = tx_buf->nr_frags;
2893 
2894 		for (i = 0; i < last; i++) {
2895 			struct bnx2_sw_tx_bd *tx_buf;
2896 
2897 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2898 
2899 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2900 			dma_unmap_page(&bp->pdev->dev,
2901 				dma_unmap_addr(tx_buf, mapping),
2902 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2903 				PCI_DMA_TODEVICE);
2904 		}
2905 
2906 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2907 
2908 		tx_bytes += skb->len;
2909 		dev_kfree_skb_any(skb);
2910 		tx_pkt++;
2911 		if (tx_pkt == budget)
2912 			break;
2913 
2914 		if (hw_cons == sw_cons)
2915 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2916 	}
2917 
2918 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2919 	txr->hw_tx_cons = hw_cons;
2920 	txr->tx_cons = sw_cons;
2921 
2922 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2923 	 * before checking for netif_tx_queue_stopped().  Without the
2924 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2925 	 * will miss it and cause the queue to be stopped forever.
2926 	 */
2927 	smp_mb();
2928 
2929 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2930 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2931 		__netif_tx_lock(txq, smp_processor_id());
2932 		if ((netif_tx_queue_stopped(txq)) &&
2933 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2934 			netif_tx_wake_queue(txq);
2935 		__netif_tx_unlock(txq);
2936 	}
2937 
2938 	return tx_pkt;
2939 }
2940 
2941 static void
2942 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2943 			struct sk_buff *skb, int count)
2944 {
2945 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2946 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2947 	int i;
2948 	u16 hw_prod, prod;
2949 	u16 cons = rxr->rx_pg_cons;
2950 
2951 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2952 
2953 	/* The caller was unable to allocate a new page to replace the
2954 	 * last one in the frags array, so we need to recycle that page
2955 	 * and then free the skb.
2956 	 */
2957 	if (skb) {
2958 		struct page *page;
2959 		struct skb_shared_info *shinfo;
2960 
2961 		shinfo = skb_shinfo(skb);
2962 		shinfo->nr_frags--;
2963 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2964 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2965 
2966 		cons_rx_pg->page = page;
2967 		dev_kfree_skb(skb);
2968 	}
2969 
2970 	hw_prod = rxr->rx_pg_prod;
2971 
2972 	for (i = 0; i < count; i++) {
2973 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2974 
2975 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2976 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2977 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2978 						[BNX2_RX_IDX(cons)];
2979 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2980 						[BNX2_RX_IDX(prod)];
2981 
2982 		if (prod != cons) {
2983 			prod_rx_pg->page = cons_rx_pg->page;
2984 			cons_rx_pg->page = NULL;
2985 			dma_unmap_addr_set(prod_rx_pg, mapping,
2986 				dma_unmap_addr(cons_rx_pg, mapping));
2987 
2988 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2989 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2990 
2991 		}
2992 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2993 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2994 	}
2995 	rxr->rx_pg_prod = hw_prod;
2996 	rxr->rx_pg_cons = cons;
2997 }
2998 
2999 static inline void
3000 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3001 		   u8 *data, u16 cons, u16 prod)
3002 {
3003 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3004 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3005 
3006 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3007 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3008 
3009 	dma_sync_single_for_device(&bp->pdev->dev,
3010 		dma_unmap_addr(cons_rx_buf, mapping),
3011 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3012 
3013 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3014 
3015 	prod_rx_buf->data = data;
3016 
3017 	if (cons == prod)
3018 		return;
3019 
3020 	dma_unmap_addr_set(prod_rx_buf, mapping,
3021 			dma_unmap_addr(cons_rx_buf, mapping));
3022 
3023 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3024 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3025 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3026 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3027 }
3028 
3029 static struct sk_buff *
3030 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3031 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3032 	    u32 ring_idx)
3033 {
3034 	int err;
3035 	u16 prod = ring_idx & 0xffff;
3036 	struct sk_buff *skb;
3037 
3038 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3039 	if (unlikely(err)) {
3040 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3041 error:
3042 		if (hdr_len) {
3043 			unsigned int raw_len = len + 4;
3044 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3045 
3046 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3047 		}
3048 		return NULL;
3049 	}
3050 
3051 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3052 			 PCI_DMA_FROMDEVICE);
3053 	skb = build_skb(data, 0);
3054 	if (!skb) {
3055 		kfree(data);
3056 		goto error;
3057 	}
3058 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3059 	if (hdr_len == 0) {
3060 		skb_put(skb, len);
3061 		return skb;
3062 	} else {
3063 		unsigned int i, frag_len, frag_size, pages;
3064 		struct bnx2_sw_pg *rx_pg;
3065 		u16 pg_cons = rxr->rx_pg_cons;
3066 		u16 pg_prod = rxr->rx_pg_prod;
3067 
3068 		frag_size = len + 4 - hdr_len;
3069 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3070 		skb_put(skb, hdr_len);
3071 
3072 		for (i = 0; i < pages; i++) {
3073 			dma_addr_t mapping_old;
3074 
3075 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3076 			if (unlikely(frag_len <= 4)) {
3077 				unsigned int tail = 4 - frag_len;
3078 
3079 				rxr->rx_pg_cons = pg_cons;
3080 				rxr->rx_pg_prod = pg_prod;
3081 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3082 							pages - i);
3083 				skb->len -= tail;
3084 				if (i == 0) {
3085 					skb->tail -= tail;
3086 				} else {
3087 					skb_frag_t *frag =
3088 						&skb_shinfo(skb)->frags[i - 1];
3089 					skb_frag_size_sub(frag, tail);
3090 					skb->data_len -= tail;
3091 				}
3092 				return skb;
3093 			}
3094 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3095 
3096 			/* Don't unmap yet.  If we're unable to allocate a new
3097 			 * page, we need to recycle the page and the DMA addr.
3098 			 */
3099 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3100 			if (i == pages - 1)
3101 				frag_len -= 4;
3102 
3103 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3104 			rx_pg->page = NULL;
3105 
3106 			err = bnx2_alloc_rx_page(bp, rxr,
3107 						 BNX2_RX_PG_RING_IDX(pg_prod),
3108 						 GFP_ATOMIC);
3109 			if (unlikely(err)) {
3110 				rxr->rx_pg_cons = pg_cons;
3111 				rxr->rx_pg_prod = pg_prod;
3112 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3113 							pages - i);
3114 				return NULL;
3115 			}
3116 
3117 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3118 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3119 
3120 			frag_size -= frag_len;
3121 			skb->data_len += frag_len;
3122 			skb->truesize += PAGE_SIZE;
3123 			skb->len += frag_len;
3124 
3125 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3126 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3127 		}
3128 		rxr->rx_pg_prod = pg_prod;
3129 		rxr->rx_pg_cons = pg_cons;
3130 	}
3131 	return skb;
3132 }
3133 
3134 static inline u16
3135 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3136 {
3137 	u16 cons;
3138 
3139 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3140 
3141 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3142 		cons++;
3143 	return cons;
3144 }
3145 
3146 static int
3147 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3148 {
3149 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3150 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3151 	struct l2_fhdr *rx_hdr;
3152 	int rx_pkt = 0, pg_ring_used = 0;
3153 
3154 	if (budget <= 0)
3155 		return rx_pkt;
3156 
3157 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3158 	sw_cons = rxr->rx_cons;
3159 	sw_prod = rxr->rx_prod;
3160 
3161 	/* Memory barrier necessary as speculative reads of the rx
3162 	 * buffer can be ahead of the index in the status block
3163 	 */
3164 	rmb();
3165 	while (sw_cons != hw_cons) {
3166 		unsigned int len, hdr_len;
3167 		u32 status;
3168 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3169 		struct sk_buff *skb;
3170 		dma_addr_t dma_addr;
3171 		u8 *data;
3172 		u16 next_ring_idx;
3173 
3174 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3175 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3176 
3177 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3178 		data = rx_buf->data;
3179 		rx_buf->data = NULL;
3180 
3181 		rx_hdr = get_l2_fhdr(data);
3182 		prefetch(rx_hdr);
3183 
3184 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3185 
3186 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3187 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3188 			PCI_DMA_FROMDEVICE);
3189 
3190 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3191 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3192 		prefetch(get_l2_fhdr(next_rx_buf->data));
3193 
3194 		len = rx_hdr->l2_fhdr_pkt_len;
3195 		status = rx_hdr->l2_fhdr_status;
3196 
3197 		hdr_len = 0;
3198 		if (status & L2_FHDR_STATUS_SPLIT) {
3199 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3200 			pg_ring_used = 1;
3201 		} else if (len > bp->rx_jumbo_thresh) {
3202 			hdr_len = bp->rx_jumbo_thresh;
3203 			pg_ring_used = 1;
3204 		}
3205 
3206 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3207 				       L2_FHDR_ERRORS_PHY_DECODE |
3208 				       L2_FHDR_ERRORS_ALIGNMENT |
3209 				       L2_FHDR_ERRORS_TOO_SHORT |
3210 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3211 
3212 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3213 					  sw_ring_prod);
3214 			if (pg_ring_used) {
3215 				int pages;
3216 
3217 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3218 
3219 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3220 			}
3221 			goto next_rx;
3222 		}
3223 
3224 		len -= 4;
3225 
3226 		if (len <= bp->rx_copy_thresh) {
3227 			skb = netdev_alloc_skb(bp->dev, len + 6);
3228 			if (skb == NULL) {
3229 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3230 						  sw_ring_prod);
3231 				goto next_rx;
3232 			}
3233 
3234 			/* aligned copy */
3235 			memcpy(skb->data,
3236 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3237 			       len + 6);
3238 			skb_reserve(skb, 6);
3239 			skb_put(skb, len);
3240 
3241 			bnx2_reuse_rx_data(bp, rxr, data,
3242 				sw_ring_cons, sw_ring_prod);
3243 
3244 		} else {
3245 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3246 					  (sw_ring_cons << 16) | sw_ring_prod);
3247 			if (!skb)
3248 				goto next_rx;
3249 		}
3250 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3251 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3252 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3253 
3254 		skb->protocol = eth_type_trans(skb, bp->dev);
3255 
3256 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3257 		    skb->protocol != htons(0x8100) &&
3258 		    skb->protocol != htons(ETH_P_8021AD)) {
3259 
3260 			dev_kfree_skb(skb);
3261 			goto next_rx;
3262 
3263 		}
3264 
3265 		skb_checksum_none_assert(skb);
3266 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3267 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3268 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3269 
3270 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3271 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3272 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3273 		}
3274 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3275 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3276 		     L2_FHDR_STATUS_USE_RXHASH))
3277 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3278 				     PKT_HASH_TYPE_L3);
3279 
3280 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3281 		napi_gro_receive(&bnapi->napi, skb);
3282 		rx_pkt++;
3283 
3284 next_rx:
3285 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3286 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3287 
3288 		if ((rx_pkt == budget))
3289 			break;
3290 
3291 		/* Refresh hw_cons to see if there is new work */
3292 		if (sw_cons == hw_cons) {
3293 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3294 			rmb();
3295 		}
3296 	}
3297 	rxr->rx_cons = sw_cons;
3298 	rxr->rx_prod = sw_prod;
3299 
3300 	if (pg_ring_used)
3301 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3302 
3303 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3304 
3305 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3306 
3307 	mmiowb();
3308 
3309 	return rx_pkt;
3310 
3311 }
3312 
3313 /* MSI ISR - The only difference between this and the INTx ISR
3314  * is that the MSI interrupt is always serviced.
3315  */
3316 static irqreturn_t
3317 bnx2_msi(int irq, void *dev_instance)
3318 {
3319 	struct bnx2_napi *bnapi = dev_instance;
3320 	struct bnx2 *bp = bnapi->bp;
3321 
3322 	prefetch(bnapi->status_blk.msi);
3323 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3324 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3325 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3326 
3327 	/* Return here if interrupt is disabled. */
3328 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3329 		return IRQ_HANDLED;
3330 
3331 	napi_schedule(&bnapi->napi);
3332 
3333 	return IRQ_HANDLED;
3334 }
3335 
3336 static irqreturn_t
3337 bnx2_msi_1shot(int irq, void *dev_instance)
3338 {
3339 	struct bnx2_napi *bnapi = dev_instance;
3340 	struct bnx2 *bp = bnapi->bp;
3341 
3342 	prefetch(bnapi->status_blk.msi);
3343 
3344 	/* Return here if interrupt is disabled. */
3345 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3346 		return IRQ_HANDLED;
3347 
3348 	napi_schedule(&bnapi->napi);
3349 
3350 	return IRQ_HANDLED;
3351 }
3352 
3353 static irqreturn_t
3354 bnx2_interrupt(int irq, void *dev_instance)
3355 {
3356 	struct bnx2_napi *bnapi = dev_instance;
3357 	struct bnx2 *bp = bnapi->bp;
3358 	struct status_block *sblk = bnapi->status_blk.msi;
3359 
3360 	/* When using INTx, it is possible for the interrupt to arrive
3361 	 * at the CPU before the status block posted prior to the
3362 	 * interrupt. Reading a register will flush the status block.
3363 	 * When using MSI, the MSI message will always complete after
3364 	 * the status block write.
3365 	 */
3366 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3367 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3368 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3369 		return IRQ_NONE;
3370 
3371 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3372 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3373 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3374 
3375 	/* Read back to deassert IRQ immediately to avoid too many
3376 	 * spurious interrupts.
3377 	 */
3378 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3379 
3380 	/* Return here if interrupt is shared and is disabled. */
3381 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3382 		return IRQ_HANDLED;
3383 
3384 	if (napi_schedule_prep(&bnapi->napi)) {
3385 		bnapi->last_status_idx = sblk->status_idx;
3386 		__napi_schedule(&bnapi->napi);
3387 	}
3388 
3389 	return IRQ_HANDLED;
3390 }
3391 
3392 static inline int
3393 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3394 {
3395 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3396 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3397 
3398 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3399 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3400 		return 1;
3401 	return 0;
3402 }
3403 
3404 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3405 				 STATUS_ATTN_BITS_TIMER_ABORT)
3406 
3407 static inline int
3408 bnx2_has_work(struct bnx2_napi *bnapi)
3409 {
3410 	struct status_block *sblk = bnapi->status_blk.msi;
3411 
3412 	if (bnx2_has_fast_work(bnapi))
3413 		return 1;
3414 
3415 #ifdef BCM_CNIC
3416 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3417 		return 1;
3418 #endif
3419 
3420 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3421 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3422 		return 1;
3423 
3424 	return 0;
3425 }
3426 
3427 static void
3428 bnx2_chk_missed_msi(struct bnx2 *bp)
3429 {
3430 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3431 	u32 msi_ctrl;
3432 
3433 	if (bnx2_has_work(bnapi)) {
3434 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3435 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3436 			return;
3437 
3438 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3439 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3440 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3441 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3442 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3443 		}
3444 	}
3445 
3446 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3447 }
3448 
3449 #ifdef BCM_CNIC
3450 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3451 {
3452 	struct cnic_ops *c_ops;
3453 
3454 	if (!bnapi->cnic_present)
3455 		return;
3456 
3457 	rcu_read_lock();
3458 	c_ops = rcu_dereference(bp->cnic_ops);
3459 	if (c_ops)
3460 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3461 						      bnapi->status_blk.msi);
3462 	rcu_read_unlock();
3463 }
3464 #endif
3465 
3466 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3467 {
3468 	struct status_block *sblk = bnapi->status_blk.msi;
3469 	u32 status_attn_bits = sblk->status_attn_bits;
3470 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3471 
3472 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3473 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3474 
3475 		bnx2_phy_int(bp, bnapi);
3476 
3477 		/* This is needed to take care of transient status
3478 		 * during link changes.
3479 		 */
3480 		BNX2_WR(bp, BNX2_HC_COMMAND,
3481 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3482 		BNX2_RD(bp, BNX2_HC_COMMAND);
3483 	}
3484 }
3485 
3486 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3487 			  int work_done, int budget)
3488 {
3489 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3490 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3491 
3492 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3493 		bnx2_tx_int(bp, bnapi, 0);
3494 
3495 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3496 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3497 
3498 	return work_done;
3499 }
3500 
3501 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3502 {
3503 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3504 	struct bnx2 *bp = bnapi->bp;
3505 	int work_done = 0;
3506 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3507 
3508 	while (1) {
3509 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3510 		if (unlikely(work_done >= budget))
3511 			break;
3512 
3513 		bnapi->last_status_idx = sblk->status_idx;
3514 		/* status idx must be read before checking for more work. */
3515 		rmb();
3516 		if (likely(!bnx2_has_fast_work(bnapi))) {
3517 
3518 			napi_complete_done(napi, work_done);
3519 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3520 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521 				bnapi->last_status_idx);
3522 			break;
3523 		}
3524 	}
3525 	return work_done;
3526 }
3527 
3528 static int bnx2_poll(struct napi_struct *napi, int budget)
3529 {
3530 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3531 	struct bnx2 *bp = bnapi->bp;
3532 	int work_done = 0;
3533 	struct status_block *sblk = bnapi->status_blk.msi;
3534 
3535 	while (1) {
3536 		bnx2_poll_link(bp, bnapi);
3537 
3538 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3539 
3540 #ifdef BCM_CNIC
3541 		bnx2_poll_cnic(bp, bnapi);
3542 #endif
3543 
3544 		/* bnapi->last_status_idx is used below to tell the hw how
3545 		 * much work has been processed, so we must read it before
3546 		 * checking for more work.
3547 		 */
3548 		bnapi->last_status_idx = sblk->status_idx;
3549 
3550 		if (unlikely(work_done >= budget))
3551 			break;
3552 
3553 		rmb();
3554 		if (likely(!bnx2_has_work(bnapi))) {
3555 			napi_complete_done(napi, work_done);
3556 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3557 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3558 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3559 					bnapi->last_status_idx);
3560 				break;
3561 			}
3562 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3563 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3564 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3565 				bnapi->last_status_idx);
3566 
3567 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3568 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3569 				bnapi->last_status_idx);
3570 			break;
3571 		}
3572 	}
3573 
3574 	return work_done;
3575 }
3576 
3577 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3578  * from set_multicast.
3579  */
3580 static void
3581 bnx2_set_rx_mode(struct net_device *dev)
3582 {
3583 	struct bnx2 *bp = netdev_priv(dev);
3584 	u32 rx_mode, sort_mode;
3585 	struct netdev_hw_addr *ha;
3586 	int i;
3587 
3588 	if (!netif_running(dev))
3589 		return;
3590 
3591 	spin_lock_bh(&bp->phy_lock);
3592 
3593 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3594 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3595 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3596 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3597 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3598 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3599 	if (dev->flags & IFF_PROMISC) {
3600 		/* Promiscuous mode. */
3601 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3602 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3603 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3604 	}
3605 	else if (dev->flags & IFF_ALLMULTI) {
3606 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3607 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3608 				0xffffffff);
3609         	}
3610 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3611 	}
3612 	else {
3613 		/* Accept one or more multicast(s). */
3614 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3615 		u32 regidx;
3616 		u32 bit;
3617 		u32 crc;
3618 
3619 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3620 
3621 		netdev_for_each_mc_addr(ha, dev) {
3622 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3623 			bit = crc & 0xff;
3624 			regidx = (bit & 0xe0) >> 5;
3625 			bit &= 0x1f;
3626 			mc_filter[regidx] |= (1 << bit);
3627 		}
3628 
3629 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3630 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3631 				mc_filter[i]);
3632 		}
3633 
3634 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3635 	}
3636 
3637 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3638 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3639 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3640 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3641 	} else if (!(dev->flags & IFF_PROMISC)) {
3642 		/* Add all entries into to the match filter list */
3643 		i = 0;
3644 		netdev_for_each_uc_addr(ha, dev) {
3645 			bnx2_set_mac_addr(bp, ha->addr,
3646 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3647 			sort_mode |= (1 <<
3648 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3649 			i++;
3650 		}
3651 
3652 	}
3653 
3654 	if (rx_mode != bp->rx_mode) {
3655 		bp->rx_mode = rx_mode;
3656 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3657 	}
3658 
3659 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3660 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3661 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3662 
3663 	spin_unlock_bh(&bp->phy_lock);
3664 }
3665 
3666 static int
3667 check_fw_section(const struct firmware *fw,
3668 		 const struct bnx2_fw_file_section *section,
3669 		 u32 alignment, bool non_empty)
3670 {
3671 	u32 offset = be32_to_cpu(section->offset);
3672 	u32 len = be32_to_cpu(section->len);
3673 
3674 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3675 		return -EINVAL;
3676 	if ((non_empty && len == 0) || len > fw->size - offset ||
3677 	    len & (alignment - 1))
3678 		return -EINVAL;
3679 	return 0;
3680 }
3681 
3682 static int
3683 check_mips_fw_entry(const struct firmware *fw,
3684 		    const struct bnx2_mips_fw_file_entry *entry)
3685 {
3686 	if (check_fw_section(fw, &entry->text, 4, true) ||
3687 	    check_fw_section(fw, &entry->data, 4, false) ||
3688 	    check_fw_section(fw, &entry->rodata, 4, false))
3689 		return -EINVAL;
3690 	return 0;
3691 }
3692 
3693 static void bnx2_release_firmware(struct bnx2 *bp)
3694 {
3695 	if (bp->rv2p_firmware) {
3696 		release_firmware(bp->mips_firmware);
3697 		release_firmware(bp->rv2p_firmware);
3698 		bp->rv2p_firmware = NULL;
3699 	}
3700 }
3701 
3702 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3703 {
3704 	const char *mips_fw_file, *rv2p_fw_file;
3705 	const struct bnx2_mips_fw_file *mips_fw;
3706 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3707 	int rc;
3708 
3709 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3710 		mips_fw_file = FW_MIPS_FILE_09;
3711 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3712 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3713 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3714 		else
3715 			rv2p_fw_file = FW_RV2P_FILE_09;
3716 	} else {
3717 		mips_fw_file = FW_MIPS_FILE_06;
3718 		rv2p_fw_file = FW_RV2P_FILE_06;
3719 	}
3720 
3721 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3722 	if (rc) {
3723 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3724 		goto out;
3725 	}
3726 
3727 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3728 	if (rc) {
3729 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3730 		goto err_release_mips_firmware;
3731 	}
3732 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3733 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3734 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3735 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3736 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3737 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3738 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3739 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3740 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3741 		rc = -EINVAL;
3742 		goto err_release_firmware;
3743 	}
3744 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3745 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3746 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3747 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3748 		rc = -EINVAL;
3749 		goto err_release_firmware;
3750 	}
3751 out:
3752 	return rc;
3753 
3754 err_release_firmware:
3755 	release_firmware(bp->rv2p_firmware);
3756 	bp->rv2p_firmware = NULL;
3757 err_release_mips_firmware:
3758 	release_firmware(bp->mips_firmware);
3759 	goto out;
3760 }
3761 
3762 static int bnx2_request_firmware(struct bnx2 *bp)
3763 {
3764 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3765 }
3766 
3767 static u32
3768 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3769 {
3770 	switch (idx) {
3771 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3772 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3773 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3774 		break;
3775 	}
3776 	return rv2p_code;
3777 }
3778 
3779 static int
3780 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3781 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3782 {
3783 	u32 rv2p_code_len, file_offset;
3784 	__be32 *rv2p_code;
3785 	int i;
3786 	u32 val, cmd, addr;
3787 
3788 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3789 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3790 
3791 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3792 
3793 	if (rv2p_proc == RV2P_PROC1) {
3794 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3795 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3796 	} else {
3797 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3798 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3799 	}
3800 
3801 	for (i = 0; i < rv2p_code_len; i += 8) {
3802 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3803 		rv2p_code++;
3804 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3805 		rv2p_code++;
3806 
3807 		val = (i / 8) | cmd;
3808 		BNX2_WR(bp, addr, val);
3809 	}
3810 
3811 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3812 	for (i = 0; i < 8; i++) {
3813 		u32 loc, code;
3814 
3815 		loc = be32_to_cpu(fw_entry->fixup[i]);
3816 		if (loc && ((loc * 4) < rv2p_code_len)) {
3817 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3818 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3819 			code = be32_to_cpu(*(rv2p_code + loc));
3820 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3821 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3822 
3823 			val = (loc / 2) | cmd;
3824 			BNX2_WR(bp, addr, val);
3825 		}
3826 	}
3827 
3828 	/* Reset the processor, un-stall is done later. */
3829 	if (rv2p_proc == RV2P_PROC1) {
3830 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3831 	}
3832 	else {
3833 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3834 	}
3835 
3836 	return 0;
3837 }
3838 
3839 static int
3840 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3841 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3842 {
3843 	u32 addr, len, file_offset;
3844 	__be32 *data;
3845 	u32 offset;
3846 	u32 val;
3847 
3848 	/* Halt the CPU. */
3849 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3850 	val |= cpu_reg->mode_value_halt;
3851 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3852 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3853 
3854 	/* Load the Text area. */
3855 	addr = be32_to_cpu(fw_entry->text.addr);
3856 	len = be32_to_cpu(fw_entry->text.len);
3857 	file_offset = be32_to_cpu(fw_entry->text.offset);
3858 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3859 
3860 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3861 	if (len) {
3862 		int j;
3863 
3864 		for (j = 0; j < (len / 4); j++, offset += 4)
3865 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3866 	}
3867 
3868 	/* Load the Data area. */
3869 	addr = be32_to_cpu(fw_entry->data.addr);
3870 	len = be32_to_cpu(fw_entry->data.len);
3871 	file_offset = be32_to_cpu(fw_entry->data.offset);
3872 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3873 
3874 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3875 	if (len) {
3876 		int j;
3877 
3878 		for (j = 0; j < (len / 4); j++, offset += 4)
3879 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3880 	}
3881 
3882 	/* Load the Read-Only area. */
3883 	addr = be32_to_cpu(fw_entry->rodata.addr);
3884 	len = be32_to_cpu(fw_entry->rodata.len);
3885 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3886 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3887 
3888 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3889 	if (len) {
3890 		int j;
3891 
3892 		for (j = 0; j < (len / 4); j++, offset += 4)
3893 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3894 	}
3895 
3896 	/* Clear the pre-fetch instruction. */
3897 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3898 
3899 	val = be32_to_cpu(fw_entry->start_addr);
3900 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3901 
3902 	/* Start the CPU. */
3903 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3904 	val &= ~cpu_reg->mode_value_halt;
3905 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3906 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3907 
3908 	return 0;
3909 }
3910 
3911 static int
3912 bnx2_init_cpus(struct bnx2 *bp)
3913 {
3914 	const struct bnx2_mips_fw_file *mips_fw =
3915 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3916 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3917 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3918 	int rc;
3919 
3920 	/* Initialize the RV2P processor. */
3921 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3922 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3923 
3924 	/* Initialize the RX Processor. */
3925 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3926 	if (rc)
3927 		goto init_cpu_err;
3928 
3929 	/* Initialize the TX Processor. */
3930 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3931 	if (rc)
3932 		goto init_cpu_err;
3933 
3934 	/* Initialize the TX Patch-up Processor. */
3935 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3936 	if (rc)
3937 		goto init_cpu_err;
3938 
3939 	/* Initialize the Completion Processor. */
3940 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3941 	if (rc)
3942 		goto init_cpu_err;
3943 
3944 	/* Initialize the Command Processor. */
3945 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3946 
3947 init_cpu_err:
3948 	return rc;
3949 }
3950 
3951 static void
3952 bnx2_setup_wol(struct bnx2 *bp)
3953 {
3954 	int i;
3955 	u32 val, wol_msg;
3956 
3957 	if (bp->wol) {
3958 		u32 advertising;
3959 		u8 autoneg;
3960 
3961 		autoneg = bp->autoneg;
3962 		advertising = bp->advertising;
3963 
3964 		if (bp->phy_port == PORT_TP) {
3965 			bp->autoneg = AUTONEG_SPEED;
3966 			bp->advertising = ADVERTISED_10baseT_Half |
3967 				ADVERTISED_10baseT_Full |
3968 				ADVERTISED_100baseT_Half |
3969 				ADVERTISED_100baseT_Full |
3970 				ADVERTISED_Autoneg;
3971 		}
3972 
3973 		spin_lock_bh(&bp->phy_lock);
3974 		bnx2_setup_phy(bp, bp->phy_port);
3975 		spin_unlock_bh(&bp->phy_lock);
3976 
3977 		bp->autoneg = autoneg;
3978 		bp->advertising = advertising;
3979 
3980 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3981 
3982 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3983 
3984 		/* Enable port mode. */
3985 		val &= ~BNX2_EMAC_MODE_PORT;
3986 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3987 		       BNX2_EMAC_MODE_ACPI_RCVD |
3988 		       BNX2_EMAC_MODE_MPKT;
3989 		if (bp->phy_port == PORT_TP) {
3990 			val |= BNX2_EMAC_MODE_PORT_MII;
3991 		} else {
3992 			val |= BNX2_EMAC_MODE_PORT_GMII;
3993 			if (bp->line_speed == SPEED_2500)
3994 				val |= BNX2_EMAC_MODE_25G_MODE;
3995 		}
3996 
3997 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3998 
3999 		/* receive all multicast */
4000 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4001 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4002 				0xffffffff);
4003 		}
4004 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4005 
4006 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4007 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4008 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4009 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4010 
4011 		/* Need to enable EMAC and RPM for WOL. */
4012 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4013 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4014 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4015 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4016 
4017 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4018 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4019 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4020 
4021 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4022 	} else {
4023 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4024 	}
4025 
4026 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4027 		u32 val;
4028 
4029 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4030 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4031 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4032 			return;
4033 		}
4034 		/* Tell firmware not to power down the PHY yet, otherwise
4035 		 * the chip will take a long time to respond to MMIO reads.
4036 		 */
4037 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4038 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4039 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4040 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4041 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4042 	}
4043 
4044 }
4045 
4046 static int
4047 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4048 {
4049 	switch (state) {
4050 	case PCI_D0: {
4051 		u32 val;
4052 
4053 		pci_enable_wake(bp->pdev, PCI_D0, false);
4054 		pci_set_power_state(bp->pdev, PCI_D0);
4055 
4056 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4057 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4058 		val &= ~BNX2_EMAC_MODE_MPKT;
4059 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4060 
4061 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4062 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4063 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4064 		break;
4065 	}
4066 	case PCI_D3hot: {
4067 		bnx2_setup_wol(bp);
4068 		pci_wake_from_d3(bp->pdev, bp->wol);
4069 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4070 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4071 
4072 			if (bp->wol)
4073 				pci_set_power_state(bp->pdev, PCI_D3hot);
4074 			break;
4075 
4076 		}
4077 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4078 			u32 val;
4079 
4080 			/* Tell firmware not to power down the PHY yet,
4081 			 * otherwise the other port may not respond to
4082 			 * MMIO reads.
4083 			 */
4084 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4085 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4086 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4087 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4088 		}
4089 		pci_set_power_state(bp->pdev, PCI_D3hot);
4090 
4091 		/* No more memory access after this point until
4092 		 * device is brought back to D0.
4093 		 */
4094 		break;
4095 	}
4096 	default:
4097 		return -EINVAL;
4098 	}
4099 	return 0;
4100 }
4101 
4102 static int
4103 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4104 {
4105 	u32 val;
4106 	int j;
4107 
4108 	/* Request access to the flash interface. */
4109 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4110 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4111 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4112 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4113 			break;
4114 
4115 		udelay(5);
4116 	}
4117 
4118 	if (j >= NVRAM_TIMEOUT_COUNT)
4119 		return -EBUSY;
4120 
4121 	return 0;
4122 }
4123 
4124 static int
4125 bnx2_release_nvram_lock(struct bnx2 *bp)
4126 {
4127 	int j;
4128 	u32 val;
4129 
4130 	/* Relinquish nvram interface. */
4131 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4132 
4133 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4134 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4135 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4136 			break;
4137 
4138 		udelay(5);
4139 	}
4140 
4141 	if (j >= NVRAM_TIMEOUT_COUNT)
4142 		return -EBUSY;
4143 
4144 	return 0;
4145 }
4146 
4147 
4148 static int
4149 bnx2_enable_nvram_write(struct bnx2 *bp)
4150 {
4151 	u32 val;
4152 
4153 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4154 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4155 
4156 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4157 		int j;
4158 
4159 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4160 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4161 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4162 
4163 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4164 			udelay(5);
4165 
4166 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4167 			if (val & BNX2_NVM_COMMAND_DONE)
4168 				break;
4169 		}
4170 
4171 		if (j >= NVRAM_TIMEOUT_COUNT)
4172 			return -EBUSY;
4173 	}
4174 	return 0;
4175 }
4176 
4177 static void
4178 bnx2_disable_nvram_write(struct bnx2 *bp)
4179 {
4180 	u32 val;
4181 
4182 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4183 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4184 }
4185 
4186 
4187 static void
4188 bnx2_enable_nvram_access(struct bnx2 *bp)
4189 {
4190 	u32 val;
4191 
4192 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4193 	/* Enable both bits, even on read. */
4194 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4195 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4196 }
4197 
4198 static void
4199 bnx2_disable_nvram_access(struct bnx2 *bp)
4200 {
4201 	u32 val;
4202 
4203 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4204 	/* Disable both bits, even after read. */
4205 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4206 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4207 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4208 }
4209 
4210 static int
4211 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4212 {
4213 	u32 cmd;
4214 	int j;
4215 
4216 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4217 		/* Buffered flash, no erase needed */
4218 		return 0;
4219 
4220 	/* Build an erase command */
4221 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4222 	      BNX2_NVM_COMMAND_DOIT;
4223 
4224 	/* Need to clear DONE bit separately. */
4225 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4226 
4227 	/* Address of the NVRAM to read from. */
4228 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4229 
4230 	/* Issue an erase command. */
4231 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4232 
4233 	/* Wait for completion. */
4234 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4235 		u32 val;
4236 
4237 		udelay(5);
4238 
4239 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4240 		if (val & BNX2_NVM_COMMAND_DONE)
4241 			break;
4242 	}
4243 
4244 	if (j >= NVRAM_TIMEOUT_COUNT)
4245 		return -EBUSY;
4246 
4247 	return 0;
4248 }
4249 
4250 static int
4251 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4252 {
4253 	u32 cmd;
4254 	int j;
4255 
4256 	/* Build the command word. */
4257 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4258 
4259 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4260 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4261 		offset = ((offset / bp->flash_info->page_size) <<
4262 			   bp->flash_info->page_bits) +
4263 			  (offset % bp->flash_info->page_size);
4264 	}
4265 
4266 	/* Need to clear DONE bit separately. */
4267 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4268 
4269 	/* Address of the NVRAM to read from. */
4270 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4271 
4272 	/* Issue a read command. */
4273 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4274 
4275 	/* Wait for completion. */
4276 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4277 		u32 val;
4278 
4279 		udelay(5);
4280 
4281 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4282 		if (val & BNX2_NVM_COMMAND_DONE) {
4283 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4284 			memcpy(ret_val, &v, 4);
4285 			break;
4286 		}
4287 	}
4288 	if (j >= NVRAM_TIMEOUT_COUNT)
4289 		return -EBUSY;
4290 
4291 	return 0;
4292 }
4293 
4294 
4295 static int
4296 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4297 {
4298 	u32 cmd;
4299 	__be32 val32;
4300 	int j;
4301 
4302 	/* Build the command word. */
4303 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4304 
4305 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4306 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4307 		offset = ((offset / bp->flash_info->page_size) <<
4308 			  bp->flash_info->page_bits) +
4309 			 (offset % bp->flash_info->page_size);
4310 	}
4311 
4312 	/* Need to clear DONE bit separately. */
4313 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4314 
4315 	memcpy(&val32, val, 4);
4316 
4317 	/* Write the data. */
4318 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4319 
4320 	/* Address of the NVRAM to write to. */
4321 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4322 
4323 	/* Issue the write command. */
4324 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4325 
4326 	/* Wait for completion. */
4327 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4328 		udelay(5);
4329 
4330 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4331 			break;
4332 	}
4333 	if (j >= NVRAM_TIMEOUT_COUNT)
4334 		return -EBUSY;
4335 
4336 	return 0;
4337 }
4338 
4339 static int
4340 bnx2_init_nvram(struct bnx2 *bp)
4341 {
4342 	u32 val;
4343 	int j, entry_count, rc = 0;
4344 	const struct flash_spec *flash;
4345 
4346 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4347 		bp->flash_info = &flash_5709;
4348 		goto get_flash_size;
4349 	}
4350 
4351 	/* Determine the selected interface. */
4352 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4353 
4354 	entry_count = ARRAY_SIZE(flash_table);
4355 
4356 	if (val & 0x40000000) {
4357 
4358 		/* Flash interface has been reconfigured */
4359 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4360 		     j++, flash++) {
4361 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4362 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4363 				bp->flash_info = flash;
4364 				break;
4365 			}
4366 		}
4367 	}
4368 	else {
4369 		u32 mask;
4370 		/* Not yet been reconfigured */
4371 
4372 		if (val & (1 << 23))
4373 			mask = FLASH_BACKUP_STRAP_MASK;
4374 		else
4375 			mask = FLASH_STRAP_MASK;
4376 
4377 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4378 			j++, flash++) {
4379 
4380 			if ((val & mask) == (flash->strapping & mask)) {
4381 				bp->flash_info = flash;
4382 
4383 				/* Request access to the flash interface. */
4384 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4385 					return rc;
4386 
4387 				/* Enable access to flash interface */
4388 				bnx2_enable_nvram_access(bp);
4389 
4390 				/* Reconfigure the flash interface */
4391 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4392 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4393 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4394 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4395 
4396 				/* Disable access to flash interface */
4397 				bnx2_disable_nvram_access(bp);
4398 				bnx2_release_nvram_lock(bp);
4399 
4400 				break;
4401 			}
4402 		}
4403 	} /* if (val & 0x40000000) */
4404 
4405 	if (j == entry_count) {
4406 		bp->flash_info = NULL;
4407 		pr_alert("Unknown flash/EEPROM type\n");
4408 		return -ENODEV;
4409 	}
4410 
4411 get_flash_size:
4412 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4413 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4414 	if (val)
4415 		bp->flash_size = val;
4416 	else
4417 		bp->flash_size = bp->flash_info->total_size;
4418 
4419 	return rc;
4420 }
4421 
4422 static int
4423 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4424 		int buf_size)
4425 {
4426 	int rc = 0;
4427 	u32 cmd_flags, offset32, len32, extra;
4428 
4429 	if (buf_size == 0)
4430 		return 0;
4431 
4432 	/* Request access to the flash interface. */
4433 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4434 		return rc;
4435 
4436 	/* Enable access to flash interface */
4437 	bnx2_enable_nvram_access(bp);
4438 
4439 	len32 = buf_size;
4440 	offset32 = offset;
4441 	extra = 0;
4442 
4443 	cmd_flags = 0;
4444 
4445 	if (offset32 & 3) {
4446 		u8 buf[4];
4447 		u32 pre_len;
4448 
4449 		offset32 &= ~3;
4450 		pre_len = 4 - (offset & 3);
4451 
4452 		if (pre_len >= len32) {
4453 			pre_len = len32;
4454 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4455 				    BNX2_NVM_COMMAND_LAST;
4456 		}
4457 		else {
4458 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4459 		}
4460 
4461 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4462 
4463 		if (rc)
4464 			return rc;
4465 
4466 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4467 
4468 		offset32 += 4;
4469 		ret_buf += pre_len;
4470 		len32 -= pre_len;
4471 	}
4472 	if (len32 & 3) {
4473 		extra = 4 - (len32 & 3);
4474 		len32 = (len32 + 4) & ~3;
4475 	}
4476 
4477 	if (len32 == 4) {
4478 		u8 buf[4];
4479 
4480 		if (cmd_flags)
4481 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4482 		else
4483 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4484 				    BNX2_NVM_COMMAND_LAST;
4485 
4486 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4487 
4488 		memcpy(ret_buf, buf, 4 - extra);
4489 	}
4490 	else if (len32 > 0) {
4491 		u8 buf[4];
4492 
4493 		/* Read the first word. */
4494 		if (cmd_flags)
4495 			cmd_flags = 0;
4496 		else
4497 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4498 
4499 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4500 
4501 		/* Advance to the next dword. */
4502 		offset32 += 4;
4503 		ret_buf += 4;
4504 		len32 -= 4;
4505 
4506 		while (len32 > 4 && rc == 0) {
4507 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4508 
4509 			/* Advance to the next dword. */
4510 			offset32 += 4;
4511 			ret_buf += 4;
4512 			len32 -= 4;
4513 		}
4514 
4515 		if (rc)
4516 			return rc;
4517 
4518 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4519 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4520 
4521 		memcpy(ret_buf, buf, 4 - extra);
4522 	}
4523 
4524 	/* Disable access to flash interface */
4525 	bnx2_disable_nvram_access(bp);
4526 
4527 	bnx2_release_nvram_lock(bp);
4528 
4529 	return rc;
4530 }
4531 
4532 static int
4533 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4534 		int buf_size)
4535 {
4536 	u32 written, offset32, len32;
4537 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4538 	int rc = 0;
4539 	int align_start, align_end;
4540 
4541 	buf = data_buf;
4542 	offset32 = offset;
4543 	len32 = buf_size;
4544 	align_start = align_end = 0;
4545 
4546 	if ((align_start = (offset32 & 3))) {
4547 		offset32 &= ~3;
4548 		len32 += align_start;
4549 		if (len32 < 4)
4550 			len32 = 4;
4551 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4552 			return rc;
4553 	}
4554 
4555 	if (len32 & 3) {
4556 		align_end = 4 - (len32 & 3);
4557 		len32 += align_end;
4558 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4559 			return rc;
4560 	}
4561 
4562 	if (align_start || align_end) {
4563 		align_buf = kmalloc(len32, GFP_KERNEL);
4564 		if (align_buf == NULL)
4565 			return -ENOMEM;
4566 		if (align_start) {
4567 			memcpy(align_buf, start, 4);
4568 		}
4569 		if (align_end) {
4570 			memcpy(align_buf + len32 - 4, end, 4);
4571 		}
4572 		memcpy(align_buf + align_start, data_buf, buf_size);
4573 		buf = align_buf;
4574 	}
4575 
4576 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4577 		flash_buffer = kmalloc(264, GFP_KERNEL);
4578 		if (flash_buffer == NULL) {
4579 			rc = -ENOMEM;
4580 			goto nvram_write_end;
4581 		}
4582 	}
4583 
4584 	written = 0;
4585 	while ((written < len32) && (rc == 0)) {
4586 		u32 page_start, page_end, data_start, data_end;
4587 		u32 addr, cmd_flags;
4588 		int i;
4589 
4590 	        /* Find the page_start addr */
4591 		page_start = offset32 + written;
4592 		page_start -= (page_start % bp->flash_info->page_size);
4593 		/* Find the page_end addr */
4594 		page_end = page_start + bp->flash_info->page_size;
4595 		/* Find the data_start addr */
4596 		data_start = (written == 0) ? offset32 : page_start;
4597 		/* Find the data_end addr */
4598 		data_end = (page_end > offset32 + len32) ?
4599 			(offset32 + len32) : page_end;
4600 
4601 		/* Request access to the flash interface. */
4602 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4603 			goto nvram_write_end;
4604 
4605 		/* Enable access to flash interface */
4606 		bnx2_enable_nvram_access(bp);
4607 
4608 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4609 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4610 			int j;
4611 
4612 			/* Read the whole page into the buffer
4613 			 * (non-buffer flash only) */
4614 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4615 				if (j == (bp->flash_info->page_size - 4)) {
4616 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4617 				}
4618 				rc = bnx2_nvram_read_dword(bp,
4619 					page_start + j,
4620 					&flash_buffer[j],
4621 					cmd_flags);
4622 
4623 				if (rc)
4624 					goto nvram_write_end;
4625 
4626 				cmd_flags = 0;
4627 			}
4628 		}
4629 
4630 		/* Enable writes to flash interface (unlock write-protect) */
4631 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4632 			goto nvram_write_end;
4633 
4634 		/* Loop to write back the buffer data from page_start to
4635 		 * data_start */
4636 		i = 0;
4637 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4638 			/* Erase the page */
4639 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4640 				goto nvram_write_end;
4641 
4642 			/* Re-enable the write again for the actual write */
4643 			bnx2_enable_nvram_write(bp);
4644 
4645 			for (addr = page_start; addr < data_start;
4646 				addr += 4, i += 4) {
4647 
4648 				rc = bnx2_nvram_write_dword(bp, addr,
4649 					&flash_buffer[i], cmd_flags);
4650 
4651 				if (rc != 0)
4652 					goto nvram_write_end;
4653 
4654 				cmd_flags = 0;
4655 			}
4656 		}
4657 
4658 		/* Loop to write the new data from data_start to data_end */
4659 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4660 			if ((addr == page_end - 4) ||
4661 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4662 				 (addr == data_end - 4))) {
4663 
4664 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4665 			}
4666 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4667 				cmd_flags);
4668 
4669 			if (rc != 0)
4670 				goto nvram_write_end;
4671 
4672 			cmd_flags = 0;
4673 			buf += 4;
4674 		}
4675 
4676 		/* Loop to write back the buffer data from data_end
4677 		 * to page_end */
4678 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4679 			for (addr = data_end; addr < page_end;
4680 				addr += 4, i += 4) {
4681 
4682 				if (addr == page_end-4) {
4683 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4684                 		}
4685 				rc = bnx2_nvram_write_dword(bp, addr,
4686 					&flash_buffer[i], cmd_flags);
4687 
4688 				if (rc != 0)
4689 					goto nvram_write_end;
4690 
4691 				cmd_flags = 0;
4692 			}
4693 		}
4694 
4695 		/* Disable writes to flash interface (lock write-protect) */
4696 		bnx2_disable_nvram_write(bp);
4697 
4698 		/* Disable access to flash interface */
4699 		bnx2_disable_nvram_access(bp);
4700 		bnx2_release_nvram_lock(bp);
4701 
4702 		/* Increment written */
4703 		written += data_end - data_start;
4704 	}
4705 
4706 nvram_write_end:
4707 	kfree(flash_buffer);
4708 	kfree(align_buf);
4709 	return rc;
4710 }
4711 
4712 static void
4713 bnx2_init_fw_cap(struct bnx2 *bp)
4714 {
4715 	u32 val, sig = 0;
4716 
4717 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4718 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4719 
4720 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4721 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4722 
4723 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4724 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4725 		return;
4726 
4727 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4728 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4729 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4730 	}
4731 
4732 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4733 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4734 		u32 link;
4735 
4736 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4737 
4738 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4739 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4740 			bp->phy_port = PORT_FIBRE;
4741 		else
4742 			bp->phy_port = PORT_TP;
4743 
4744 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4745 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4746 	}
4747 
4748 	if (netif_running(bp->dev) && sig)
4749 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4750 }
4751 
4752 static void
4753 bnx2_setup_msix_tbl(struct bnx2 *bp)
4754 {
4755 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4756 
4757 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4758 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4759 }
4760 
4761 static void
4762 bnx2_wait_dma_complete(struct bnx2 *bp)
4763 {
4764 	u32 val;
4765 	int i;
4766 
4767 	/*
4768 	 * Wait for the current PCI transaction to complete before
4769 	 * issuing a reset.
4770 	 */
4771 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4772 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4773 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4774 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4775 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4776 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4777 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4778 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4779 		udelay(5);
4780 	} else {  /* 5709 */
4781 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4782 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4783 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4784 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4785 
4786 		for (i = 0; i < 100; i++) {
4787 			msleep(1);
4788 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4789 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4790 				break;
4791 		}
4792 	}
4793 
4794 	return;
4795 }
4796 
4797 
4798 static int
4799 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4800 {
4801 	u32 val;
4802 	int i, rc = 0;
4803 	u8 old_port;
4804 
4805 	/* Wait for the current PCI transaction to complete before
4806 	 * issuing a reset. */
4807 	bnx2_wait_dma_complete(bp);
4808 
4809 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4810 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4811 
4812 	/* Deposit a driver reset signature so the firmware knows that
4813 	 * this is a soft reset. */
4814 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4815 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4816 
4817 	/* Do a dummy read to force the chip to complete all current transaction
4818 	 * before we issue a reset. */
4819 	val = BNX2_RD(bp, BNX2_MISC_ID);
4820 
4821 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4822 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4823 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4824 		udelay(5);
4825 
4826 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4827 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4828 
4829 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4830 
4831 	} else {
4832 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4833 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4834 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4835 
4836 		/* Chip reset. */
4837 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4838 
4839 		/* Reading back any register after chip reset will hang the
4840 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4841 		 * of margin for write posting.
4842 		 */
4843 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4844 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4845 			msleep(20);
4846 
4847 		/* Reset takes approximate 30 usec */
4848 		for (i = 0; i < 10; i++) {
4849 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4850 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4851 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4852 				break;
4853 			udelay(10);
4854 		}
4855 
4856 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4857 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4858 			pr_err("Chip reset did not complete\n");
4859 			return -EBUSY;
4860 		}
4861 	}
4862 
4863 	/* Make sure byte swapping is properly configured. */
4864 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4865 	if (val != 0x01020304) {
4866 		pr_err("Chip not in correct endian mode\n");
4867 		return -ENODEV;
4868 	}
4869 
4870 	/* Wait for the firmware to finish its initialization. */
4871 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4872 	if (rc)
4873 		return rc;
4874 
4875 	spin_lock_bh(&bp->phy_lock);
4876 	old_port = bp->phy_port;
4877 	bnx2_init_fw_cap(bp);
4878 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4879 	    old_port != bp->phy_port)
4880 		bnx2_set_default_remote_link(bp);
4881 	spin_unlock_bh(&bp->phy_lock);
4882 
4883 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4884 		/* Adjust the voltage regular to two steps lower.  The default
4885 		 * of this register is 0x0000000e. */
4886 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4887 
4888 		/* Remove bad rbuf memory from the free pool. */
4889 		rc = bnx2_alloc_bad_rbuf(bp);
4890 	}
4891 
4892 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4893 		bnx2_setup_msix_tbl(bp);
4894 		/* Prevent MSIX table reads and write from timing out */
4895 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4896 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4897 	}
4898 
4899 	return rc;
4900 }
4901 
4902 static int
4903 bnx2_init_chip(struct bnx2 *bp)
4904 {
4905 	u32 val, mtu;
4906 	int rc, i;
4907 
4908 	/* Make sure the interrupt is not active. */
4909 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4910 
4911 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4912 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4913 #ifdef __BIG_ENDIAN
4914 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4915 #endif
4916 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4917 	      DMA_READ_CHANS << 12 |
4918 	      DMA_WRITE_CHANS << 16;
4919 
4920 	val |= (0x2 << 20) | (1 << 11);
4921 
4922 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4923 		val |= (1 << 23);
4924 
4925 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4926 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4927 	    !(bp->flags & BNX2_FLAG_PCIX))
4928 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4929 
4930 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4931 
4932 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4933 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4934 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4935 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4936 	}
4937 
4938 	if (bp->flags & BNX2_FLAG_PCIX) {
4939 		u16 val16;
4940 
4941 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4942 				     &val16);
4943 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4944 				      val16 & ~PCI_X_CMD_ERO);
4945 	}
4946 
4947 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4948 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4949 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4950 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4951 
4952 	/* Initialize context mapping and zero out the quick contexts.  The
4953 	 * context block must have already been enabled. */
4954 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4955 		rc = bnx2_init_5709_context(bp);
4956 		if (rc)
4957 			return rc;
4958 	} else
4959 		bnx2_init_context(bp);
4960 
4961 	if ((rc = bnx2_init_cpus(bp)) != 0)
4962 		return rc;
4963 
4964 	bnx2_init_nvram(bp);
4965 
4966 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4967 
4968 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4969 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4970 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4971 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4972 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4973 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4974 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4975 	}
4976 
4977 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4978 
4979 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4980 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4981 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4982 
4983 	val = (BNX2_PAGE_BITS - 8) << 24;
4984 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4985 
4986 	/* Configure page size. */
4987 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4988 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4989 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4990 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4991 
4992 	val = bp->mac_addr[0] +
4993 	      (bp->mac_addr[1] << 8) +
4994 	      (bp->mac_addr[2] << 16) +
4995 	      bp->mac_addr[3] +
4996 	      (bp->mac_addr[4] << 8) +
4997 	      (bp->mac_addr[5] << 16);
4998 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4999 
5000 	/* Program the MTU.  Also include 4 bytes for CRC32. */
5001 	mtu = bp->dev->mtu;
5002 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
5003 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
5004 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5005 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5006 
5007 	if (mtu < ETH_DATA_LEN)
5008 		mtu = ETH_DATA_LEN;
5009 
5010 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5011 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5012 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5013 
5014 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5015 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5016 		bp->bnx2_napi[i].last_status_idx = 0;
5017 
5018 	bp->idle_chk_status_idx = 0xffff;
5019 
5020 	/* Set up how to generate a link change interrupt. */
5021 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5022 
5023 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5024 		(u64) bp->status_blk_mapping & 0xffffffff);
5025 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5026 
5027 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5028 		(u64) bp->stats_blk_mapping & 0xffffffff);
5029 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5030 		(u64) bp->stats_blk_mapping >> 32);
5031 
5032 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5033 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5034 
5035 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5036 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5037 
5038 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5039 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5040 
5041 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5042 
5043 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5044 
5045 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5046 		(bp->com_ticks_int << 16) | bp->com_ticks);
5047 
5048 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5049 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5050 
5051 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5052 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5053 	else
5054 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5055 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5056 
5057 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5058 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5059 	else {
5060 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5061 		      BNX2_HC_CONFIG_COLLECT_STATS;
5062 	}
5063 
5064 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5065 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5066 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5067 
5068 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5069 	}
5070 
5071 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5072 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5073 
5074 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5075 
5076 	if (bp->rx_ticks < 25)
5077 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5078 	else
5079 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5080 
5081 	for (i = 1; i < bp->irq_nvecs; i++) {
5082 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5083 			   BNX2_HC_SB_CONFIG_1;
5084 
5085 		BNX2_WR(bp, base,
5086 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5087 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5088 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5089 
5090 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5091 			(bp->tx_quick_cons_trip_int << 16) |
5092 			 bp->tx_quick_cons_trip);
5093 
5094 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5095 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5096 
5097 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5098 			(bp->rx_quick_cons_trip_int << 16) |
5099 			bp->rx_quick_cons_trip);
5100 
5101 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5102 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5103 	}
5104 
5105 	/* Clear internal stats counters. */
5106 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5107 
5108 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5109 
5110 	/* Initialize the receive filter. */
5111 	bnx2_set_rx_mode(bp->dev);
5112 
5113 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5114 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5115 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5116 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5117 	}
5118 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5119 			  1, 0);
5120 
5121 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5122 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5123 
5124 	udelay(20);
5125 
5126 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5127 
5128 	return rc;
5129 }
5130 
5131 static void
5132 bnx2_clear_ring_states(struct bnx2 *bp)
5133 {
5134 	struct bnx2_napi *bnapi;
5135 	struct bnx2_tx_ring_info *txr;
5136 	struct bnx2_rx_ring_info *rxr;
5137 	int i;
5138 
5139 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5140 		bnapi = &bp->bnx2_napi[i];
5141 		txr = &bnapi->tx_ring;
5142 		rxr = &bnapi->rx_ring;
5143 
5144 		txr->tx_cons = 0;
5145 		txr->hw_tx_cons = 0;
5146 		rxr->rx_prod_bseq = 0;
5147 		rxr->rx_prod = 0;
5148 		rxr->rx_cons = 0;
5149 		rxr->rx_pg_prod = 0;
5150 		rxr->rx_pg_cons = 0;
5151 	}
5152 }
5153 
5154 static void
5155 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5156 {
5157 	u32 val, offset0, offset1, offset2, offset3;
5158 	u32 cid_addr = GET_CID_ADDR(cid);
5159 
5160 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5161 		offset0 = BNX2_L2CTX_TYPE_XI;
5162 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5163 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5164 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5165 	} else {
5166 		offset0 = BNX2_L2CTX_TYPE;
5167 		offset1 = BNX2_L2CTX_CMD_TYPE;
5168 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5169 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5170 	}
5171 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5172 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5173 
5174 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5175 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5176 
5177 	val = (u64) txr->tx_desc_mapping >> 32;
5178 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5179 
5180 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5181 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5182 }
5183 
5184 static void
5185 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5186 {
5187 	struct bnx2_tx_bd *txbd;
5188 	u32 cid = TX_CID;
5189 	struct bnx2_napi *bnapi;
5190 	struct bnx2_tx_ring_info *txr;
5191 
5192 	bnapi = &bp->bnx2_napi[ring_num];
5193 	txr = &bnapi->tx_ring;
5194 
5195 	if (ring_num == 0)
5196 		cid = TX_CID;
5197 	else
5198 		cid = TX_TSS_CID + ring_num - 1;
5199 
5200 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5201 
5202 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5203 
5204 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5205 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5206 
5207 	txr->tx_prod = 0;
5208 	txr->tx_prod_bseq = 0;
5209 
5210 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5211 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5212 
5213 	bnx2_init_tx_context(bp, cid, txr);
5214 }
5215 
5216 static void
5217 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5218 		     u32 buf_size, int num_rings)
5219 {
5220 	int i;
5221 	struct bnx2_rx_bd *rxbd;
5222 
5223 	for (i = 0; i < num_rings; i++) {
5224 		int j;
5225 
5226 		rxbd = &rx_ring[i][0];
5227 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5228 			rxbd->rx_bd_len = buf_size;
5229 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5230 		}
5231 		if (i == (num_rings - 1))
5232 			j = 0;
5233 		else
5234 			j = i + 1;
5235 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5236 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5237 	}
5238 }
5239 
5240 static void
5241 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5242 {
5243 	int i;
5244 	u16 prod, ring_prod;
5245 	u32 cid, rx_cid_addr, val;
5246 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5247 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5248 
5249 	if (ring_num == 0)
5250 		cid = RX_CID;
5251 	else
5252 		cid = RX_RSS_CID + ring_num - 1;
5253 
5254 	rx_cid_addr = GET_CID_ADDR(cid);
5255 
5256 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5257 			     bp->rx_buf_use_size, bp->rx_max_ring);
5258 
5259 	bnx2_init_rx_context(bp, cid);
5260 
5261 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5262 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5263 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5264 	}
5265 
5266 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5267 	if (bp->rx_pg_ring_size) {
5268 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5269 				     rxr->rx_pg_desc_mapping,
5270 				     PAGE_SIZE, bp->rx_max_pg_ring);
5271 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5272 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5273 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5274 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5275 
5276 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5277 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5278 
5279 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5280 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5281 
5282 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5283 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5284 	}
5285 
5286 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5287 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5288 
5289 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5290 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5291 
5292 	ring_prod = prod = rxr->rx_pg_prod;
5293 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5294 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5295 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5296 				    ring_num, i, bp->rx_pg_ring_size);
5297 			break;
5298 		}
5299 		prod = BNX2_NEXT_RX_BD(prod);
5300 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5301 	}
5302 	rxr->rx_pg_prod = prod;
5303 
5304 	ring_prod = prod = rxr->rx_prod;
5305 	for (i = 0; i < bp->rx_ring_size; i++) {
5306 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5307 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5308 				    ring_num, i, bp->rx_ring_size);
5309 			break;
5310 		}
5311 		prod = BNX2_NEXT_RX_BD(prod);
5312 		ring_prod = BNX2_RX_RING_IDX(prod);
5313 	}
5314 	rxr->rx_prod = prod;
5315 
5316 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5317 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5318 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5319 
5320 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5321 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5322 
5323 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5324 }
5325 
5326 static void
5327 bnx2_init_all_rings(struct bnx2 *bp)
5328 {
5329 	int i;
5330 	u32 val;
5331 
5332 	bnx2_clear_ring_states(bp);
5333 
5334 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5335 	for (i = 0; i < bp->num_tx_rings; i++)
5336 		bnx2_init_tx_ring(bp, i);
5337 
5338 	if (bp->num_tx_rings > 1)
5339 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5340 			(TX_TSS_CID << 7));
5341 
5342 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5343 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5344 
5345 	for (i = 0; i < bp->num_rx_rings; i++)
5346 		bnx2_init_rx_ring(bp, i);
5347 
5348 	if (bp->num_rx_rings > 1) {
5349 		u32 tbl_32 = 0;
5350 
5351 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5352 			int shift = (i % 8) << 2;
5353 
5354 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5355 			if ((i % 8) == 7) {
5356 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5357 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5358 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5359 					BNX2_RLUP_RSS_COMMAND_WRITE |
5360 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5361 				tbl_32 = 0;
5362 			}
5363 		}
5364 
5365 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5366 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5367 
5368 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5369 
5370 	}
5371 }
5372 
5373 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5374 {
5375 	u32 max, num_rings = 1;
5376 
5377 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5378 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5379 		num_rings++;
5380 	}
5381 	/* round to next power of 2 */
5382 	max = max_size;
5383 	while ((max & num_rings) == 0)
5384 		max >>= 1;
5385 
5386 	if (num_rings != max)
5387 		max <<= 1;
5388 
5389 	return max;
5390 }
5391 
5392 static void
5393 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5394 {
5395 	u32 rx_size, rx_space, jumbo_size;
5396 
5397 	/* 8 for CRC and VLAN */
5398 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5399 
5400 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5401 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5402 
5403 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5404 	bp->rx_pg_ring_size = 0;
5405 	bp->rx_max_pg_ring = 0;
5406 	bp->rx_max_pg_ring_idx = 0;
5407 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5408 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5409 
5410 		jumbo_size = size * pages;
5411 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5412 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5413 
5414 		bp->rx_pg_ring_size = jumbo_size;
5415 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5416 							BNX2_MAX_RX_PG_RINGS);
5417 		bp->rx_max_pg_ring_idx =
5418 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5419 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5420 		bp->rx_copy_thresh = 0;
5421 	}
5422 
5423 	bp->rx_buf_use_size = rx_size;
5424 	/* hw alignment + build_skb() overhead*/
5425 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5426 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5427 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5428 	bp->rx_ring_size = size;
5429 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5430 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5431 }
5432 
5433 static void
5434 bnx2_free_tx_skbs(struct bnx2 *bp)
5435 {
5436 	int i;
5437 
5438 	for (i = 0; i < bp->num_tx_rings; i++) {
5439 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5440 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5441 		int j;
5442 
5443 		if (txr->tx_buf_ring == NULL)
5444 			continue;
5445 
5446 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5447 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5448 			struct sk_buff *skb = tx_buf->skb;
5449 			int k, last;
5450 
5451 			if (skb == NULL) {
5452 				j = BNX2_NEXT_TX_BD(j);
5453 				continue;
5454 			}
5455 
5456 			dma_unmap_single(&bp->pdev->dev,
5457 					 dma_unmap_addr(tx_buf, mapping),
5458 					 skb_headlen(skb),
5459 					 PCI_DMA_TODEVICE);
5460 
5461 			tx_buf->skb = NULL;
5462 
5463 			last = tx_buf->nr_frags;
5464 			j = BNX2_NEXT_TX_BD(j);
5465 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5466 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5467 				dma_unmap_page(&bp->pdev->dev,
5468 					dma_unmap_addr(tx_buf, mapping),
5469 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5470 					PCI_DMA_TODEVICE);
5471 			}
5472 			dev_kfree_skb(skb);
5473 		}
5474 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5475 	}
5476 }
5477 
5478 static void
5479 bnx2_free_rx_skbs(struct bnx2 *bp)
5480 {
5481 	int i;
5482 
5483 	for (i = 0; i < bp->num_rx_rings; i++) {
5484 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5485 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5486 		int j;
5487 
5488 		if (rxr->rx_buf_ring == NULL)
5489 			return;
5490 
5491 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5492 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5493 			u8 *data = rx_buf->data;
5494 
5495 			if (data == NULL)
5496 				continue;
5497 
5498 			dma_unmap_single(&bp->pdev->dev,
5499 					 dma_unmap_addr(rx_buf, mapping),
5500 					 bp->rx_buf_use_size,
5501 					 PCI_DMA_FROMDEVICE);
5502 
5503 			rx_buf->data = NULL;
5504 
5505 			kfree(data);
5506 		}
5507 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5508 			bnx2_free_rx_page(bp, rxr, j);
5509 	}
5510 }
5511 
5512 static void
5513 bnx2_free_skbs(struct bnx2 *bp)
5514 {
5515 	bnx2_free_tx_skbs(bp);
5516 	bnx2_free_rx_skbs(bp);
5517 }
5518 
5519 static int
5520 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5521 {
5522 	int rc;
5523 
5524 	rc = bnx2_reset_chip(bp, reset_code);
5525 	bnx2_free_skbs(bp);
5526 	if (rc)
5527 		return rc;
5528 
5529 	if ((rc = bnx2_init_chip(bp)) != 0)
5530 		return rc;
5531 
5532 	bnx2_init_all_rings(bp);
5533 	return 0;
5534 }
5535 
5536 static int
5537 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5538 {
5539 	int rc;
5540 
5541 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5542 		return rc;
5543 
5544 	spin_lock_bh(&bp->phy_lock);
5545 	bnx2_init_phy(bp, reset_phy);
5546 	bnx2_set_link(bp);
5547 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5548 		bnx2_remote_phy_event(bp);
5549 	spin_unlock_bh(&bp->phy_lock);
5550 	return 0;
5551 }
5552 
5553 static int
5554 bnx2_shutdown_chip(struct bnx2 *bp)
5555 {
5556 	u32 reset_code;
5557 
5558 	if (bp->flags & BNX2_FLAG_NO_WOL)
5559 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5560 	else if (bp->wol)
5561 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5562 	else
5563 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5564 
5565 	return bnx2_reset_chip(bp, reset_code);
5566 }
5567 
5568 static int
5569 bnx2_test_registers(struct bnx2 *bp)
5570 {
5571 	int ret;
5572 	int i, is_5709;
5573 	static const struct {
5574 		u16   offset;
5575 		u16   flags;
5576 #define BNX2_FL_NOT_5709	1
5577 		u32   rw_mask;
5578 		u32   ro_mask;
5579 	} reg_tbl[] = {
5580 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5581 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5582 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5583 
5584 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5585 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5587 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5588 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5589 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5590 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5591 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5592 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5593 
5594 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5595 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5596 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5597 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5598 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5599 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5600 
5601 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5602 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5603 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5604 
5605 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5606 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5607 
5608 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5609 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5610 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5611 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5612 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5613 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5614 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5615 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5616 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5617 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5618 
5619 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5620 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5621 
5622 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5623 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5624 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5625 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5626 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5627 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5628 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5629 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5630 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5631 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5632 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5633 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5634 
5635 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5636 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5637 
5638 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5639 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5640 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5641 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5642 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5643 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5644 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5645 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5646 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5647 
5648 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5649 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5650 
5651 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5652 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5653 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5654 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5655 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5656 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5657 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5658 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5659 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5660 
5661 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5662 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5663 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5664 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5665 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5666 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5667 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5668 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5669 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5670 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5671 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5672 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5673 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5674 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5675 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5676 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5677 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5678 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5679 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5680 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5681 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5682 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5683 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5684 
5685 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5686 	};
5687 
5688 	ret = 0;
5689 	is_5709 = 0;
5690 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5691 		is_5709 = 1;
5692 
5693 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5694 		u32 offset, rw_mask, ro_mask, save_val, val;
5695 		u16 flags = reg_tbl[i].flags;
5696 
5697 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5698 			continue;
5699 
5700 		offset = (u32) reg_tbl[i].offset;
5701 		rw_mask = reg_tbl[i].rw_mask;
5702 		ro_mask = reg_tbl[i].ro_mask;
5703 
5704 		save_val = readl(bp->regview + offset);
5705 
5706 		writel(0, bp->regview + offset);
5707 
5708 		val = readl(bp->regview + offset);
5709 		if ((val & rw_mask) != 0) {
5710 			goto reg_test_err;
5711 		}
5712 
5713 		if ((val & ro_mask) != (save_val & ro_mask)) {
5714 			goto reg_test_err;
5715 		}
5716 
5717 		writel(0xffffffff, bp->regview + offset);
5718 
5719 		val = readl(bp->regview + offset);
5720 		if ((val & rw_mask) != rw_mask) {
5721 			goto reg_test_err;
5722 		}
5723 
5724 		if ((val & ro_mask) != (save_val & ro_mask)) {
5725 			goto reg_test_err;
5726 		}
5727 
5728 		writel(save_val, bp->regview + offset);
5729 		continue;
5730 
5731 reg_test_err:
5732 		writel(save_val, bp->regview + offset);
5733 		ret = -ENODEV;
5734 		break;
5735 	}
5736 	return ret;
5737 }
5738 
5739 static int
5740 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5741 {
5742 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5743 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5744 	int i;
5745 
5746 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5747 		u32 offset;
5748 
5749 		for (offset = 0; offset < size; offset += 4) {
5750 
5751 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5752 
5753 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5754 				test_pattern[i]) {
5755 				return -ENODEV;
5756 			}
5757 		}
5758 	}
5759 	return 0;
5760 }
5761 
5762 static int
5763 bnx2_test_memory(struct bnx2 *bp)
5764 {
5765 	int ret = 0;
5766 	int i;
5767 	static struct mem_entry {
5768 		u32   offset;
5769 		u32   len;
5770 	} mem_tbl_5706[] = {
5771 		{ 0x60000,  0x4000 },
5772 		{ 0xa0000,  0x3000 },
5773 		{ 0xe0000,  0x4000 },
5774 		{ 0x120000, 0x4000 },
5775 		{ 0x1a0000, 0x4000 },
5776 		{ 0x160000, 0x4000 },
5777 		{ 0xffffffff, 0    },
5778 	},
5779 	mem_tbl_5709[] = {
5780 		{ 0x60000,  0x4000 },
5781 		{ 0xa0000,  0x3000 },
5782 		{ 0xe0000,  0x4000 },
5783 		{ 0x120000, 0x4000 },
5784 		{ 0x1a0000, 0x4000 },
5785 		{ 0xffffffff, 0    },
5786 	};
5787 	struct mem_entry *mem_tbl;
5788 
5789 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5790 		mem_tbl = mem_tbl_5709;
5791 	else
5792 		mem_tbl = mem_tbl_5706;
5793 
5794 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5795 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5796 			mem_tbl[i].len)) != 0) {
5797 			return ret;
5798 		}
5799 	}
5800 
5801 	return ret;
5802 }
5803 
5804 #define BNX2_MAC_LOOPBACK	0
5805 #define BNX2_PHY_LOOPBACK	1
5806 
5807 static int
5808 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5809 {
5810 	unsigned int pkt_size, num_pkts, i;
5811 	struct sk_buff *skb;
5812 	u8 *data;
5813 	unsigned char *packet;
5814 	u16 rx_start_idx, rx_idx;
5815 	dma_addr_t map;
5816 	struct bnx2_tx_bd *txbd;
5817 	struct bnx2_sw_bd *rx_buf;
5818 	struct l2_fhdr *rx_hdr;
5819 	int ret = -ENODEV;
5820 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5821 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5822 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5823 
5824 	tx_napi = bnapi;
5825 
5826 	txr = &tx_napi->tx_ring;
5827 	rxr = &bnapi->rx_ring;
5828 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5829 		bp->loopback = MAC_LOOPBACK;
5830 		bnx2_set_mac_loopback(bp);
5831 	}
5832 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5833 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5834 			return 0;
5835 
5836 		bp->loopback = PHY_LOOPBACK;
5837 		bnx2_set_phy_loopback(bp);
5838 	}
5839 	else
5840 		return -EINVAL;
5841 
5842 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5843 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5844 	if (!skb)
5845 		return -ENOMEM;
5846 	packet = skb_put(skb, pkt_size);
5847 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5848 	memset(packet + ETH_ALEN, 0x0, 8);
5849 	for (i = 14; i < pkt_size; i++)
5850 		packet[i] = (unsigned char) (i & 0xff);
5851 
5852 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5853 			     PCI_DMA_TODEVICE);
5854 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5855 		dev_kfree_skb(skb);
5856 		return -EIO;
5857 	}
5858 
5859 	BNX2_WR(bp, BNX2_HC_COMMAND,
5860 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5861 
5862 	BNX2_RD(bp, BNX2_HC_COMMAND);
5863 
5864 	udelay(5);
5865 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5866 
5867 	num_pkts = 0;
5868 
5869 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5870 
5871 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5872 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5873 	txbd->tx_bd_mss_nbytes = pkt_size;
5874 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5875 
5876 	num_pkts++;
5877 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5878 	txr->tx_prod_bseq += pkt_size;
5879 
5880 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5881 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5882 
5883 	udelay(100);
5884 
5885 	BNX2_WR(bp, BNX2_HC_COMMAND,
5886 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5887 
5888 	BNX2_RD(bp, BNX2_HC_COMMAND);
5889 
5890 	udelay(5);
5891 
5892 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5893 	dev_kfree_skb(skb);
5894 
5895 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5896 		goto loopback_test_done;
5897 
5898 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5899 	if (rx_idx != rx_start_idx + num_pkts) {
5900 		goto loopback_test_done;
5901 	}
5902 
5903 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5904 	data = rx_buf->data;
5905 
5906 	rx_hdr = get_l2_fhdr(data);
5907 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5908 
5909 	dma_sync_single_for_cpu(&bp->pdev->dev,
5910 		dma_unmap_addr(rx_buf, mapping),
5911 		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5912 
5913 	if (rx_hdr->l2_fhdr_status &
5914 		(L2_FHDR_ERRORS_BAD_CRC |
5915 		L2_FHDR_ERRORS_PHY_DECODE |
5916 		L2_FHDR_ERRORS_ALIGNMENT |
5917 		L2_FHDR_ERRORS_TOO_SHORT |
5918 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5919 
5920 		goto loopback_test_done;
5921 	}
5922 
5923 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5924 		goto loopback_test_done;
5925 	}
5926 
5927 	for (i = 14; i < pkt_size; i++) {
5928 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5929 			goto loopback_test_done;
5930 		}
5931 	}
5932 
5933 	ret = 0;
5934 
5935 loopback_test_done:
5936 	bp->loopback = 0;
5937 	return ret;
5938 }
5939 
5940 #define BNX2_MAC_LOOPBACK_FAILED	1
5941 #define BNX2_PHY_LOOPBACK_FAILED	2
5942 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5943 					 BNX2_PHY_LOOPBACK_FAILED)
5944 
5945 static int
5946 bnx2_test_loopback(struct bnx2 *bp)
5947 {
5948 	int rc = 0;
5949 
5950 	if (!netif_running(bp->dev))
5951 		return BNX2_LOOPBACK_FAILED;
5952 
5953 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5954 	spin_lock_bh(&bp->phy_lock);
5955 	bnx2_init_phy(bp, 1);
5956 	spin_unlock_bh(&bp->phy_lock);
5957 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5958 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5959 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5960 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5961 	return rc;
5962 }
5963 
5964 #define NVRAM_SIZE 0x200
5965 #define CRC32_RESIDUAL 0xdebb20e3
5966 
5967 static int
5968 bnx2_test_nvram(struct bnx2 *bp)
5969 {
5970 	__be32 buf[NVRAM_SIZE / 4];
5971 	u8 *data = (u8 *) buf;
5972 	int rc = 0;
5973 	u32 magic, csum;
5974 
5975 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5976 		goto test_nvram_done;
5977 
5978         magic = be32_to_cpu(buf[0]);
5979 	if (magic != 0x669955aa) {
5980 		rc = -ENODEV;
5981 		goto test_nvram_done;
5982 	}
5983 
5984 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5985 		goto test_nvram_done;
5986 
5987 	csum = ether_crc_le(0x100, data);
5988 	if (csum != CRC32_RESIDUAL) {
5989 		rc = -ENODEV;
5990 		goto test_nvram_done;
5991 	}
5992 
5993 	csum = ether_crc_le(0x100, data + 0x100);
5994 	if (csum != CRC32_RESIDUAL) {
5995 		rc = -ENODEV;
5996 	}
5997 
5998 test_nvram_done:
5999 	return rc;
6000 }
6001 
6002 static int
6003 bnx2_test_link(struct bnx2 *bp)
6004 {
6005 	u32 bmsr;
6006 
6007 	if (!netif_running(bp->dev))
6008 		return -ENODEV;
6009 
6010 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6011 		if (bp->link_up)
6012 			return 0;
6013 		return -ENODEV;
6014 	}
6015 	spin_lock_bh(&bp->phy_lock);
6016 	bnx2_enable_bmsr1(bp);
6017 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6018 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6019 	bnx2_disable_bmsr1(bp);
6020 	spin_unlock_bh(&bp->phy_lock);
6021 
6022 	if (bmsr & BMSR_LSTATUS) {
6023 		return 0;
6024 	}
6025 	return -ENODEV;
6026 }
6027 
6028 static int
6029 bnx2_test_intr(struct bnx2 *bp)
6030 {
6031 	int i;
6032 	u16 status_idx;
6033 
6034 	if (!netif_running(bp->dev))
6035 		return -ENODEV;
6036 
6037 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6038 
6039 	/* This register is not touched during run-time. */
6040 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6041 	BNX2_RD(bp, BNX2_HC_COMMAND);
6042 
6043 	for (i = 0; i < 10; i++) {
6044 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6045 			status_idx) {
6046 
6047 			break;
6048 		}
6049 
6050 		msleep_interruptible(10);
6051 	}
6052 	if (i < 10)
6053 		return 0;
6054 
6055 	return -ENODEV;
6056 }
6057 
6058 /* Determining link for parallel detection. */
6059 static int
6060 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6061 {
6062 	u32 mode_ctl, an_dbg, exp;
6063 
6064 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6065 		return 0;
6066 
6067 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6068 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6069 
6070 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6071 		return 0;
6072 
6073 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6074 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6075 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6076 
6077 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6078 		return 0;
6079 
6080 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6081 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6082 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6083 
6084 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6085 		return 0;
6086 
6087 	return 1;
6088 }
6089 
6090 static void
6091 bnx2_5706_serdes_timer(struct bnx2 *bp)
6092 {
6093 	int check_link = 1;
6094 
6095 	spin_lock(&bp->phy_lock);
6096 	if (bp->serdes_an_pending) {
6097 		bp->serdes_an_pending--;
6098 		check_link = 0;
6099 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6100 		u32 bmcr;
6101 
6102 		bp->current_interval = BNX2_TIMER_INTERVAL;
6103 
6104 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6105 
6106 		if (bmcr & BMCR_ANENABLE) {
6107 			if (bnx2_5706_serdes_has_link(bp)) {
6108 				bmcr &= ~BMCR_ANENABLE;
6109 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6110 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6111 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6112 			}
6113 		}
6114 	}
6115 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6116 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6117 		u32 phy2;
6118 
6119 		bnx2_write_phy(bp, 0x17, 0x0f01);
6120 		bnx2_read_phy(bp, 0x15, &phy2);
6121 		if (phy2 & 0x20) {
6122 			u32 bmcr;
6123 
6124 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6125 			bmcr |= BMCR_ANENABLE;
6126 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6127 
6128 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6129 		}
6130 	} else
6131 		bp->current_interval = BNX2_TIMER_INTERVAL;
6132 
6133 	if (check_link) {
6134 		u32 val;
6135 
6136 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6137 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6138 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6139 
6140 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6141 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6142 				bnx2_5706s_force_link_dn(bp, 1);
6143 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6144 			} else
6145 				bnx2_set_link(bp);
6146 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6147 			bnx2_set_link(bp);
6148 	}
6149 	spin_unlock(&bp->phy_lock);
6150 }
6151 
6152 static void
6153 bnx2_5708_serdes_timer(struct bnx2 *bp)
6154 {
6155 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6156 		return;
6157 
6158 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6159 		bp->serdes_an_pending = 0;
6160 		return;
6161 	}
6162 
6163 	spin_lock(&bp->phy_lock);
6164 	if (bp->serdes_an_pending)
6165 		bp->serdes_an_pending--;
6166 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6167 		u32 bmcr;
6168 
6169 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6170 		if (bmcr & BMCR_ANENABLE) {
6171 			bnx2_enable_forced_2g5(bp);
6172 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6173 		} else {
6174 			bnx2_disable_forced_2g5(bp);
6175 			bp->serdes_an_pending = 2;
6176 			bp->current_interval = BNX2_TIMER_INTERVAL;
6177 		}
6178 
6179 	} else
6180 		bp->current_interval = BNX2_TIMER_INTERVAL;
6181 
6182 	spin_unlock(&bp->phy_lock);
6183 }
6184 
6185 static void
6186 bnx2_timer(unsigned long data)
6187 {
6188 	struct bnx2 *bp = (struct bnx2 *) data;
6189 
6190 	if (!netif_running(bp->dev))
6191 		return;
6192 
6193 	if (atomic_read(&bp->intr_sem) != 0)
6194 		goto bnx2_restart_timer;
6195 
6196 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6197 	     BNX2_FLAG_USING_MSI)
6198 		bnx2_chk_missed_msi(bp);
6199 
6200 	bnx2_send_heart_beat(bp);
6201 
6202 	bp->stats_blk->stat_FwRxDrop =
6203 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6204 
6205 	/* workaround occasional corrupted counters */
6206 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6207 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6208 			BNX2_HC_COMMAND_STATS_NOW);
6209 
6210 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6211 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6212 			bnx2_5706_serdes_timer(bp);
6213 		else
6214 			bnx2_5708_serdes_timer(bp);
6215 	}
6216 
6217 bnx2_restart_timer:
6218 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6219 }
6220 
6221 static int
6222 bnx2_request_irq(struct bnx2 *bp)
6223 {
6224 	unsigned long flags;
6225 	struct bnx2_irq *irq;
6226 	int rc = 0, i;
6227 
6228 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6229 		flags = 0;
6230 	else
6231 		flags = IRQF_SHARED;
6232 
6233 	for (i = 0; i < bp->irq_nvecs; i++) {
6234 		irq = &bp->irq_tbl[i];
6235 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6236 				 &bp->bnx2_napi[i]);
6237 		if (rc)
6238 			break;
6239 		irq->requested = 1;
6240 	}
6241 	return rc;
6242 }
6243 
6244 static void
6245 __bnx2_free_irq(struct bnx2 *bp)
6246 {
6247 	struct bnx2_irq *irq;
6248 	int i;
6249 
6250 	for (i = 0; i < bp->irq_nvecs; i++) {
6251 		irq = &bp->irq_tbl[i];
6252 		if (irq->requested)
6253 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6254 		irq->requested = 0;
6255 	}
6256 }
6257 
6258 static void
6259 bnx2_free_irq(struct bnx2 *bp)
6260 {
6261 
6262 	__bnx2_free_irq(bp);
6263 	if (bp->flags & BNX2_FLAG_USING_MSI)
6264 		pci_disable_msi(bp->pdev);
6265 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6266 		pci_disable_msix(bp->pdev);
6267 
6268 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6269 }
6270 
6271 static void
6272 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6273 {
6274 	int i, total_vecs;
6275 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6276 	struct net_device *dev = bp->dev;
6277 	const int len = sizeof(bp->irq_tbl[0].name);
6278 
6279 	bnx2_setup_msix_tbl(bp);
6280 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6281 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6282 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6283 
6284 	/*  Need to flush the previous three writes to ensure MSI-X
6285 	 *  is setup properly */
6286 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6287 
6288 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6289 		msix_ent[i].entry = i;
6290 		msix_ent[i].vector = 0;
6291 	}
6292 
6293 	total_vecs = msix_vecs;
6294 #ifdef BCM_CNIC
6295 	total_vecs++;
6296 #endif
6297 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6298 					   BNX2_MIN_MSIX_VEC, total_vecs);
6299 	if (total_vecs < 0)
6300 		return;
6301 
6302 	msix_vecs = total_vecs;
6303 #ifdef BCM_CNIC
6304 	msix_vecs--;
6305 #endif
6306 	bp->irq_nvecs = msix_vecs;
6307 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6308 	for (i = 0; i < total_vecs; i++) {
6309 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6310 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6311 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6312 	}
6313 }
6314 
6315 static int
6316 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6317 {
6318 	int cpus = netif_get_num_default_rss_queues();
6319 	int msix_vecs;
6320 
6321 	if (!bp->num_req_rx_rings)
6322 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6323 	else if (!bp->num_req_tx_rings)
6324 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6325 	else
6326 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6327 
6328 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6329 
6330 	bp->irq_tbl[0].handler = bnx2_interrupt;
6331 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6332 	bp->irq_nvecs = 1;
6333 	bp->irq_tbl[0].vector = bp->pdev->irq;
6334 
6335 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6336 		bnx2_enable_msix(bp, msix_vecs);
6337 
6338 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6339 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6340 		if (pci_enable_msi(bp->pdev) == 0) {
6341 			bp->flags |= BNX2_FLAG_USING_MSI;
6342 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6343 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6344 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6345 			} else
6346 				bp->irq_tbl[0].handler = bnx2_msi;
6347 
6348 			bp->irq_tbl[0].vector = bp->pdev->irq;
6349 		}
6350 	}
6351 
6352 	if (!bp->num_req_tx_rings)
6353 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6354 	else
6355 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6356 
6357 	if (!bp->num_req_rx_rings)
6358 		bp->num_rx_rings = bp->irq_nvecs;
6359 	else
6360 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6361 
6362 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6363 
6364 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6365 }
6366 
6367 /* Called with rtnl_lock */
6368 static int
6369 bnx2_open(struct net_device *dev)
6370 {
6371 	struct bnx2 *bp = netdev_priv(dev);
6372 	int rc;
6373 
6374 	rc = bnx2_request_firmware(bp);
6375 	if (rc < 0)
6376 		goto out;
6377 
6378 	netif_carrier_off(dev);
6379 
6380 	bnx2_disable_int(bp);
6381 
6382 	rc = bnx2_setup_int_mode(bp, disable_msi);
6383 	if (rc)
6384 		goto open_err;
6385 	bnx2_init_napi(bp);
6386 	bnx2_napi_enable(bp);
6387 	rc = bnx2_alloc_mem(bp);
6388 	if (rc)
6389 		goto open_err;
6390 
6391 	rc = bnx2_request_irq(bp);
6392 	if (rc)
6393 		goto open_err;
6394 
6395 	rc = bnx2_init_nic(bp, 1);
6396 	if (rc)
6397 		goto open_err;
6398 
6399 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6400 
6401 	atomic_set(&bp->intr_sem, 0);
6402 
6403 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6404 
6405 	bnx2_enable_int(bp);
6406 
6407 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6408 		/* Test MSI to make sure it is working
6409 		 * If MSI test fails, go back to INTx mode
6410 		 */
6411 		if (bnx2_test_intr(bp) != 0) {
6412 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6413 
6414 			bnx2_disable_int(bp);
6415 			bnx2_free_irq(bp);
6416 
6417 			bnx2_setup_int_mode(bp, 1);
6418 
6419 			rc = bnx2_init_nic(bp, 0);
6420 
6421 			if (!rc)
6422 				rc = bnx2_request_irq(bp);
6423 
6424 			if (rc) {
6425 				del_timer_sync(&bp->timer);
6426 				goto open_err;
6427 			}
6428 			bnx2_enable_int(bp);
6429 		}
6430 	}
6431 	if (bp->flags & BNX2_FLAG_USING_MSI)
6432 		netdev_info(dev, "using MSI\n");
6433 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6434 		netdev_info(dev, "using MSIX\n");
6435 
6436 	netif_tx_start_all_queues(dev);
6437 out:
6438 	return rc;
6439 
6440 open_err:
6441 	bnx2_napi_disable(bp);
6442 	bnx2_free_skbs(bp);
6443 	bnx2_free_irq(bp);
6444 	bnx2_free_mem(bp);
6445 	bnx2_del_napi(bp);
6446 	bnx2_release_firmware(bp);
6447 	goto out;
6448 }
6449 
6450 static void
6451 bnx2_reset_task(struct work_struct *work)
6452 {
6453 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6454 	int rc;
6455 	u16 pcicmd;
6456 
6457 	rtnl_lock();
6458 	if (!netif_running(bp->dev)) {
6459 		rtnl_unlock();
6460 		return;
6461 	}
6462 
6463 	bnx2_netif_stop(bp, true);
6464 
6465 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6466 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6467 		/* in case PCI block has reset */
6468 		pci_restore_state(bp->pdev);
6469 		pci_save_state(bp->pdev);
6470 	}
6471 	rc = bnx2_init_nic(bp, 1);
6472 	if (rc) {
6473 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6474 		bnx2_napi_enable(bp);
6475 		dev_close(bp->dev);
6476 		rtnl_unlock();
6477 		return;
6478 	}
6479 
6480 	atomic_set(&bp->intr_sem, 1);
6481 	bnx2_netif_start(bp, true);
6482 	rtnl_unlock();
6483 }
6484 
6485 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6486 
6487 static void
6488 bnx2_dump_ftq(struct bnx2 *bp)
6489 {
6490 	int i;
6491 	u32 reg, bdidx, cid, valid;
6492 	struct net_device *dev = bp->dev;
6493 	static const struct ftq_reg {
6494 		char *name;
6495 		u32 off;
6496 	} ftq_arr[] = {
6497 		BNX2_FTQ_ENTRY(RV2P_P),
6498 		BNX2_FTQ_ENTRY(RV2P_T),
6499 		BNX2_FTQ_ENTRY(RV2P_M),
6500 		BNX2_FTQ_ENTRY(TBDR_),
6501 		BNX2_FTQ_ENTRY(TDMA_),
6502 		BNX2_FTQ_ENTRY(TXP_),
6503 		BNX2_FTQ_ENTRY(TXP_),
6504 		BNX2_FTQ_ENTRY(TPAT_),
6505 		BNX2_FTQ_ENTRY(RXP_C),
6506 		BNX2_FTQ_ENTRY(RXP_),
6507 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6508 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6509 		BNX2_FTQ_ENTRY(COM_COMQ_),
6510 		BNX2_FTQ_ENTRY(CP_CPQ_),
6511 	};
6512 
6513 	netdev_err(dev, "<--- start FTQ dump --->\n");
6514 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6515 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6516 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6517 
6518 	netdev_err(dev, "CPU states:\n");
6519 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6520 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6521 			   reg, bnx2_reg_rd_ind(bp, reg),
6522 			   bnx2_reg_rd_ind(bp, reg + 4),
6523 			   bnx2_reg_rd_ind(bp, reg + 8),
6524 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6525 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6526 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6527 
6528 	netdev_err(dev, "<--- end FTQ dump --->\n");
6529 	netdev_err(dev, "<--- start TBDC dump --->\n");
6530 	netdev_err(dev, "TBDC free cnt: %ld\n",
6531 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6532 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6533 	for (i = 0; i < 0x20; i++) {
6534 		int j = 0;
6535 
6536 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6537 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6538 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6539 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6540 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6541 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6542 			j++;
6543 
6544 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6545 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6546 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6547 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6548 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6549 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6550 	}
6551 	netdev_err(dev, "<--- end TBDC dump --->\n");
6552 }
6553 
6554 static void
6555 bnx2_dump_state(struct bnx2 *bp)
6556 {
6557 	struct net_device *dev = bp->dev;
6558 	u32 val1, val2;
6559 
6560 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6561 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6562 		   atomic_read(&bp->intr_sem), val1);
6563 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6564 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6565 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6566 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6567 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6568 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6569 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6570 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6571 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6572 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6573 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6574 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6575 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6576 }
6577 
6578 static void
6579 bnx2_tx_timeout(struct net_device *dev)
6580 {
6581 	struct bnx2 *bp = netdev_priv(dev);
6582 
6583 	bnx2_dump_ftq(bp);
6584 	bnx2_dump_state(bp);
6585 	bnx2_dump_mcp_state(bp);
6586 
6587 	/* This allows the netif to be shutdown gracefully before resetting */
6588 	schedule_work(&bp->reset_task);
6589 }
6590 
6591 /* Called with netif_tx_lock.
6592  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6593  * netif_wake_queue().
6594  */
6595 static netdev_tx_t
6596 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6597 {
6598 	struct bnx2 *bp = netdev_priv(dev);
6599 	dma_addr_t mapping;
6600 	struct bnx2_tx_bd *txbd;
6601 	struct bnx2_sw_tx_bd *tx_buf;
6602 	u32 len, vlan_tag_flags, last_frag, mss;
6603 	u16 prod, ring_prod;
6604 	int i;
6605 	struct bnx2_napi *bnapi;
6606 	struct bnx2_tx_ring_info *txr;
6607 	struct netdev_queue *txq;
6608 
6609 	/*  Determine which tx ring we will be placed on */
6610 	i = skb_get_queue_mapping(skb);
6611 	bnapi = &bp->bnx2_napi[i];
6612 	txr = &bnapi->tx_ring;
6613 	txq = netdev_get_tx_queue(dev, i);
6614 
6615 	if (unlikely(bnx2_tx_avail(bp, txr) <
6616 	    (skb_shinfo(skb)->nr_frags + 1))) {
6617 		netif_tx_stop_queue(txq);
6618 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6619 
6620 		return NETDEV_TX_BUSY;
6621 	}
6622 	len = skb_headlen(skb);
6623 	prod = txr->tx_prod;
6624 	ring_prod = BNX2_TX_RING_IDX(prod);
6625 
6626 	vlan_tag_flags = 0;
6627 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6628 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6629 	}
6630 
6631 	if (skb_vlan_tag_present(skb)) {
6632 		vlan_tag_flags |=
6633 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6634 	}
6635 
6636 	if ((mss = skb_shinfo(skb)->gso_size)) {
6637 		u32 tcp_opt_len;
6638 		struct iphdr *iph;
6639 
6640 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6641 
6642 		tcp_opt_len = tcp_optlen(skb);
6643 
6644 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6645 			u32 tcp_off = skb_transport_offset(skb) -
6646 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6647 
6648 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6649 					  TX_BD_FLAGS_SW_FLAGS;
6650 			if (likely(tcp_off == 0))
6651 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6652 			else {
6653 				tcp_off >>= 3;
6654 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6655 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6656 						  ((tcp_off & 0x10) <<
6657 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6658 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6659 			}
6660 		} else {
6661 			iph = ip_hdr(skb);
6662 			if (tcp_opt_len || (iph->ihl > 5)) {
6663 				vlan_tag_flags |= ((iph->ihl - 5) +
6664 						   (tcp_opt_len >> 2)) << 8;
6665 			}
6666 		}
6667 	} else
6668 		mss = 0;
6669 
6670 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6671 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6672 		dev_kfree_skb_any(skb);
6673 		return NETDEV_TX_OK;
6674 	}
6675 
6676 	tx_buf = &txr->tx_buf_ring[ring_prod];
6677 	tx_buf->skb = skb;
6678 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6679 
6680 	txbd = &txr->tx_desc_ring[ring_prod];
6681 
6682 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6683 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6684 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6685 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6686 
6687 	last_frag = skb_shinfo(skb)->nr_frags;
6688 	tx_buf->nr_frags = last_frag;
6689 	tx_buf->is_gso = skb_is_gso(skb);
6690 
6691 	for (i = 0; i < last_frag; i++) {
6692 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6693 
6694 		prod = BNX2_NEXT_TX_BD(prod);
6695 		ring_prod = BNX2_TX_RING_IDX(prod);
6696 		txbd = &txr->tx_desc_ring[ring_prod];
6697 
6698 		len = skb_frag_size(frag);
6699 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6700 					   DMA_TO_DEVICE);
6701 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6702 			goto dma_error;
6703 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6704 				   mapping);
6705 
6706 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6707 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6708 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6709 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6710 
6711 	}
6712 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6713 
6714 	/* Sync BD data before updating TX mailbox */
6715 	wmb();
6716 
6717 	netdev_tx_sent_queue(txq, skb->len);
6718 
6719 	prod = BNX2_NEXT_TX_BD(prod);
6720 	txr->tx_prod_bseq += skb->len;
6721 
6722 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6723 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6724 
6725 	mmiowb();
6726 
6727 	txr->tx_prod = prod;
6728 
6729 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6730 		netif_tx_stop_queue(txq);
6731 
6732 		/* netif_tx_stop_queue() must be done before checking
6733 		 * tx index in bnx2_tx_avail() below, because in
6734 		 * bnx2_tx_int(), we update tx index before checking for
6735 		 * netif_tx_queue_stopped().
6736 		 */
6737 		smp_mb();
6738 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6739 			netif_tx_wake_queue(txq);
6740 	}
6741 
6742 	return NETDEV_TX_OK;
6743 dma_error:
6744 	/* save value of frag that failed */
6745 	last_frag = i;
6746 
6747 	/* start back at beginning and unmap skb */
6748 	prod = txr->tx_prod;
6749 	ring_prod = BNX2_TX_RING_IDX(prod);
6750 	tx_buf = &txr->tx_buf_ring[ring_prod];
6751 	tx_buf->skb = NULL;
6752 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6753 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6754 
6755 	/* unmap remaining mapped pages */
6756 	for (i = 0; i < last_frag; i++) {
6757 		prod = BNX2_NEXT_TX_BD(prod);
6758 		ring_prod = BNX2_TX_RING_IDX(prod);
6759 		tx_buf = &txr->tx_buf_ring[ring_prod];
6760 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6761 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6762 			       PCI_DMA_TODEVICE);
6763 	}
6764 
6765 	dev_kfree_skb_any(skb);
6766 	return NETDEV_TX_OK;
6767 }
6768 
6769 /* Called with rtnl_lock */
6770 static int
6771 bnx2_close(struct net_device *dev)
6772 {
6773 	struct bnx2 *bp = netdev_priv(dev);
6774 
6775 	bnx2_disable_int_sync(bp);
6776 	bnx2_napi_disable(bp);
6777 	netif_tx_disable(dev);
6778 	del_timer_sync(&bp->timer);
6779 	bnx2_shutdown_chip(bp);
6780 	bnx2_free_irq(bp);
6781 	bnx2_free_skbs(bp);
6782 	bnx2_free_mem(bp);
6783 	bnx2_del_napi(bp);
6784 	bp->link_up = 0;
6785 	netif_carrier_off(bp->dev);
6786 	return 0;
6787 }
6788 
6789 static void
6790 bnx2_save_stats(struct bnx2 *bp)
6791 {
6792 	u32 *hw_stats = (u32 *) bp->stats_blk;
6793 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6794 	int i;
6795 
6796 	/* The 1st 10 counters are 64-bit counters */
6797 	for (i = 0; i < 20; i += 2) {
6798 		u32 hi;
6799 		u64 lo;
6800 
6801 		hi = temp_stats[i] + hw_stats[i];
6802 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6803 		if (lo > 0xffffffff)
6804 			hi++;
6805 		temp_stats[i] = hi;
6806 		temp_stats[i + 1] = lo & 0xffffffff;
6807 	}
6808 
6809 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6810 		temp_stats[i] += hw_stats[i];
6811 }
6812 
6813 #define GET_64BIT_NET_STATS64(ctr)		\
6814 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6815 
6816 #define GET_64BIT_NET_STATS(ctr)				\
6817 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6818 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6819 
6820 #define GET_32BIT_NET_STATS(ctr)				\
6821 	(unsigned long) (bp->stats_blk->ctr +			\
6822 			 bp->temp_stats_blk->ctr)
6823 
6824 static void
6825 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6826 {
6827 	struct bnx2 *bp = netdev_priv(dev);
6828 
6829 	if (bp->stats_blk == NULL)
6830 		return;
6831 
6832 	net_stats->rx_packets =
6833 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6834 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6835 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6836 
6837 	net_stats->tx_packets =
6838 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6839 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6840 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6841 
6842 	net_stats->rx_bytes =
6843 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6844 
6845 	net_stats->tx_bytes =
6846 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6847 
6848 	net_stats->multicast =
6849 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6850 
6851 	net_stats->collisions =
6852 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6853 
6854 	net_stats->rx_length_errors =
6855 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6856 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6857 
6858 	net_stats->rx_over_errors =
6859 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6860 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6861 
6862 	net_stats->rx_frame_errors =
6863 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6864 
6865 	net_stats->rx_crc_errors =
6866 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6867 
6868 	net_stats->rx_errors = net_stats->rx_length_errors +
6869 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6870 		net_stats->rx_crc_errors;
6871 
6872 	net_stats->tx_aborted_errors =
6873 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6874 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6875 
6876 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6877 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6878 		net_stats->tx_carrier_errors = 0;
6879 	else {
6880 		net_stats->tx_carrier_errors =
6881 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6882 	}
6883 
6884 	net_stats->tx_errors =
6885 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6886 		net_stats->tx_aborted_errors +
6887 		net_stats->tx_carrier_errors;
6888 
6889 	net_stats->rx_missed_errors =
6890 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6891 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6892 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6893 
6894 }
6895 
6896 /* All ethtool functions called with rtnl_lock */
6897 
6898 static int
6899 bnx2_get_link_ksettings(struct net_device *dev,
6900 			struct ethtool_link_ksettings *cmd)
6901 {
6902 	struct bnx2 *bp = netdev_priv(dev);
6903 	int support_serdes = 0, support_copper = 0;
6904 	u32 supported, advertising;
6905 
6906 	supported = SUPPORTED_Autoneg;
6907 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6908 		support_serdes = 1;
6909 		support_copper = 1;
6910 	} else if (bp->phy_port == PORT_FIBRE)
6911 		support_serdes = 1;
6912 	else
6913 		support_copper = 1;
6914 
6915 	if (support_serdes) {
6916 		supported |= SUPPORTED_1000baseT_Full |
6917 			SUPPORTED_FIBRE;
6918 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6919 			supported |= SUPPORTED_2500baseX_Full;
6920 	}
6921 	if (support_copper) {
6922 		supported |= SUPPORTED_10baseT_Half |
6923 			SUPPORTED_10baseT_Full |
6924 			SUPPORTED_100baseT_Half |
6925 			SUPPORTED_100baseT_Full |
6926 			SUPPORTED_1000baseT_Full |
6927 			SUPPORTED_TP;
6928 	}
6929 
6930 	spin_lock_bh(&bp->phy_lock);
6931 	cmd->base.port = bp->phy_port;
6932 	advertising = bp->advertising;
6933 
6934 	if (bp->autoneg & AUTONEG_SPEED) {
6935 		cmd->base.autoneg = AUTONEG_ENABLE;
6936 	} else {
6937 		cmd->base.autoneg = AUTONEG_DISABLE;
6938 	}
6939 
6940 	if (netif_carrier_ok(dev)) {
6941 		cmd->base.speed = bp->line_speed;
6942 		cmd->base.duplex = bp->duplex;
6943 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6944 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6945 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6946 			else
6947 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6948 		}
6949 	}
6950 	else {
6951 		cmd->base.speed = SPEED_UNKNOWN;
6952 		cmd->base.duplex = DUPLEX_UNKNOWN;
6953 	}
6954 	spin_unlock_bh(&bp->phy_lock);
6955 
6956 	cmd->base.phy_address = bp->phy_addr;
6957 
6958 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6959 						supported);
6960 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6961 						advertising);
6962 
6963 	return 0;
6964 }
6965 
6966 static int
6967 bnx2_set_link_ksettings(struct net_device *dev,
6968 			const struct ethtool_link_ksettings *cmd)
6969 {
6970 	struct bnx2 *bp = netdev_priv(dev);
6971 	u8 autoneg = bp->autoneg;
6972 	u8 req_duplex = bp->req_duplex;
6973 	u16 req_line_speed = bp->req_line_speed;
6974 	u32 advertising = bp->advertising;
6975 	int err = -EINVAL;
6976 
6977 	spin_lock_bh(&bp->phy_lock);
6978 
6979 	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6980 		goto err_out_unlock;
6981 
6982 	if (cmd->base.port != bp->phy_port &&
6983 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6984 		goto err_out_unlock;
6985 
6986 	/* If device is down, we can store the settings only if the user
6987 	 * is setting the currently active port.
6988 	 */
6989 	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6990 		goto err_out_unlock;
6991 
6992 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6993 		autoneg |= AUTONEG_SPEED;
6994 
6995 		ethtool_convert_link_mode_to_legacy_u32(
6996 			&advertising, cmd->link_modes.advertising);
6997 
6998 		if (cmd->base.port == PORT_TP) {
6999 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
7000 			if (!advertising)
7001 				advertising = ETHTOOL_ALL_COPPER_SPEED;
7002 		} else {
7003 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
7004 			if (!advertising)
7005 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
7006 		}
7007 		advertising |= ADVERTISED_Autoneg;
7008 	}
7009 	else {
7010 		u32 speed = cmd->base.speed;
7011 
7012 		if (cmd->base.port == PORT_FIBRE) {
7013 			if ((speed != SPEED_1000 &&
7014 			     speed != SPEED_2500) ||
7015 			    (cmd->base.duplex != DUPLEX_FULL))
7016 				goto err_out_unlock;
7017 
7018 			if (speed == SPEED_2500 &&
7019 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7020 				goto err_out_unlock;
7021 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7022 			goto err_out_unlock;
7023 
7024 		autoneg &= ~AUTONEG_SPEED;
7025 		req_line_speed = speed;
7026 		req_duplex = cmd->base.duplex;
7027 		advertising = 0;
7028 	}
7029 
7030 	bp->autoneg = autoneg;
7031 	bp->advertising = advertising;
7032 	bp->req_line_speed = req_line_speed;
7033 	bp->req_duplex = req_duplex;
7034 
7035 	err = 0;
7036 	/* If device is down, the new settings will be picked up when it is
7037 	 * brought up.
7038 	 */
7039 	if (netif_running(dev))
7040 		err = bnx2_setup_phy(bp, cmd->base.port);
7041 
7042 err_out_unlock:
7043 	spin_unlock_bh(&bp->phy_lock);
7044 
7045 	return err;
7046 }
7047 
7048 static void
7049 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7050 {
7051 	struct bnx2 *bp = netdev_priv(dev);
7052 
7053 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7054 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7055 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7056 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7057 }
7058 
7059 #define BNX2_REGDUMP_LEN		(32 * 1024)
7060 
7061 static int
7062 bnx2_get_regs_len(struct net_device *dev)
7063 {
7064 	return BNX2_REGDUMP_LEN;
7065 }
7066 
7067 static void
7068 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7069 {
7070 	u32 *p = _p, i, offset;
7071 	u8 *orig_p = _p;
7072 	struct bnx2 *bp = netdev_priv(dev);
7073 	static const u32 reg_boundaries[] = {
7074 		0x0000, 0x0098, 0x0400, 0x045c,
7075 		0x0800, 0x0880, 0x0c00, 0x0c10,
7076 		0x0c30, 0x0d08, 0x1000, 0x101c,
7077 		0x1040, 0x1048, 0x1080, 0x10a4,
7078 		0x1400, 0x1490, 0x1498, 0x14f0,
7079 		0x1500, 0x155c, 0x1580, 0x15dc,
7080 		0x1600, 0x1658, 0x1680, 0x16d8,
7081 		0x1800, 0x1820, 0x1840, 0x1854,
7082 		0x1880, 0x1894, 0x1900, 0x1984,
7083 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7084 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7085 		0x2000, 0x2030, 0x23c0, 0x2400,
7086 		0x2800, 0x2820, 0x2830, 0x2850,
7087 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7088 		0x3c00, 0x3c94, 0x4000, 0x4010,
7089 		0x4080, 0x4090, 0x43c0, 0x4458,
7090 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7091 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7092 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7093 		0x5fc0, 0x6000, 0x6400, 0x6428,
7094 		0x6800, 0x6848, 0x684c, 0x6860,
7095 		0x6888, 0x6910, 0x8000
7096 	};
7097 
7098 	regs->version = 0;
7099 
7100 	memset(p, 0, BNX2_REGDUMP_LEN);
7101 
7102 	if (!netif_running(bp->dev))
7103 		return;
7104 
7105 	i = 0;
7106 	offset = reg_boundaries[0];
7107 	p += offset;
7108 	while (offset < BNX2_REGDUMP_LEN) {
7109 		*p++ = BNX2_RD(bp, offset);
7110 		offset += 4;
7111 		if (offset == reg_boundaries[i + 1]) {
7112 			offset = reg_boundaries[i + 2];
7113 			p = (u32 *) (orig_p + offset);
7114 			i += 2;
7115 		}
7116 	}
7117 }
7118 
7119 static void
7120 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7121 {
7122 	struct bnx2 *bp = netdev_priv(dev);
7123 
7124 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7125 		wol->supported = 0;
7126 		wol->wolopts = 0;
7127 	}
7128 	else {
7129 		wol->supported = WAKE_MAGIC;
7130 		if (bp->wol)
7131 			wol->wolopts = WAKE_MAGIC;
7132 		else
7133 			wol->wolopts = 0;
7134 	}
7135 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7136 }
7137 
7138 static int
7139 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7140 {
7141 	struct bnx2 *bp = netdev_priv(dev);
7142 
7143 	if (wol->wolopts & ~WAKE_MAGIC)
7144 		return -EINVAL;
7145 
7146 	if (wol->wolopts & WAKE_MAGIC) {
7147 		if (bp->flags & BNX2_FLAG_NO_WOL)
7148 			return -EINVAL;
7149 
7150 		bp->wol = 1;
7151 	}
7152 	else {
7153 		bp->wol = 0;
7154 	}
7155 
7156 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7157 
7158 	return 0;
7159 }
7160 
7161 static int
7162 bnx2_nway_reset(struct net_device *dev)
7163 {
7164 	struct bnx2 *bp = netdev_priv(dev);
7165 	u32 bmcr;
7166 
7167 	if (!netif_running(dev))
7168 		return -EAGAIN;
7169 
7170 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7171 		return -EINVAL;
7172 	}
7173 
7174 	spin_lock_bh(&bp->phy_lock);
7175 
7176 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7177 		int rc;
7178 
7179 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7180 		spin_unlock_bh(&bp->phy_lock);
7181 		return rc;
7182 	}
7183 
7184 	/* Force a link down visible on the other side */
7185 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7186 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7187 		spin_unlock_bh(&bp->phy_lock);
7188 
7189 		msleep(20);
7190 
7191 		spin_lock_bh(&bp->phy_lock);
7192 
7193 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7194 		bp->serdes_an_pending = 1;
7195 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7196 	}
7197 
7198 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7199 	bmcr &= ~BMCR_LOOPBACK;
7200 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7201 
7202 	spin_unlock_bh(&bp->phy_lock);
7203 
7204 	return 0;
7205 }
7206 
7207 static u32
7208 bnx2_get_link(struct net_device *dev)
7209 {
7210 	struct bnx2 *bp = netdev_priv(dev);
7211 
7212 	return bp->link_up;
7213 }
7214 
7215 static int
7216 bnx2_get_eeprom_len(struct net_device *dev)
7217 {
7218 	struct bnx2 *bp = netdev_priv(dev);
7219 
7220 	if (bp->flash_info == NULL)
7221 		return 0;
7222 
7223 	return (int) bp->flash_size;
7224 }
7225 
7226 static int
7227 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7228 		u8 *eebuf)
7229 {
7230 	struct bnx2 *bp = netdev_priv(dev);
7231 	int rc;
7232 
7233 	/* parameters already validated in ethtool_get_eeprom */
7234 
7235 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7236 
7237 	return rc;
7238 }
7239 
7240 static int
7241 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7242 		u8 *eebuf)
7243 {
7244 	struct bnx2 *bp = netdev_priv(dev);
7245 	int rc;
7246 
7247 	/* parameters already validated in ethtool_set_eeprom */
7248 
7249 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7250 
7251 	return rc;
7252 }
7253 
7254 static int
7255 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7256 {
7257 	struct bnx2 *bp = netdev_priv(dev);
7258 
7259 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7260 
7261 	coal->rx_coalesce_usecs = bp->rx_ticks;
7262 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7263 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7264 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7265 
7266 	coal->tx_coalesce_usecs = bp->tx_ticks;
7267 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7268 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7269 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7270 
7271 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7272 
7273 	return 0;
7274 }
7275 
7276 static int
7277 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7278 {
7279 	struct bnx2 *bp = netdev_priv(dev);
7280 
7281 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7282 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7283 
7284 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7285 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7286 
7287 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7288 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7289 
7290 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7291 	if (bp->rx_quick_cons_trip_int > 0xff)
7292 		bp->rx_quick_cons_trip_int = 0xff;
7293 
7294 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7295 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7296 
7297 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7298 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7299 
7300 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7301 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7302 
7303 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7304 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7305 		0xff;
7306 
7307 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7308 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7309 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7310 			bp->stats_ticks = USEC_PER_SEC;
7311 	}
7312 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7313 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7314 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7315 
7316 	if (netif_running(bp->dev)) {
7317 		bnx2_netif_stop(bp, true);
7318 		bnx2_init_nic(bp, 0);
7319 		bnx2_netif_start(bp, true);
7320 	}
7321 
7322 	return 0;
7323 }
7324 
7325 static void
7326 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7327 {
7328 	struct bnx2 *bp = netdev_priv(dev);
7329 
7330 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7331 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7332 
7333 	ering->rx_pending = bp->rx_ring_size;
7334 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7335 
7336 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7337 	ering->tx_pending = bp->tx_ring_size;
7338 }
7339 
7340 static int
7341 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7342 {
7343 	if (netif_running(bp->dev)) {
7344 		/* Reset will erase chipset stats; save them */
7345 		bnx2_save_stats(bp);
7346 
7347 		bnx2_netif_stop(bp, true);
7348 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7349 		if (reset_irq) {
7350 			bnx2_free_irq(bp);
7351 			bnx2_del_napi(bp);
7352 		} else {
7353 			__bnx2_free_irq(bp);
7354 		}
7355 		bnx2_free_skbs(bp);
7356 		bnx2_free_mem(bp);
7357 	}
7358 
7359 	bnx2_set_rx_ring_size(bp, rx);
7360 	bp->tx_ring_size = tx;
7361 
7362 	if (netif_running(bp->dev)) {
7363 		int rc = 0;
7364 
7365 		if (reset_irq) {
7366 			rc = bnx2_setup_int_mode(bp, disable_msi);
7367 			bnx2_init_napi(bp);
7368 		}
7369 
7370 		if (!rc)
7371 			rc = bnx2_alloc_mem(bp);
7372 
7373 		if (!rc)
7374 			rc = bnx2_request_irq(bp);
7375 
7376 		if (!rc)
7377 			rc = bnx2_init_nic(bp, 0);
7378 
7379 		if (rc) {
7380 			bnx2_napi_enable(bp);
7381 			dev_close(bp->dev);
7382 			return rc;
7383 		}
7384 #ifdef BCM_CNIC
7385 		mutex_lock(&bp->cnic_lock);
7386 		/* Let cnic know about the new status block. */
7387 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7388 			bnx2_setup_cnic_irq_info(bp);
7389 		mutex_unlock(&bp->cnic_lock);
7390 #endif
7391 		bnx2_netif_start(bp, true);
7392 	}
7393 	return 0;
7394 }
7395 
7396 static int
7397 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7398 {
7399 	struct bnx2 *bp = netdev_priv(dev);
7400 	int rc;
7401 
7402 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7403 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7404 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7405 
7406 		return -EINVAL;
7407 	}
7408 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7409 				   false);
7410 	return rc;
7411 }
7412 
7413 static void
7414 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7415 {
7416 	struct bnx2 *bp = netdev_priv(dev);
7417 
7418 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7419 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7420 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7421 }
7422 
7423 static int
7424 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7425 {
7426 	struct bnx2 *bp = netdev_priv(dev);
7427 
7428 	bp->req_flow_ctrl = 0;
7429 	if (epause->rx_pause)
7430 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7431 	if (epause->tx_pause)
7432 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7433 
7434 	if (epause->autoneg) {
7435 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7436 	}
7437 	else {
7438 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7439 	}
7440 
7441 	if (netif_running(dev)) {
7442 		spin_lock_bh(&bp->phy_lock);
7443 		bnx2_setup_phy(bp, bp->phy_port);
7444 		spin_unlock_bh(&bp->phy_lock);
7445 	}
7446 
7447 	return 0;
7448 }
7449 
7450 static struct {
7451 	char string[ETH_GSTRING_LEN];
7452 } bnx2_stats_str_arr[] = {
7453 	{ "rx_bytes" },
7454 	{ "rx_error_bytes" },
7455 	{ "tx_bytes" },
7456 	{ "tx_error_bytes" },
7457 	{ "rx_ucast_packets" },
7458 	{ "rx_mcast_packets" },
7459 	{ "rx_bcast_packets" },
7460 	{ "tx_ucast_packets" },
7461 	{ "tx_mcast_packets" },
7462 	{ "tx_bcast_packets" },
7463 	{ "tx_mac_errors" },
7464 	{ "tx_carrier_errors" },
7465 	{ "rx_crc_errors" },
7466 	{ "rx_align_errors" },
7467 	{ "tx_single_collisions" },
7468 	{ "tx_multi_collisions" },
7469 	{ "tx_deferred" },
7470 	{ "tx_excess_collisions" },
7471 	{ "tx_late_collisions" },
7472 	{ "tx_total_collisions" },
7473 	{ "rx_fragments" },
7474 	{ "rx_jabbers" },
7475 	{ "rx_undersize_packets" },
7476 	{ "rx_oversize_packets" },
7477 	{ "rx_64_byte_packets" },
7478 	{ "rx_65_to_127_byte_packets" },
7479 	{ "rx_128_to_255_byte_packets" },
7480 	{ "rx_256_to_511_byte_packets" },
7481 	{ "rx_512_to_1023_byte_packets" },
7482 	{ "rx_1024_to_1522_byte_packets" },
7483 	{ "rx_1523_to_9022_byte_packets" },
7484 	{ "tx_64_byte_packets" },
7485 	{ "tx_65_to_127_byte_packets" },
7486 	{ "tx_128_to_255_byte_packets" },
7487 	{ "tx_256_to_511_byte_packets" },
7488 	{ "tx_512_to_1023_byte_packets" },
7489 	{ "tx_1024_to_1522_byte_packets" },
7490 	{ "tx_1523_to_9022_byte_packets" },
7491 	{ "rx_xon_frames" },
7492 	{ "rx_xoff_frames" },
7493 	{ "tx_xon_frames" },
7494 	{ "tx_xoff_frames" },
7495 	{ "rx_mac_ctrl_frames" },
7496 	{ "rx_filtered_packets" },
7497 	{ "rx_ftq_discards" },
7498 	{ "rx_discards" },
7499 	{ "rx_fw_discards" },
7500 };
7501 
7502 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7503 
7504 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7505 
7506 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7507     STATS_OFFSET32(stat_IfHCInOctets_hi),
7508     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7509     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7510     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7511     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7512     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7513     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7514     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7515     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7516     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7517     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7518     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7519     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7520     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7521     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7522     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7523     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7524     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7525     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7526     STATS_OFFSET32(stat_EtherStatsCollisions),
7527     STATS_OFFSET32(stat_EtherStatsFragments),
7528     STATS_OFFSET32(stat_EtherStatsJabbers),
7529     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7530     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7531     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7532     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7533     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7534     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7535     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7536     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7537     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7538     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7539     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7540     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7541     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7542     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7543     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7544     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7545     STATS_OFFSET32(stat_XonPauseFramesReceived),
7546     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7547     STATS_OFFSET32(stat_OutXonSent),
7548     STATS_OFFSET32(stat_OutXoffSent),
7549     STATS_OFFSET32(stat_MacControlFramesReceived),
7550     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7551     STATS_OFFSET32(stat_IfInFTQDiscards),
7552     STATS_OFFSET32(stat_IfInMBUFDiscards),
7553     STATS_OFFSET32(stat_FwRxDrop),
7554 };
7555 
7556 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7557  * skipped because of errata.
7558  */
7559 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7560 	8,0,8,8,8,8,8,8,8,8,
7561 	4,0,4,4,4,4,4,4,4,4,
7562 	4,4,4,4,4,4,4,4,4,4,
7563 	4,4,4,4,4,4,4,4,4,4,
7564 	4,4,4,4,4,4,4,
7565 };
7566 
7567 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7568 	8,0,8,8,8,8,8,8,8,8,
7569 	4,4,4,4,4,4,4,4,4,4,
7570 	4,4,4,4,4,4,4,4,4,4,
7571 	4,4,4,4,4,4,4,4,4,4,
7572 	4,4,4,4,4,4,4,
7573 };
7574 
7575 #define BNX2_NUM_TESTS 6
7576 
7577 static struct {
7578 	char string[ETH_GSTRING_LEN];
7579 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7580 	{ "register_test (offline)" },
7581 	{ "memory_test (offline)" },
7582 	{ "loopback_test (offline)" },
7583 	{ "nvram_test (online)" },
7584 	{ "interrupt_test (online)" },
7585 	{ "link_test (online)" },
7586 };
7587 
7588 static int
7589 bnx2_get_sset_count(struct net_device *dev, int sset)
7590 {
7591 	switch (sset) {
7592 	case ETH_SS_TEST:
7593 		return BNX2_NUM_TESTS;
7594 	case ETH_SS_STATS:
7595 		return BNX2_NUM_STATS;
7596 	default:
7597 		return -EOPNOTSUPP;
7598 	}
7599 }
7600 
7601 static void
7602 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7603 {
7604 	struct bnx2 *bp = netdev_priv(dev);
7605 
7606 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7607 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7608 		int i;
7609 
7610 		bnx2_netif_stop(bp, true);
7611 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7612 		bnx2_free_skbs(bp);
7613 
7614 		if (bnx2_test_registers(bp) != 0) {
7615 			buf[0] = 1;
7616 			etest->flags |= ETH_TEST_FL_FAILED;
7617 		}
7618 		if (bnx2_test_memory(bp) != 0) {
7619 			buf[1] = 1;
7620 			etest->flags |= ETH_TEST_FL_FAILED;
7621 		}
7622 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7623 			etest->flags |= ETH_TEST_FL_FAILED;
7624 
7625 		if (!netif_running(bp->dev))
7626 			bnx2_shutdown_chip(bp);
7627 		else {
7628 			bnx2_init_nic(bp, 1);
7629 			bnx2_netif_start(bp, true);
7630 		}
7631 
7632 		/* wait for link up */
7633 		for (i = 0; i < 7; i++) {
7634 			if (bp->link_up)
7635 				break;
7636 			msleep_interruptible(1000);
7637 		}
7638 	}
7639 
7640 	if (bnx2_test_nvram(bp) != 0) {
7641 		buf[3] = 1;
7642 		etest->flags |= ETH_TEST_FL_FAILED;
7643 	}
7644 	if (bnx2_test_intr(bp) != 0) {
7645 		buf[4] = 1;
7646 		etest->flags |= ETH_TEST_FL_FAILED;
7647 	}
7648 
7649 	if (bnx2_test_link(bp) != 0) {
7650 		buf[5] = 1;
7651 		etest->flags |= ETH_TEST_FL_FAILED;
7652 
7653 	}
7654 }
7655 
7656 static void
7657 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7658 {
7659 	switch (stringset) {
7660 	case ETH_SS_STATS:
7661 		memcpy(buf, bnx2_stats_str_arr,
7662 			sizeof(bnx2_stats_str_arr));
7663 		break;
7664 	case ETH_SS_TEST:
7665 		memcpy(buf, bnx2_tests_str_arr,
7666 			sizeof(bnx2_tests_str_arr));
7667 		break;
7668 	}
7669 }
7670 
7671 static void
7672 bnx2_get_ethtool_stats(struct net_device *dev,
7673 		struct ethtool_stats *stats, u64 *buf)
7674 {
7675 	struct bnx2 *bp = netdev_priv(dev);
7676 	int i;
7677 	u32 *hw_stats = (u32 *) bp->stats_blk;
7678 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7679 	u8 *stats_len_arr = NULL;
7680 
7681 	if (hw_stats == NULL) {
7682 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7683 		return;
7684 	}
7685 
7686 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7687 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7688 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7689 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7690 		stats_len_arr = bnx2_5706_stats_len_arr;
7691 	else
7692 		stats_len_arr = bnx2_5708_stats_len_arr;
7693 
7694 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7695 		unsigned long offset;
7696 
7697 		if (stats_len_arr[i] == 0) {
7698 			/* skip this counter */
7699 			buf[i] = 0;
7700 			continue;
7701 		}
7702 
7703 		offset = bnx2_stats_offset_arr[i];
7704 		if (stats_len_arr[i] == 4) {
7705 			/* 4-byte counter */
7706 			buf[i] = (u64) *(hw_stats + offset) +
7707 				 *(temp_stats + offset);
7708 			continue;
7709 		}
7710 		/* 8-byte counter */
7711 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7712 			 *(hw_stats + offset + 1) +
7713 			 (((u64) *(temp_stats + offset)) << 32) +
7714 			 *(temp_stats + offset + 1);
7715 	}
7716 }
7717 
7718 static int
7719 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7720 {
7721 	struct bnx2 *bp = netdev_priv(dev);
7722 
7723 	switch (state) {
7724 	case ETHTOOL_ID_ACTIVE:
7725 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7726 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7727 		return 1;	/* cycle on/off once per second */
7728 
7729 	case ETHTOOL_ID_ON:
7730 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7731 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7732 			BNX2_EMAC_LED_100MB_OVERRIDE |
7733 			BNX2_EMAC_LED_10MB_OVERRIDE |
7734 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7735 			BNX2_EMAC_LED_TRAFFIC);
7736 		break;
7737 
7738 	case ETHTOOL_ID_OFF:
7739 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7740 		break;
7741 
7742 	case ETHTOOL_ID_INACTIVE:
7743 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7744 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7745 		break;
7746 	}
7747 
7748 	return 0;
7749 }
7750 
7751 static int
7752 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7753 {
7754 	struct bnx2 *bp = netdev_priv(dev);
7755 
7756 	/* TSO with VLAN tag won't work with current firmware */
7757 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7758 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7759 	else
7760 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7761 
7762 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7763 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7764 	    netif_running(dev)) {
7765 		bnx2_netif_stop(bp, false);
7766 		dev->features = features;
7767 		bnx2_set_rx_mode(dev);
7768 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7769 		bnx2_netif_start(bp, false);
7770 		return 1;
7771 	}
7772 
7773 	return 0;
7774 }
7775 
7776 static void bnx2_get_channels(struct net_device *dev,
7777 			      struct ethtool_channels *channels)
7778 {
7779 	struct bnx2 *bp = netdev_priv(dev);
7780 	u32 max_rx_rings = 1;
7781 	u32 max_tx_rings = 1;
7782 
7783 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7784 		max_rx_rings = RX_MAX_RINGS;
7785 		max_tx_rings = TX_MAX_RINGS;
7786 	}
7787 
7788 	channels->max_rx = max_rx_rings;
7789 	channels->max_tx = max_tx_rings;
7790 	channels->max_other = 0;
7791 	channels->max_combined = 0;
7792 	channels->rx_count = bp->num_rx_rings;
7793 	channels->tx_count = bp->num_tx_rings;
7794 	channels->other_count = 0;
7795 	channels->combined_count = 0;
7796 }
7797 
7798 static int bnx2_set_channels(struct net_device *dev,
7799 			      struct ethtool_channels *channels)
7800 {
7801 	struct bnx2 *bp = netdev_priv(dev);
7802 	u32 max_rx_rings = 1;
7803 	u32 max_tx_rings = 1;
7804 	int rc = 0;
7805 
7806 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7807 		max_rx_rings = RX_MAX_RINGS;
7808 		max_tx_rings = TX_MAX_RINGS;
7809 	}
7810 	if (channels->rx_count > max_rx_rings ||
7811 	    channels->tx_count > max_tx_rings)
7812 		return -EINVAL;
7813 
7814 	bp->num_req_rx_rings = channels->rx_count;
7815 	bp->num_req_tx_rings = channels->tx_count;
7816 
7817 	if (netif_running(dev))
7818 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7819 					   bp->tx_ring_size, true);
7820 
7821 	return rc;
7822 }
7823 
7824 static const struct ethtool_ops bnx2_ethtool_ops = {
7825 	.get_drvinfo		= bnx2_get_drvinfo,
7826 	.get_regs_len		= bnx2_get_regs_len,
7827 	.get_regs		= bnx2_get_regs,
7828 	.get_wol		= bnx2_get_wol,
7829 	.set_wol		= bnx2_set_wol,
7830 	.nway_reset		= bnx2_nway_reset,
7831 	.get_link		= bnx2_get_link,
7832 	.get_eeprom_len		= bnx2_get_eeprom_len,
7833 	.get_eeprom		= bnx2_get_eeprom,
7834 	.set_eeprom		= bnx2_set_eeprom,
7835 	.get_coalesce		= bnx2_get_coalesce,
7836 	.set_coalesce		= bnx2_set_coalesce,
7837 	.get_ringparam		= bnx2_get_ringparam,
7838 	.set_ringparam		= bnx2_set_ringparam,
7839 	.get_pauseparam		= bnx2_get_pauseparam,
7840 	.set_pauseparam		= bnx2_set_pauseparam,
7841 	.self_test		= bnx2_self_test,
7842 	.get_strings		= bnx2_get_strings,
7843 	.set_phys_id		= bnx2_set_phys_id,
7844 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7845 	.get_sset_count		= bnx2_get_sset_count,
7846 	.get_channels		= bnx2_get_channels,
7847 	.set_channels		= bnx2_set_channels,
7848 	.get_link_ksettings	= bnx2_get_link_ksettings,
7849 	.set_link_ksettings	= bnx2_set_link_ksettings,
7850 };
7851 
7852 /* Called with rtnl_lock */
7853 static int
7854 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7855 {
7856 	struct mii_ioctl_data *data = if_mii(ifr);
7857 	struct bnx2 *bp = netdev_priv(dev);
7858 	int err;
7859 
7860 	switch(cmd) {
7861 	case SIOCGMIIPHY:
7862 		data->phy_id = bp->phy_addr;
7863 
7864 		/* fallthru */
7865 	case SIOCGMIIREG: {
7866 		u32 mii_regval;
7867 
7868 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7869 			return -EOPNOTSUPP;
7870 
7871 		if (!netif_running(dev))
7872 			return -EAGAIN;
7873 
7874 		spin_lock_bh(&bp->phy_lock);
7875 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7876 		spin_unlock_bh(&bp->phy_lock);
7877 
7878 		data->val_out = mii_regval;
7879 
7880 		return err;
7881 	}
7882 
7883 	case SIOCSMIIREG:
7884 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7885 			return -EOPNOTSUPP;
7886 
7887 		if (!netif_running(dev))
7888 			return -EAGAIN;
7889 
7890 		spin_lock_bh(&bp->phy_lock);
7891 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7892 		spin_unlock_bh(&bp->phy_lock);
7893 
7894 		return err;
7895 
7896 	default:
7897 		/* do nothing */
7898 		break;
7899 	}
7900 	return -EOPNOTSUPP;
7901 }
7902 
7903 /* Called with rtnl_lock */
7904 static int
7905 bnx2_change_mac_addr(struct net_device *dev, void *p)
7906 {
7907 	struct sockaddr *addr = p;
7908 	struct bnx2 *bp = netdev_priv(dev);
7909 
7910 	if (!is_valid_ether_addr(addr->sa_data))
7911 		return -EADDRNOTAVAIL;
7912 
7913 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7914 	if (netif_running(dev))
7915 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7916 
7917 	return 0;
7918 }
7919 
7920 /* Called with rtnl_lock */
7921 static int
7922 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7923 {
7924 	struct bnx2 *bp = netdev_priv(dev);
7925 
7926 	dev->mtu = new_mtu;
7927 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7928 				     false);
7929 }
7930 
7931 #ifdef CONFIG_NET_POLL_CONTROLLER
7932 static void
7933 poll_bnx2(struct net_device *dev)
7934 {
7935 	struct bnx2 *bp = netdev_priv(dev);
7936 	int i;
7937 
7938 	for (i = 0; i < bp->irq_nvecs; i++) {
7939 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7940 
7941 		disable_irq(irq->vector);
7942 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7943 		enable_irq(irq->vector);
7944 	}
7945 }
7946 #endif
7947 
7948 static void
7949 bnx2_get_5709_media(struct bnx2 *bp)
7950 {
7951 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7952 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7953 	u32 strap;
7954 
7955 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7956 		return;
7957 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7958 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7959 		return;
7960 	}
7961 
7962 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7963 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7964 	else
7965 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7966 
7967 	if (bp->func == 0) {
7968 		switch (strap) {
7969 		case 0x4:
7970 		case 0x5:
7971 		case 0x6:
7972 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7973 			return;
7974 		}
7975 	} else {
7976 		switch (strap) {
7977 		case 0x1:
7978 		case 0x2:
7979 		case 0x4:
7980 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7981 			return;
7982 		}
7983 	}
7984 }
7985 
7986 static void
7987 bnx2_get_pci_speed(struct bnx2 *bp)
7988 {
7989 	u32 reg;
7990 
7991 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7992 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7993 		u32 clkreg;
7994 
7995 		bp->flags |= BNX2_FLAG_PCIX;
7996 
7997 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7998 
7999 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
8000 		switch (clkreg) {
8001 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
8002 			bp->bus_speed_mhz = 133;
8003 			break;
8004 
8005 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8006 			bp->bus_speed_mhz = 100;
8007 			break;
8008 
8009 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8010 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8011 			bp->bus_speed_mhz = 66;
8012 			break;
8013 
8014 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8015 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8016 			bp->bus_speed_mhz = 50;
8017 			break;
8018 
8019 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8020 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8021 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8022 			bp->bus_speed_mhz = 33;
8023 			break;
8024 		}
8025 	}
8026 	else {
8027 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8028 			bp->bus_speed_mhz = 66;
8029 		else
8030 			bp->bus_speed_mhz = 33;
8031 	}
8032 
8033 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8034 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8035 
8036 }
8037 
8038 static void
8039 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8040 {
8041 	int rc, i, j;
8042 	u8 *data;
8043 	unsigned int block_end, rosize, len;
8044 
8045 #define BNX2_VPD_NVRAM_OFFSET	0x300
8046 #define BNX2_VPD_LEN		128
8047 #define BNX2_MAX_VER_SLEN	30
8048 
8049 	data = kmalloc(256, GFP_KERNEL);
8050 	if (!data)
8051 		return;
8052 
8053 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8054 			     BNX2_VPD_LEN);
8055 	if (rc)
8056 		goto vpd_done;
8057 
8058 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8059 		data[i] = data[i + BNX2_VPD_LEN + 3];
8060 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8061 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8062 		data[i + 3] = data[i + BNX2_VPD_LEN];
8063 	}
8064 
8065 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8066 	if (i < 0)
8067 		goto vpd_done;
8068 
8069 	rosize = pci_vpd_lrdt_size(&data[i]);
8070 	i += PCI_VPD_LRDT_TAG_SIZE;
8071 	block_end = i + rosize;
8072 
8073 	if (block_end > BNX2_VPD_LEN)
8074 		goto vpd_done;
8075 
8076 	j = pci_vpd_find_info_keyword(data, i, rosize,
8077 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8078 	if (j < 0)
8079 		goto vpd_done;
8080 
8081 	len = pci_vpd_info_field_size(&data[j]);
8082 
8083 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8084 	if (j + len > block_end || len != 4 ||
8085 	    memcmp(&data[j], "1028", 4))
8086 		goto vpd_done;
8087 
8088 	j = pci_vpd_find_info_keyword(data, i, rosize,
8089 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8090 	if (j < 0)
8091 		goto vpd_done;
8092 
8093 	len = pci_vpd_info_field_size(&data[j]);
8094 
8095 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8096 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8097 		goto vpd_done;
8098 
8099 	memcpy(bp->fw_version, &data[j], len);
8100 	bp->fw_version[len] = ' ';
8101 
8102 vpd_done:
8103 	kfree(data);
8104 }
8105 
8106 static int
8107 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8108 {
8109 	struct bnx2 *bp;
8110 	int rc, i, j;
8111 	u32 reg;
8112 	u64 dma_mask, persist_dma_mask;
8113 	int err;
8114 
8115 	SET_NETDEV_DEV(dev, &pdev->dev);
8116 	bp = netdev_priv(dev);
8117 
8118 	bp->flags = 0;
8119 	bp->phy_flags = 0;
8120 
8121 	bp->temp_stats_blk =
8122 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8123 
8124 	if (bp->temp_stats_blk == NULL) {
8125 		rc = -ENOMEM;
8126 		goto err_out;
8127 	}
8128 
8129 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8130 	rc = pci_enable_device(pdev);
8131 	if (rc) {
8132 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8133 		goto err_out;
8134 	}
8135 
8136 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8137 		dev_err(&pdev->dev,
8138 			"Cannot find PCI device base address, aborting\n");
8139 		rc = -ENODEV;
8140 		goto err_out_disable;
8141 	}
8142 
8143 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8144 	if (rc) {
8145 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8146 		goto err_out_disable;
8147 	}
8148 
8149 	pci_set_master(pdev);
8150 
8151 	bp->pm_cap = pdev->pm_cap;
8152 	if (bp->pm_cap == 0) {
8153 		dev_err(&pdev->dev,
8154 			"Cannot find power management capability, aborting\n");
8155 		rc = -EIO;
8156 		goto err_out_release;
8157 	}
8158 
8159 	bp->dev = dev;
8160 	bp->pdev = pdev;
8161 
8162 	spin_lock_init(&bp->phy_lock);
8163 	spin_lock_init(&bp->indirect_lock);
8164 #ifdef BCM_CNIC
8165 	mutex_init(&bp->cnic_lock);
8166 #endif
8167 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8168 
8169 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8170 							 TX_MAX_TSS_RINGS + 1));
8171 	if (!bp->regview) {
8172 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8173 		rc = -ENOMEM;
8174 		goto err_out_release;
8175 	}
8176 
8177 	/* Configure byte swap and enable write to the reg_window registers.
8178 	 * Rely on CPU to do target byte swapping on big endian systems
8179 	 * The chip's target access swapping will not swap all accesses
8180 	 */
8181 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8182 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8183 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8184 
8185 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8186 
8187 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8188 		if (!pci_is_pcie(pdev)) {
8189 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8190 			rc = -EIO;
8191 			goto err_out_unmap;
8192 		}
8193 		bp->flags |= BNX2_FLAG_PCIE;
8194 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8195 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8196 
8197 		/* AER (Advanced Error Reporting) hooks */
8198 		err = pci_enable_pcie_error_reporting(pdev);
8199 		if (!err)
8200 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8201 
8202 	} else {
8203 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8204 		if (bp->pcix_cap == 0) {
8205 			dev_err(&pdev->dev,
8206 				"Cannot find PCIX capability, aborting\n");
8207 			rc = -EIO;
8208 			goto err_out_unmap;
8209 		}
8210 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8211 	}
8212 
8213 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8214 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8215 		if (pdev->msix_cap)
8216 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8217 	}
8218 
8219 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8220 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8221 		if (pdev->msi_cap)
8222 			bp->flags |= BNX2_FLAG_MSI_CAP;
8223 	}
8224 
8225 	/* 5708 cannot support DMA addresses > 40-bit.  */
8226 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8227 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8228 	else
8229 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8230 
8231 	/* Configure DMA attributes. */
8232 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8233 		dev->features |= NETIF_F_HIGHDMA;
8234 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8235 		if (rc) {
8236 			dev_err(&pdev->dev,
8237 				"pci_set_consistent_dma_mask failed, aborting\n");
8238 			goto err_out_unmap;
8239 		}
8240 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8241 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8242 		goto err_out_unmap;
8243 	}
8244 
8245 	if (!(bp->flags & BNX2_FLAG_PCIE))
8246 		bnx2_get_pci_speed(bp);
8247 
8248 	/* 5706A0 may falsely detect SERR and PERR. */
8249 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8250 		reg = BNX2_RD(bp, PCI_COMMAND);
8251 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8252 		BNX2_WR(bp, PCI_COMMAND, reg);
8253 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8254 		!(bp->flags & BNX2_FLAG_PCIX)) {
8255 
8256 		dev_err(&pdev->dev,
8257 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8258 		goto err_out_unmap;
8259 	}
8260 
8261 	bnx2_init_nvram(bp);
8262 
8263 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8264 
8265 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8266 		bp->func = 1;
8267 
8268 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8269 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8270 		u32 off = bp->func << 2;
8271 
8272 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8273 	} else
8274 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8275 
8276 	/* Get the permanent MAC address.  First we need to make sure the
8277 	 * firmware is actually running.
8278 	 */
8279 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8280 
8281 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8282 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8283 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8284 		rc = -ENODEV;
8285 		goto err_out_unmap;
8286 	}
8287 
8288 	bnx2_read_vpd_fw_ver(bp);
8289 
8290 	j = strlen(bp->fw_version);
8291 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8292 	for (i = 0; i < 3 && j < 24; i++) {
8293 		u8 num, k, skip0;
8294 
8295 		if (i == 0) {
8296 			bp->fw_version[j++] = 'b';
8297 			bp->fw_version[j++] = 'c';
8298 			bp->fw_version[j++] = ' ';
8299 		}
8300 		num = (u8) (reg >> (24 - (i * 8)));
8301 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8302 			if (num >= k || !skip0 || k == 1) {
8303 				bp->fw_version[j++] = (num / k) + '0';
8304 				skip0 = 0;
8305 			}
8306 		}
8307 		if (i != 2)
8308 			bp->fw_version[j++] = '.';
8309 	}
8310 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8311 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8312 		bp->wol = 1;
8313 
8314 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8315 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8316 
8317 		for (i = 0; i < 30; i++) {
8318 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8319 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8320 				break;
8321 			msleep(10);
8322 		}
8323 	}
8324 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8325 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8326 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8327 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8328 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8329 
8330 		if (j < 32)
8331 			bp->fw_version[j++] = ' ';
8332 		for (i = 0; i < 3 && j < 28; i++) {
8333 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8334 			reg = be32_to_cpu(reg);
8335 			memcpy(&bp->fw_version[j], &reg, 4);
8336 			j += 4;
8337 		}
8338 	}
8339 
8340 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8341 	bp->mac_addr[0] = (u8) (reg >> 8);
8342 	bp->mac_addr[1] = (u8) reg;
8343 
8344 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8345 	bp->mac_addr[2] = (u8) (reg >> 24);
8346 	bp->mac_addr[3] = (u8) (reg >> 16);
8347 	bp->mac_addr[4] = (u8) (reg >> 8);
8348 	bp->mac_addr[5] = (u8) reg;
8349 
8350 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8351 	bnx2_set_rx_ring_size(bp, 255);
8352 
8353 	bp->tx_quick_cons_trip_int = 2;
8354 	bp->tx_quick_cons_trip = 20;
8355 	bp->tx_ticks_int = 18;
8356 	bp->tx_ticks = 80;
8357 
8358 	bp->rx_quick_cons_trip_int = 2;
8359 	bp->rx_quick_cons_trip = 12;
8360 	bp->rx_ticks_int = 18;
8361 	bp->rx_ticks = 18;
8362 
8363 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8364 
8365 	bp->current_interval = BNX2_TIMER_INTERVAL;
8366 
8367 	bp->phy_addr = 1;
8368 
8369 	/* allocate stats_blk */
8370 	rc = bnx2_alloc_stats_blk(dev);
8371 	if (rc)
8372 		goto err_out_unmap;
8373 
8374 	/* Disable WOL support if we are running on a SERDES chip. */
8375 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8376 		bnx2_get_5709_media(bp);
8377 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8378 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8379 
8380 	bp->phy_port = PORT_TP;
8381 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8382 		bp->phy_port = PORT_FIBRE;
8383 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8384 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8385 			bp->flags |= BNX2_FLAG_NO_WOL;
8386 			bp->wol = 0;
8387 		}
8388 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8389 			/* Don't do parallel detect on this board because of
8390 			 * some board problems.  The link will not go down
8391 			 * if we do parallel detect.
8392 			 */
8393 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8394 			    pdev->subsystem_device == 0x310c)
8395 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8396 		} else {
8397 			bp->phy_addr = 2;
8398 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8399 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8400 		}
8401 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8402 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8403 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8404 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8405 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8406 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8407 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8408 
8409 	bnx2_init_fw_cap(bp);
8410 
8411 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8412 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8413 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8414 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8415 		bp->flags |= BNX2_FLAG_NO_WOL;
8416 		bp->wol = 0;
8417 	}
8418 
8419 	if (bp->flags & BNX2_FLAG_NO_WOL)
8420 		device_set_wakeup_capable(&bp->pdev->dev, false);
8421 	else
8422 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8423 
8424 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8425 		bp->tx_quick_cons_trip_int =
8426 			bp->tx_quick_cons_trip;
8427 		bp->tx_ticks_int = bp->tx_ticks;
8428 		bp->rx_quick_cons_trip_int =
8429 			bp->rx_quick_cons_trip;
8430 		bp->rx_ticks_int = bp->rx_ticks;
8431 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8432 		bp->com_ticks_int = bp->com_ticks;
8433 		bp->cmd_ticks_int = bp->cmd_ticks;
8434 	}
8435 
8436 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8437 	 *
8438 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8439 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8440 	 * but causes problems on the AMD 8132 which will eventually stop
8441 	 * responding after a while.
8442 	 *
8443 	 * AMD believes this incompatibility is unique to the 5706, and
8444 	 * prefers to locally disable MSI rather than globally disabling it.
8445 	 */
8446 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8447 		struct pci_dev *amd_8132 = NULL;
8448 
8449 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8450 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8451 						  amd_8132))) {
8452 
8453 			if (amd_8132->revision >= 0x10 &&
8454 			    amd_8132->revision <= 0x13) {
8455 				disable_msi = 1;
8456 				pci_dev_put(amd_8132);
8457 				break;
8458 			}
8459 		}
8460 	}
8461 
8462 	bnx2_set_default_link(bp);
8463 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8464 
8465 	init_timer(&bp->timer);
8466 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8467 	bp->timer.data = (unsigned long) bp;
8468 	bp->timer.function = bnx2_timer;
8469 
8470 #ifdef BCM_CNIC
8471 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8472 		bp->cnic_eth_dev.max_iscsi_conn =
8473 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8474 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8475 	bp->cnic_probe = bnx2_cnic_probe;
8476 #endif
8477 	pci_save_state(pdev);
8478 
8479 	return 0;
8480 
8481 err_out_unmap:
8482 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8483 		pci_disable_pcie_error_reporting(pdev);
8484 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8485 	}
8486 
8487 	pci_iounmap(pdev, bp->regview);
8488 	bp->regview = NULL;
8489 
8490 err_out_release:
8491 	pci_release_regions(pdev);
8492 
8493 err_out_disable:
8494 	pci_disable_device(pdev);
8495 
8496 err_out:
8497 	kfree(bp->temp_stats_blk);
8498 
8499 	return rc;
8500 }
8501 
8502 static char *
8503 bnx2_bus_string(struct bnx2 *bp, char *str)
8504 {
8505 	char *s = str;
8506 
8507 	if (bp->flags & BNX2_FLAG_PCIE) {
8508 		s += sprintf(s, "PCI Express");
8509 	} else {
8510 		s += sprintf(s, "PCI");
8511 		if (bp->flags & BNX2_FLAG_PCIX)
8512 			s += sprintf(s, "-X");
8513 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8514 			s += sprintf(s, " 32-bit");
8515 		else
8516 			s += sprintf(s, " 64-bit");
8517 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8518 	}
8519 	return str;
8520 }
8521 
8522 static void
8523 bnx2_del_napi(struct bnx2 *bp)
8524 {
8525 	int i;
8526 
8527 	for (i = 0; i < bp->irq_nvecs; i++)
8528 		netif_napi_del(&bp->bnx2_napi[i].napi);
8529 }
8530 
8531 static void
8532 bnx2_init_napi(struct bnx2 *bp)
8533 {
8534 	int i;
8535 
8536 	for (i = 0; i < bp->irq_nvecs; i++) {
8537 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8538 		int (*poll)(struct napi_struct *, int);
8539 
8540 		if (i == 0)
8541 			poll = bnx2_poll;
8542 		else
8543 			poll = bnx2_poll_msix;
8544 
8545 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8546 		bnapi->bp = bp;
8547 	}
8548 }
8549 
8550 static const struct net_device_ops bnx2_netdev_ops = {
8551 	.ndo_open		= bnx2_open,
8552 	.ndo_start_xmit		= bnx2_start_xmit,
8553 	.ndo_stop		= bnx2_close,
8554 	.ndo_get_stats64	= bnx2_get_stats64,
8555 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8556 	.ndo_do_ioctl		= bnx2_ioctl,
8557 	.ndo_validate_addr	= eth_validate_addr,
8558 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8559 	.ndo_change_mtu		= bnx2_change_mtu,
8560 	.ndo_set_features	= bnx2_set_features,
8561 	.ndo_tx_timeout		= bnx2_tx_timeout,
8562 #ifdef CONFIG_NET_POLL_CONTROLLER
8563 	.ndo_poll_controller	= poll_bnx2,
8564 #endif
8565 };
8566 
8567 static int
8568 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8569 {
8570 	static int version_printed = 0;
8571 	struct net_device *dev;
8572 	struct bnx2 *bp;
8573 	int rc;
8574 	char str[40];
8575 
8576 	if (version_printed++ == 0)
8577 		pr_info("%s", version);
8578 
8579 	/* dev zeroed in init_etherdev */
8580 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8581 	if (!dev)
8582 		return -ENOMEM;
8583 
8584 	rc = bnx2_init_board(pdev, dev);
8585 	if (rc < 0)
8586 		goto err_free;
8587 
8588 	dev->netdev_ops = &bnx2_netdev_ops;
8589 	dev->watchdog_timeo = TX_TIMEOUT;
8590 	dev->ethtool_ops = &bnx2_ethtool_ops;
8591 
8592 	bp = netdev_priv(dev);
8593 
8594 	pci_set_drvdata(pdev, dev);
8595 
8596 	/*
8597 	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
8598 	 * New io-page table has been created before bnx2 does reset at open stage.
8599 	 * We have to wait for the in-flight DMA to complete to avoid it look up
8600 	 * into the newly created io-page table.
8601 	 */
8602 	if (is_kdump_kernel())
8603 		bnx2_wait_dma_complete(bp);
8604 
8605 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8606 
8607 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8608 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8609 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8610 
8611 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8612 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8613 
8614 	dev->vlan_features = dev->hw_features;
8615 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8616 	dev->features |= dev->hw_features;
8617 	dev->priv_flags |= IFF_UNICAST_FLT;
8618 	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8619 	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8620 
8621 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8622 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8623 
8624 	if ((rc = register_netdev(dev))) {
8625 		dev_err(&pdev->dev, "Cannot register net device\n");
8626 		goto error;
8627 	}
8628 
8629 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8630 		    "node addr %pM\n", board_info[ent->driver_data].name,
8631 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8632 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8633 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8634 		    pdev->irq, dev->dev_addr);
8635 
8636 	return 0;
8637 
8638 error:
8639 	pci_iounmap(pdev, bp->regview);
8640 	pci_release_regions(pdev);
8641 	pci_disable_device(pdev);
8642 err_free:
8643 	bnx2_free_stats_blk(dev);
8644 	free_netdev(dev);
8645 	return rc;
8646 }
8647 
8648 static void
8649 bnx2_remove_one(struct pci_dev *pdev)
8650 {
8651 	struct net_device *dev = pci_get_drvdata(pdev);
8652 	struct bnx2 *bp = netdev_priv(dev);
8653 
8654 	unregister_netdev(dev);
8655 
8656 	del_timer_sync(&bp->timer);
8657 	cancel_work_sync(&bp->reset_task);
8658 
8659 	pci_iounmap(bp->pdev, bp->regview);
8660 
8661 	bnx2_free_stats_blk(dev);
8662 	kfree(bp->temp_stats_blk);
8663 
8664 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8665 		pci_disable_pcie_error_reporting(pdev);
8666 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8667 	}
8668 
8669 	bnx2_release_firmware(bp);
8670 
8671 	free_netdev(dev);
8672 
8673 	pci_release_regions(pdev);
8674 	pci_disable_device(pdev);
8675 }
8676 
8677 #ifdef CONFIG_PM_SLEEP
8678 static int
8679 bnx2_suspend(struct device *device)
8680 {
8681 	struct pci_dev *pdev = to_pci_dev(device);
8682 	struct net_device *dev = pci_get_drvdata(pdev);
8683 	struct bnx2 *bp = netdev_priv(dev);
8684 
8685 	if (netif_running(dev)) {
8686 		cancel_work_sync(&bp->reset_task);
8687 		bnx2_netif_stop(bp, true);
8688 		netif_device_detach(dev);
8689 		del_timer_sync(&bp->timer);
8690 		bnx2_shutdown_chip(bp);
8691 		__bnx2_free_irq(bp);
8692 		bnx2_free_skbs(bp);
8693 	}
8694 	bnx2_setup_wol(bp);
8695 	return 0;
8696 }
8697 
8698 static int
8699 bnx2_resume(struct device *device)
8700 {
8701 	struct pci_dev *pdev = to_pci_dev(device);
8702 	struct net_device *dev = pci_get_drvdata(pdev);
8703 	struct bnx2 *bp = netdev_priv(dev);
8704 
8705 	if (!netif_running(dev))
8706 		return 0;
8707 
8708 	bnx2_set_power_state(bp, PCI_D0);
8709 	netif_device_attach(dev);
8710 	bnx2_request_irq(bp);
8711 	bnx2_init_nic(bp, 1);
8712 	bnx2_netif_start(bp, true);
8713 	return 0;
8714 }
8715 
8716 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8717 #define BNX2_PM_OPS (&bnx2_pm_ops)
8718 
8719 #else
8720 
8721 #define BNX2_PM_OPS NULL
8722 
8723 #endif /* CONFIG_PM_SLEEP */
8724 /**
8725  * bnx2_io_error_detected - called when PCI error is detected
8726  * @pdev: Pointer to PCI device
8727  * @state: The current pci connection state
8728  *
8729  * This function is called after a PCI bus error affecting
8730  * this device has been detected.
8731  */
8732 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8733 					       pci_channel_state_t state)
8734 {
8735 	struct net_device *dev = pci_get_drvdata(pdev);
8736 	struct bnx2 *bp = netdev_priv(dev);
8737 
8738 	rtnl_lock();
8739 	netif_device_detach(dev);
8740 
8741 	if (state == pci_channel_io_perm_failure) {
8742 		rtnl_unlock();
8743 		return PCI_ERS_RESULT_DISCONNECT;
8744 	}
8745 
8746 	if (netif_running(dev)) {
8747 		bnx2_netif_stop(bp, true);
8748 		del_timer_sync(&bp->timer);
8749 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8750 	}
8751 
8752 	pci_disable_device(pdev);
8753 	rtnl_unlock();
8754 
8755 	/* Request a slot slot reset. */
8756 	return PCI_ERS_RESULT_NEED_RESET;
8757 }
8758 
8759 /**
8760  * bnx2_io_slot_reset - called after the pci bus has been reset.
8761  * @pdev: Pointer to PCI device
8762  *
8763  * Restart the card from scratch, as if from a cold-boot.
8764  */
8765 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8766 {
8767 	struct net_device *dev = pci_get_drvdata(pdev);
8768 	struct bnx2 *bp = netdev_priv(dev);
8769 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8770 	int err = 0;
8771 
8772 	rtnl_lock();
8773 	if (pci_enable_device(pdev)) {
8774 		dev_err(&pdev->dev,
8775 			"Cannot re-enable PCI device after reset\n");
8776 	} else {
8777 		pci_set_master(pdev);
8778 		pci_restore_state(pdev);
8779 		pci_save_state(pdev);
8780 
8781 		if (netif_running(dev))
8782 			err = bnx2_init_nic(bp, 1);
8783 
8784 		if (!err)
8785 			result = PCI_ERS_RESULT_RECOVERED;
8786 	}
8787 
8788 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8789 		bnx2_napi_enable(bp);
8790 		dev_close(dev);
8791 	}
8792 	rtnl_unlock();
8793 
8794 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8795 		return result;
8796 
8797 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8798 	if (err) {
8799 		dev_err(&pdev->dev,
8800 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8801 			 err); /* non-fatal, continue */
8802 	}
8803 
8804 	return result;
8805 }
8806 
8807 /**
8808  * bnx2_io_resume - called when traffic can start flowing again.
8809  * @pdev: Pointer to PCI device
8810  *
8811  * This callback is called when the error recovery driver tells us that
8812  * its OK to resume normal operation.
8813  */
8814 static void bnx2_io_resume(struct pci_dev *pdev)
8815 {
8816 	struct net_device *dev = pci_get_drvdata(pdev);
8817 	struct bnx2 *bp = netdev_priv(dev);
8818 
8819 	rtnl_lock();
8820 	if (netif_running(dev))
8821 		bnx2_netif_start(bp, true);
8822 
8823 	netif_device_attach(dev);
8824 	rtnl_unlock();
8825 }
8826 
8827 static void bnx2_shutdown(struct pci_dev *pdev)
8828 {
8829 	struct net_device *dev = pci_get_drvdata(pdev);
8830 	struct bnx2 *bp;
8831 
8832 	if (!dev)
8833 		return;
8834 
8835 	bp = netdev_priv(dev);
8836 	if (!bp)
8837 		return;
8838 
8839 	rtnl_lock();
8840 	if (netif_running(dev))
8841 		dev_close(bp->dev);
8842 
8843 	if (system_state == SYSTEM_POWER_OFF)
8844 		bnx2_set_power_state(bp, PCI_D3hot);
8845 
8846 	rtnl_unlock();
8847 }
8848 
8849 static const struct pci_error_handlers bnx2_err_handler = {
8850 	.error_detected	= bnx2_io_error_detected,
8851 	.slot_reset	= bnx2_io_slot_reset,
8852 	.resume		= bnx2_io_resume,
8853 };
8854 
8855 static struct pci_driver bnx2_pci_driver = {
8856 	.name		= DRV_MODULE_NAME,
8857 	.id_table	= bnx2_pci_tbl,
8858 	.probe		= bnx2_init_one,
8859 	.remove		= bnx2_remove_one,
8860 	.driver.pm	= BNX2_PM_OPS,
8861 	.err_handler	= &bnx2_err_handler,
8862 	.shutdown	= bnx2_shutdown,
8863 };
8864 
8865 module_pci_driver(bnx2_pci_driver);
8866