1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53 
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60 
61 #define DRV_MODULE_NAME		"bnx2"
62 #define DRV_MODULE_VERSION	"2.2.6"
63 #define DRV_MODULE_RELDATE	"January 29, 2014"
64 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
65 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
66 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
67 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
68 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
69 
70 #define RUN_AT(x) (jiffies + (x))
71 
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74 
75 static char version[] =
76 	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87 
88 static int disable_msi = 0;
89 
90 module_param(disable_msi, int, 0444);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 
93 typedef enum {
94 	BCM5706 = 0,
95 	NC370T,
96 	NC370I,
97 	BCM5706S,
98 	NC370F,
99 	BCM5708,
100 	BCM5708S,
101 	BCM5709,
102 	BCM5709S,
103 	BCM5716,
104 	BCM5716S,
105 } board_t;
106 
107 /* indexed by board_t, above */
108 static struct {
109 	char *name;
110 } board_info[] = {
111 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
112 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
113 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
121 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122 };
123 
124 static const struct pci_device_id bnx2_pci_tbl[] = {
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
146 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147 	{ 0, }
148 };
149 
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
154 	/* Slow EEPROM */
155 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 	 "EEPROM - slow"},
159 	/* Expansion entry 0001 */
160 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 	 "Entry 0001"},
164 	/* Saifun SA25F010 (non-buffered flash) */
165 	/* strap, cfg1, & write1 need updates */
166 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169 	 "Non-buffered flash (128kB)"},
170 	/* Saifun SA25F020 (non-buffered flash) */
171 	/* strap, cfg1, & write1 need updates */
172 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175 	 "Non-buffered flash (256kB)"},
176 	/* Expansion entry 0100 */
177 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 	 "Entry 0100"},
181 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
186 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
187 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
191 	/* Saifun SA25F005 (non-buffered flash) */
192 	/* strap, cfg1, & write1 need updates */
193 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196 	 "Non-buffered flash (64kB)"},
197 	/* Fast EEPROM */
198 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 	 "EEPROM - fast"},
202 	/* Expansion entry 1001 */
203 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 	 "Entry 1001"},
207 	/* Expansion entry 1010 */
208 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 	 "Entry 1010"},
212 	/* ATMEL AT45DB011B (buffered flash) */
213 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216 	 "Buffered flash (128kB)"},
217 	/* Expansion entry 1100 */
218 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 	 "Entry 1100"},
222 	/* Expansion entry 1101 */
223 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 	 "Entry 1101"},
227 	/* Atmel Expansion entry 1110 */
228 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231 	 "Entry 1110 (Atmel)"},
232 	/* ATMEL AT45DB021B (buffered flash) */
233 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236 	 "Buffered flash (256kB)"},
237 };
238 
239 static const struct flash_spec flash_5709 = {
240 	.flags		= BNX2_NV_BUFFERED,
241 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
242 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
243 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
244 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
245 	.name		= "5709 Buffered flash (256kB)",
246 };
247 
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249 
250 static void bnx2_init_napi(struct bnx2 *bp);
251 static void bnx2_del_napi(struct bnx2 *bp);
252 
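/* Return the number of free TX descriptors.  Runs locklessly, so
 * tx_prod and tx_cons are each sampled exactly once with READ_ONCE().
 */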
253 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
254 {
255 	u32 diff;
256 
257 	/* The ring uses 256 indices for 255 entries; one of them
258 	 * needs to be skipped.
259 	 */
260 	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
261 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
262 		diff &= 0xffff;
263 		if (diff == BNX2_TX_DESC_CNT)
264 			diff = BNX2_MAX_TX_DESC_CNT;
265 	}
266 	return bp->tx_ring_size - diff;
267 }
268 
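/* Indirect register access through the PCI config window.  The
 * address/data pair is serialized with indirect_lock.
 */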
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272 	unsigned long flags;
273 	u32 val;
274 
275 	spin_lock_irqsave(&bp->indirect_lock, flags);
276 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
279 	return val;
280 }
281 
282 static void
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 {
285 	unsigned long flags;
286 
287 	spin_lock_irqsave(&bp->indirect_lock, flags);
288 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
289 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
290 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
291 }
292 
293 static void
294 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
295 {
296 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
297 }
298 
299 static u32
300 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301 {
302 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
303 }
304 
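/* Write one dword of context memory.  The 5709 posts the write through
 * CTX_CTX_DATA/CTX_CTX_CTRL and polls for completion; older chips use
 * the simple CTX_DATA_ADR/CTX_DATA window.
 */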
305 static void
306 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 {
308 	unsigned long flags;
309 
310 	offset += cid_addr;
311 	spin_lock_irqsave(&bp->indirect_lock, flags);
312 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
313 		int i;
314 
315 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
316 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
317 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
318 		for (i = 0; i < 5; i++) {
319 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
320 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
321 				break;
322 			udelay(5);
323 		}
324 	} else {
325 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
326 		BNX2_WR(bp, BNX2_CTX_DATA, val);
327 	}
328 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
329 }
330 
331 #ifdef BCM_CNIC
332 static int
333 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
334 {
335 	struct bnx2 *bp = netdev_priv(dev);
336 	struct drv_ctl_io *io = &info->data.io;
337 
338 	switch (info->cmd) {
339 	case DRV_CTL_IO_WR_CMD:
340 		bnx2_reg_wr_ind(bp, io->offset, io->data);
341 		break;
342 	case DRV_CTL_IO_RD_CMD:
343 		io->data = bnx2_reg_rd_ind(bp, io->offset);
344 		break;
345 	case DRV_CTL_CTX_WR_CMD:
346 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
347 		break;
348 	default:
349 		return -EINVAL;
350 	}
351 	return 0;
352 }
353 
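/* Tell the CNIC driver which IRQ vector and status block to use: the
 * vector after the last networking vector when MSI-X is enabled,
 * otherwise the shared vector 0 status block.
 */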
354 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
355 {
356 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
357 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
358 	int sb_id;
359 
360 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
361 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
362 		bnapi->cnic_present = 0;
363 		sb_id = bp->irq_nvecs;
364 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
365 	} else {
366 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
367 		bnapi->cnic_tag = bnapi->last_status_idx;
368 		bnapi->cnic_present = 1;
369 		sb_id = 0;
370 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
371 	}
372 
373 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
374 	cp->irq_arr[0].status_blk = (void *)
375 		((unsigned long) bnapi->status_blk.msi +
376 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
377 	cp->irq_arr[0].status_blk_num = sb_id;
378 	cp->num_irq = 1;
379 }
380 
381 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
382 			      void *data)
383 {
384 	struct bnx2 *bp = netdev_priv(dev);
385 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 
387 	if (!ops)
388 		return -EINVAL;
389 
390 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
391 		return -EBUSY;
392 
393 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
394 		return -ENODEV;
395 
396 	bp->cnic_data = data;
397 	rcu_assign_pointer(bp->cnic_ops, ops);
398 
399 	cp->num_irq = 0;
400 	cp->drv_state = CNIC_DRV_STATE_REGD;
401 
402 	bnx2_setup_cnic_irq_info(bp);
403 
404 	return 0;
405 }
406 
407 static int bnx2_unregister_cnic(struct net_device *dev)
408 {
409 	struct bnx2 *bp = netdev_priv(dev);
410 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
411 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412 
413 	mutex_lock(&bp->cnic_lock);
414 	cp->drv_state = 0;
415 	bnapi->cnic_present = 0;
416 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
417 	mutex_unlock(&bp->cnic_lock);
418 	synchronize_rcu();
419 	return 0;
420 }
421 
422 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
423 {
424 	struct bnx2 *bp = netdev_priv(dev);
425 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
426 
427 	if (!cp->max_iscsi_conn)
428 		return NULL;
429 
430 	cp->drv_owner = THIS_MODULE;
431 	cp->chip_id = bp->chip_id;
432 	cp->pdev = bp->pdev;
433 	cp->io_base = bp->regview;
434 	cp->drv_ctl = bnx2_drv_ctl;
435 	cp->drv_register_cnic = bnx2_register_cnic;
436 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
437 
438 	return cp;
439 }
440 
441 static void
442 bnx2_cnic_stop(struct bnx2 *bp)
443 {
444 	struct cnic_ops *c_ops;
445 	struct cnic_ctl_info info;
446 
447 	mutex_lock(&bp->cnic_lock);
448 	c_ops = rcu_dereference_protected(bp->cnic_ops,
449 					  lockdep_is_held(&bp->cnic_lock));
450 	if (c_ops) {
451 		info.cmd = CNIC_CTL_STOP_CMD;
452 		c_ops->cnic_ctl(bp->cnic_data, &info);
453 	}
454 	mutex_unlock(&bp->cnic_lock);
455 }
456 
457 static void
458 bnx2_cnic_start(struct bnx2 *bp)
459 {
460 	struct cnic_ops *c_ops;
461 	struct cnic_ctl_info info;
462 
463 	mutex_lock(&bp->cnic_lock);
464 	c_ops = rcu_dereference_protected(bp->cnic_ops,
465 					  lockdep_is_held(&bp->cnic_lock));
466 	if (c_ops) {
467 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
468 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
469 
470 			bnapi->cnic_tag = bnapi->last_status_idx;
471 		}
472 		info.cmd = CNIC_CTL_START_CMD;
473 		c_ops->cnic_ctl(bp->cnic_data, &info);
474 	}
475 	mutex_unlock(&bp->cnic_lock);
476 }
477 
478 #else
479 
480 static void
481 bnx2_cnic_stop(struct bnx2 *bp)
482 {
483 }
484 
485 static void
486 bnx2_cnic_start(struct bnx2 *bp)
487 {
488 }
489 
490 #endif
491 
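/* Read a PHY register through the EMAC MDIO interface.  Hardware
 * auto-polling of the PHY is suspended for the duration of the access
 * and then restored.
 */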
492 static int
493 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
494 {
495 	u32 val1;
496 	int i, ret;
497 
498 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
499 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
500 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
501 
502 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
503 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
504 
505 		udelay(40);
506 	}
507 
508 	val1 = (bp->phy_addr << 21) | (reg << 16) |
509 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
510 		BNX2_EMAC_MDIO_COMM_START_BUSY;
511 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
512 
513 	for (i = 0; i < 50; i++) {
514 		udelay(10);
515 
516 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
517 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
518 			udelay(5);
519 
520 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
521 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
522 
523 			break;
524 		}
525 	}
526 
527 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
528 		*val = 0x0;
529 		ret = -EBUSY;
530 	}
531 	else {
532 		*val = val1;
533 		ret = 0;
534 	}
535 
536 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
537 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
538 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
539 
540 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
541 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
542 
543 		udelay(40);
544 	}
545 
546 	return ret;
547 }
548 
549 static int
550 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
551 {
552 	u32 val1;
553 	int i, ret;
554 
555 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
556 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
557 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
558 
559 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
560 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
561 
562 		udelay(40);
563 	}
564 
565 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
566 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
567 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
568 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
569 
570 	for (i = 0; i < 50; i++) {
571 		udelay(10);
572 
573 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
574 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
575 			udelay(5);
576 			break;
577 		}
578 	}
579 
580 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
581 		ret = -EBUSY;
582 	else
583 		ret = 0;
584 
585 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
586 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
587 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
588 
589 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
590 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
591 
592 		udelay(40);
593 	}
594 
595 	return ret;
596 }
597 
598 static void
599 bnx2_disable_int(struct bnx2 *bp)
600 {
601 	int i;
602 	struct bnx2_napi *bnapi;
603 
604 	for (i = 0; i < bp->irq_nvecs; i++) {
605 		bnapi = &bp->bnx2_napi[i];
606 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
607 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
608 	}
609 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
610 }
611 
612 static void
613 bnx2_enable_int(struct bnx2 *bp)
614 {
615 	int i;
616 	struct bnx2_napi *bnapi;
617 
618 	for (i = 0; i < bp->irq_nvecs; i++) {
619 		bnapi = &bp->bnx2_napi[i];
620 
621 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
622 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
623 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
624 			bnapi->last_status_idx);
625 
626 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
627 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
628 			bnapi->last_status_idx);
629 	}
630 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
631 }
632 
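/* Mask all interrupt vectors and wait for in-flight handlers to
 * finish.  intr_sem stays raised until bnx2_netif_start() drops it.
 */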
633 static void
634 bnx2_disable_int_sync(struct bnx2 *bp)
635 {
636 	int i;
637 
638 	atomic_inc(&bp->intr_sem);
639 	if (!netif_running(bp->dev))
640 		return;
641 
642 	bnx2_disable_int(bp);
643 	for (i = 0; i < bp->irq_nvecs; i++)
644 		synchronize_irq(bp->irq_tbl[i].vector);
645 }
646 
647 static void
648 bnx2_napi_disable(struct bnx2 *bp)
649 {
650 	int i;
651 
652 	for (i = 0; i < bp->irq_nvecs; i++)
653 		napi_disable(&bp->bnx2_napi[i].napi);
654 }
655 
656 static void
657 bnx2_napi_enable(struct bnx2 *bp)
658 {
659 	int i;
660 
661 	for (i = 0; i < bp->irq_nvecs; i++)
662 		napi_enable(&bp->bnx2_napi[i].napi);
663 }
664 
665 static void
666 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
667 {
668 	if (stop_cnic)
669 		bnx2_cnic_stop(bp);
670 	if (netif_running(bp->dev)) {
671 		bnx2_napi_disable(bp);
672 		netif_tx_disable(bp->dev);
673 	}
674 	bnx2_disable_int_sync(bp);
675 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
676 }
677 
678 static void
679 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
680 {
681 	if (atomic_dec_and_test(&bp->intr_sem)) {
682 		if (netif_running(bp->dev)) {
683 			netif_tx_wake_all_queues(bp->dev);
684 			spin_lock_bh(&bp->phy_lock);
685 			if (bp->link_up)
686 				netif_carrier_on(bp->dev);
687 			spin_unlock_bh(&bp->phy_lock);
688 			bnx2_napi_enable(bp);
689 			bnx2_enable_int(bp);
690 			if (start_cnic)
691 				bnx2_cnic_start(bp);
692 		}
693 	}
694 }
695 
696 static void
697 bnx2_free_tx_mem(struct bnx2 *bp)
698 {
699 	int i;
700 
701 	for (i = 0; i < bp->num_tx_rings; i++) {
702 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
703 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
704 
705 		if (txr->tx_desc_ring) {
706 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
707 					  txr->tx_desc_ring,
708 					  txr->tx_desc_mapping);
709 			txr->tx_desc_ring = NULL;
710 		}
711 		kfree(txr->tx_buf_ring);
712 		txr->tx_buf_ring = NULL;
713 	}
714 }
715 
716 static void
717 bnx2_free_rx_mem(struct bnx2 *bp)
718 {
719 	int i;
720 
721 	for (i = 0; i < bp->num_rx_rings; i++) {
722 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
723 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
724 		int j;
725 
726 		for (j = 0; j < bp->rx_max_ring; j++) {
727 			if (rxr->rx_desc_ring[j])
728 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729 						  rxr->rx_desc_ring[j],
730 						  rxr->rx_desc_mapping[j]);
731 			rxr->rx_desc_ring[j] = NULL;
732 		}
733 		vfree(rxr->rx_buf_ring);
734 		rxr->rx_buf_ring = NULL;
735 
736 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
737 			if (rxr->rx_pg_desc_ring[j])
738 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
739 						  rxr->rx_pg_desc_ring[j],
740 						  rxr->rx_pg_desc_mapping[j]);
741 			rxr->rx_pg_desc_ring[j] = NULL;
742 		}
743 		vfree(rxr->rx_pg_ring);
744 		rxr->rx_pg_ring = NULL;
745 	}
746 }
747 
748 static int
749 bnx2_alloc_tx_mem(struct bnx2 *bp)
750 {
751 	int i;
752 
753 	for (i = 0; i < bp->num_tx_rings; i++) {
754 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
755 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
756 
757 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
758 		if (!txr->tx_buf_ring)
759 			return -ENOMEM;
760 
761 		txr->tx_desc_ring =
762 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
763 					   &txr->tx_desc_mapping, GFP_KERNEL);
764 		if (!txr->tx_desc_ring)
765 			return -ENOMEM;
766 	}
767 	return 0;
768 }
769 
770 static int
771 bnx2_alloc_rx_mem(struct bnx2 *bp)
772 {
773 	int i;
774 
775 	for (i = 0; i < bp->num_rx_rings; i++) {
776 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
777 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
778 		int j;
779 
780 		rxr->rx_buf_ring =
781 			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
782 		if (!rxr->rx_buf_ring)
783 			return -ENOMEM;
784 
785 		for (j = 0; j < bp->rx_max_ring; j++) {
786 			rxr->rx_desc_ring[j] =
787 				dma_alloc_coherent(&bp->pdev->dev,
788 						   RXBD_RING_SIZE,
789 						   &rxr->rx_desc_mapping[j],
790 						   GFP_KERNEL);
791 			if (!rxr->rx_desc_ring[j])
792 				return -ENOMEM;
794 		}
795 
796 		if (bp->rx_pg_ring_size) {
797 			rxr->rx_pg_ring =
798 				vzalloc(array_size(SW_RXPG_RING_SIZE,
799 						   bp->rx_max_pg_ring));
800 			if (!rxr->rx_pg_ring)
801 				return -ENOMEM;
803 		}
804 
805 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
806 			rxr->rx_pg_desc_ring[j] =
807 				dma_alloc_coherent(&bp->pdev->dev,
808 						   RXBD_RING_SIZE,
809 						   &rxr->rx_pg_desc_mapping[j],
810 						   GFP_KERNEL);
811 			if (!rxr->rx_pg_desc_ring[j])
812 				return -ENOMEM;
814 		}
815 	}
816 	return 0;
817 }
818 
819 static void
820 bnx2_free_stats_blk(struct net_device *dev)
821 {
822 	struct bnx2 *bp = netdev_priv(dev);
823 
824 	if (bp->status_blk) {
825 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
826 				  bp->status_blk,
827 				  bp->status_blk_mapping);
828 		bp->status_blk = NULL;
829 		bp->stats_blk = NULL;
830 	}
831 }
832 
833 static int
834 bnx2_alloc_stats_blk(struct net_device *dev)
835 {
836 	int status_blk_size;
837 	void *status_blk;
838 	struct bnx2 *bp = netdev_priv(dev);
839 
840 	/* Combine status and statistics blocks into one allocation. */
841 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
842 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
843 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
844 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 	bp->status_stats_size = status_blk_size +
846 				sizeof(struct statistics_block);
847 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
848 					&bp->status_blk_mapping, GFP_KERNEL);
849 	if (!status_blk)
850 		return -ENOMEM;
851 
852 	bp->status_blk = status_blk;
853 	bp->stats_blk = status_blk + status_blk_size;
854 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
855 
856 	return 0;
857 }
858 
859 static void
860 bnx2_free_mem(struct bnx2 *bp)
861 {
862 	int i;
863 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
864 
865 	bnx2_free_tx_mem(bp);
866 	bnx2_free_rx_mem(bp);
867 
868 	for (i = 0; i < bp->ctx_pages; i++) {
869 		if (bp->ctx_blk[i]) {
870 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
871 					  bp->ctx_blk[i],
872 					  bp->ctx_blk_mapping[i]);
873 			bp->ctx_blk[i] = NULL;
874 		}
875 	}
876 
877 	bnapi->status_blk.msi = NULL;
879 }
880 
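/* Attach each NAPI instance to its slice of the status block, then
 * allocate the 5709 context pages and the RX/TX ring memory.
 */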
881 static int
882 bnx2_alloc_mem(struct bnx2 *bp)
883 {
884 	int i, err;
885 	struct bnx2_napi *bnapi;
886 
887 	bnapi = &bp->bnx2_napi[0];
888 	bnapi->status_blk.msi = bp->status_blk;
889 	bnapi->hw_tx_cons_ptr =
890 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
891 	bnapi->hw_rx_cons_ptr =
892 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
893 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
894 		for (i = 1; i < bp->irq_nvecs; i++) {
895 			struct status_block_msix *sblk;
896 
897 			bnapi = &bp->bnx2_napi[i];
898 
899 			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
900 			bnapi->status_blk.msix = sblk;
901 			bnapi->hw_tx_cons_ptr =
902 				&sblk->status_tx_quick_consumer_index;
903 			bnapi->hw_rx_cons_ptr =
904 				&sblk->status_rx_quick_consumer_index;
905 			bnapi->int_num = i << 24;
906 		}
907 	}
908 
909 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
910 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
911 		if (bp->ctx_pages == 0)
912 			bp->ctx_pages = 1;
913 		for (i = 0; i < bp->ctx_pages; i++) {
914 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
915 						BNX2_PAGE_SIZE,
916 						&bp->ctx_blk_mapping[i],
917 						GFP_KERNEL);
918 			if (!bp->ctx_blk[i])
919 				goto alloc_mem_err;
920 		}
921 	}
922 
923 	err = bnx2_alloc_rx_mem(bp);
924 	if (err)
925 		goto alloc_mem_err;
926 
927 	err = bnx2_alloc_tx_mem(bp);
928 	if (err)
929 		goto alloc_mem_err;
930 
931 	return 0;
932 
933 alloc_mem_err:
934 	bnx2_free_mem(bp);
935 	return -ENOMEM;
936 }
937 
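/* Report the resolved link state to the bootcode via shared memory. */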
938 static void
939 bnx2_report_fw_link(struct bnx2 *bp)
940 {
941 	u32 fw_link_status = 0;
942 
943 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
944 		return;
945 
946 	if (bp->link_up) {
947 		u32 bmsr;
948 
949 		switch (bp->line_speed) {
950 		case SPEED_10:
951 			if (bp->duplex == DUPLEX_HALF)
952 				fw_link_status = BNX2_LINK_STATUS_10HALF;
953 			else
954 				fw_link_status = BNX2_LINK_STATUS_10FULL;
955 			break;
956 		case SPEED_100:
957 			if (bp->duplex == DUPLEX_HALF)
958 				fw_link_status = BNX2_LINK_STATUS_100HALF;
959 			else
960 				fw_link_status = BNX2_LINK_STATUS_100FULL;
961 			break;
962 		case SPEED_1000:
963 			if (bp->duplex == DUPLEX_HALF)
964 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
965 			else
966 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
967 			break;
968 		case SPEED_2500:
969 			if (bp->duplex == DUPLEX_HALF)
970 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
971 			else
972 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
973 			break;
974 		}
975 
976 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
977 
978 		if (bp->autoneg) {
979 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
980 
981 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
982 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
983 
984 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
985 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
986 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
987 			else
988 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
989 		}
990 	}
991 	else
992 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
993 
994 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
995 }
996 
997 static char *
998 bnx2_xceiver_str(struct bnx2 *bp)
999 {
1000 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
1001 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1002 		 "Copper");
1003 }
1004 
1005 static void
1006 bnx2_report_link(struct bnx2 *bp)
1007 {
1008 	if (bp->link_up) {
1009 		netif_carrier_on(bp->dev);
1010 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1011 			    bnx2_xceiver_str(bp),
1012 			    bp->line_speed,
1013 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1014 
1015 		if (bp->flow_ctrl) {
1016 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1017 				pr_cont(", receive ");
1018 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1019 					pr_cont("& transmit ");
1020 			}
1021 			else {
1022 				pr_cont(", transmit ");
1023 			}
1024 			pr_cont("flow control ON");
1025 		}
1026 		pr_cont("\n");
1027 	} else {
1028 		netif_carrier_off(bp->dev);
1029 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1030 			   bnx2_xceiver_str(bp));
1031 	}
1032 
1033 	bnx2_report_fw_link(bp);
1034 }
1035 
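/* Resolve flow control from the local and link partner pause
 * advertisements, or apply the forced settings when flow control
 * autoneg is off.
 */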
1036 static void
1037 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1038 {
1039 	u32 local_adv, remote_adv;
1040 
1041 	bp->flow_ctrl = 0;
1042 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1043 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1044 
1045 		if (bp->duplex == DUPLEX_FULL) {
1046 			bp->flow_ctrl = bp->req_flow_ctrl;
1047 		}
1048 		return;
1049 	}
1050 
1051 	if (bp->duplex != DUPLEX_FULL) {
1052 		return;
1053 	}
1054 
1055 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1056 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1057 		u32 val;
1058 
1059 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1060 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1061 			bp->flow_ctrl |= FLOW_CTRL_TX;
1062 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1063 			bp->flow_ctrl |= FLOW_CTRL_RX;
1064 		return;
1065 	}
1066 
1067 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1068 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1069 
1070 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1071 		u32 new_local_adv = 0;
1072 		u32 new_remote_adv = 0;
1073 
1074 		if (local_adv & ADVERTISE_1000XPAUSE)
1075 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1076 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1077 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1078 		if (remote_adv & ADVERTISE_1000XPAUSE)
1079 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1080 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1081 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1082 
1083 		local_adv = new_local_adv;
1084 		remote_adv = new_remote_adv;
1085 	}
1086 
1087 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1088 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1089 		if (local_adv & ADVERTISE_PAUSE_ASYM) {
1090 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1091 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1092 			}
1093 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1094 				bp->flow_ctrl = FLOW_CTRL_RX;
1095 			}
1096 		}
1097 		else {
1098 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1099 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1100 			}
1101 		}
1102 	}
1103 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1104 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1105 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1106 
1107 			bp->flow_ctrl = FLOW_CTRL_TX;
1108 		}
1109 	}
1110 }
1111 
1112 static int
1113 bnx2_5709s_linkup(struct bnx2 *bp)
1114 {
1115 	u32 val, speed;
1116 
1117 	bp->link_up = 1;
1118 
1119 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1120 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1121 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1122 
1123 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1124 		bp->line_speed = bp->req_line_speed;
1125 		bp->duplex = bp->req_duplex;
1126 		return 0;
1127 	}
1128 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1129 	switch (speed) {
1130 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1131 			bp->line_speed = SPEED_10;
1132 			break;
1133 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1134 			bp->line_speed = SPEED_100;
1135 			break;
1136 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1137 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1138 			bp->line_speed = SPEED_1000;
1139 			break;
1140 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1141 			bp->line_speed = SPEED_2500;
1142 			break;
1143 	}
1144 	if (val & MII_BNX2_GP_TOP_AN_FD)
1145 		bp->duplex = DUPLEX_FULL;
1146 	else
1147 		bp->duplex = DUPLEX_HALF;
1148 	return 0;
1149 }
1150 
1151 static int
1152 bnx2_5708s_linkup(struct bnx2 *bp)
1153 {
1154 	u32 val;
1155 
1156 	bp->link_up = 1;
1157 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1158 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1159 		case BCM5708S_1000X_STAT1_SPEED_10:
1160 			bp->line_speed = SPEED_10;
1161 			break;
1162 		case BCM5708S_1000X_STAT1_SPEED_100:
1163 			bp->line_speed = SPEED_100;
1164 			break;
1165 		case BCM5708S_1000X_STAT1_SPEED_1G:
1166 			bp->line_speed = SPEED_1000;
1167 			break;
1168 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1169 			bp->line_speed = SPEED_2500;
1170 			break;
1171 	}
1172 	if (val & BCM5708S_1000X_STAT1_FD)
1173 		bp->duplex = DUPLEX_FULL;
1174 	else
1175 		bp->duplex = DUPLEX_HALF;
1176 
1177 	return 0;
1178 }
1179 
1180 static int
1181 bnx2_5706s_linkup(struct bnx2 *bp)
1182 {
1183 	u32 bmcr, local_adv, remote_adv, common;
1184 
1185 	bp->link_up = 1;
1186 	bp->line_speed = SPEED_1000;
1187 
1188 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1189 	if (bmcr & BMCR_FULLDPLX) {
1190 		bp->duplex = DUPLEX_FULL;
1191 	}
1192 	else {
1193 		bp->duplex = DUPLEX_HALF;
1194 	}
1195 
1196 	if (!(bmcr & BMCR_ANENABLE)) {
1197 		return 0;
1198 	}
1199 
1200 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1201 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1202 
1203 	common = local_adv & remote_adv;
1204 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1205 
1206 		if (common & ADVERTISE_1000XFULL) {
1207 			bp->duplex = DUPLEX_FULL;
1208 		}
1209 		else {
1210 			bp->duplex = DUPLEX_HALF;
1211 		}
1212 	}
1213 
1214 	return 0;
1215 }
1216 
1217 static int
1218 bnx2_copper_linkup(struct bnx2 *bp)
1219 {
1220 	u32 bmcr;
1221 
1222 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1223 
1224 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1225 	if (bmcr & BMCR_ANENABLE) {
1226 		u32 local_adv, remote_adv, common;
1227 
1228 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1229 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1230 
1231 		common = local_adv & (remote_adv >> 2);
1232 		if (common & ADVERTISE_1000FULL) {
1233 			bp->line_speed = SPEED_1000;
1234 			bp->duplex = DUPLEX_FULL;
1235 		}
1236 		else if (common & ADVERTISE_1000HALF) {
1237 			bp->line_speed = SPEED_1000;
1238 			bp->duplex = DUPLEX_HALF;
1239 		}
1240 		else {
1241 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1242 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1243 
1244 			common = local_adv & remote_adv;
1245 			if (common & ADVERTISE_100FULL) {
1246 				bp->line_speed = SPEED_100;
1247 				bp->duplex = DUPLEX_FULL;
1248 			}
1249 			else if (common & ADVERTISE_100HALF) {
1250 				bp->line_speed = SPEED_100;
1251 				bp->duplex = DUPLEX_HALF;
1252 			}
1253 			else if (common & ADVERTISE_10FULL) {
1254 				bp->line_speed = SPEED_10;
1255 				bp->duplex = DUPLEX_FULL;
1256 			}
1257 			else if (common & ADVERTISE_10HALF) {
1258 				bp->line_speed = SPEED_10;
1259 				bp->duplex = DUPLEX_HALF;
1260 			}
1261 			else {
1262 				bp->line_speed = 0;
1263 				bp->link_up = 0;
1264 			}
1265 		}
1266 	}
1267 	else {
1268 		if (bmcr & BMCR_SPEED100) {
1269 			bp->line_speed = SPEED_100;
1270 		}
1271 		else {
1272 			bp->line_speed = SPEED_10;
1273 		}
1274 		if (bmcr & BMCR_FULLDPLX) {
1275 			bp->duplex = DUPLEX_FULL;
1276 		}
1277 		else {
1278 			bp->duplex = DUPLEX_HALF;
1279 		}
1280 	}
1281 
1282 	if (bp->link_up) {
1283 		u32 ext_status;
1284 
1285 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1286 		if (ext_status & EXT_STATUS_MDIX)
1287 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static void
1294 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1295 {
1296 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1297 
1298 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1299 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1300 	val |= 0x02 << 8;
1301 
1302 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1303 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1304 
1305 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1306 }
1307 
1308 static void
1309 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1310 {
1311 	int i;
1312 	u32 cid;
1313 
1314 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1315 		if (i == 1)
1316 			cid = RX_RSS_CID;
1317 		bnx2_init_rx_context(bp, cid);
1318 	}
1319 }
1320 
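/* Program the EMAC for the current link speed, duplex and flow
 * control settings.
 */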
1321 static void
1322 bnx2_set_mac_link(struct bnx2 *bp)
1323 {
1324 	u32 val;
1325 
1326 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1327 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1328 		(bp->duplex == DUPLEX_HALF)) {
1329 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1330 	}
1331 
1332 	/* Configure the EMAC mode register. */
1333 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1334 
1335 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1336 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1337 		BNX2_EMAC_MODE_25G_MODE);
1338 
1339 	if (bp->link_up) {
1340 		switch (bp->line_speed) {
1341 			case SPEED_10:
1342 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1343 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1344 					break;
1345 				}
1346 				/* fall through */
1347 			case SPEED_100:
1348 				val |= BNX2_EMAC_MODE_PORT_MII;
1349 				break;
1350 			case SPEED_2500:
1351 				val |= BNX2_EMAC_MODE_25G_MODE;
1352 				/* fall through */
1353 			case SPEED_1000:
1354 				val |= BNX2_EMAC_MODE_PORT_GMII;
1355 				break;
1356 		}
1357 	}
1358 	else {
1359 		val |= BNX2_EMAC_MODE_PORT_GMII;
1360 	}
1361 
1362 	/* Set the MAC to operate in the appropriate duplex mode. */
1363 	if (bp->duplex == DUPLEX_HALF)
1364 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1365 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1366 
1367 	/* Enable/disable rx PAUSE. */
1368 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1369 
1370 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1371 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1372 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1373 
1374 	/* Enable/disable tx PAUSE. */
1375 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1376 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1377 
1378 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1379 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1380 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1381 
1382 	/* Acknowledge the interrupt. */
1383 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1384 
1385 	bnx2_init_all_rx_contexts(bp);
1386 }
1387 
1388 static void
1389 bnx2_enable_bmsr1(struct bnx2 *bp)
1390 {
1391 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1392 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1393 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1395 }
1396 
1397 static void
1398 bnx2_disable_bmsr1(struct bnx2 *bp)
1399 {
1400 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1401 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1402 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1403 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1404 }
1405 
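/* Enable the 2.5G advertisement bit if the PHY supports it.  Returns
 * nonzero only if the bit was already set.
 */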
1406 static int
1407 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1408 {
1409 	u32 up1;
1410 	int ret = 1;
1411 
1412 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413 		return 0;
1414 
1415 	if (bp->autoneg & AUTONEG_SPEED)
1416 		bp->advertising |= ADVERTISED_2500baseX_Full;
1417 
1418 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1419 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1420 
1421 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1422 	if (!(up1 & BCM5708S_UP1_2G5)) {
1423 		up1 |= BCM5708S_UP1_2G5;
1424 		bnx2_write_phy(bp, bp->mii_up1, up1);
1425 		ret = 0;
1426 	}
1427 
1428 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1429 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1430 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1431 
1432 	return ret;
1433 }
1434 
1435 static int
1436 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1437 {
1438 	u32 up1;
1439 	int ret = 0;
1440 
1441 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1442 		return 0;
1443 
1444 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1445 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1446 
1447 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1448 	if (up1 & BCM5708S_UP1_2G5) {
1449 		up1 &= ~BCM5708S_UP1_2G5;
1450 		bnx2_write_phy(bp, bp->mii_up1, up1);
1451 		ret = 1;
1452 	}
1453 
1454 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1455 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1456 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1457 
1458 	return ret;
1459 }
1460 
1461 static void
1462 bnx2_enable_forced_2g5(struct bnx2 *bp)
1463 {
1464 	u32 uninitialized_var(bmcr);
1465 	int err;
1466 
1467 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1468 		return;
1469 
1470 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1471 		u32 val;
1472 
1473 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1474 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1475 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1476 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1477 			val |= MII_BNX2_SD_MISC1_FORCE |
1478 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1479 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1480 		}
1481 
1482 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1483 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1484 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1485 
1486 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1487 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1488 		if (!err)
1489 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1490 	} else {
1491 		return;
1492 	}
1493 
1494 	if (err)
1495 		return;
1496 
1497 	if (bp->autoneg & AUTONEG_SPEED) {
1498 		bmcr &= ~BMCR_ANENABLE;
1499 		if (bp->req_duplex == DUPLEX_FULL)
1500 			bmcr |= BMCR_FULLDPLX;
1501 	}
1502 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1503 }
1504 
1505 static void
1506 bnx2_disable_forced_2g5(struct bnx2 *bp)
1507 {
1508 	u32 uninitialized_var(bmcr);
1509 	int err;
1510 
1511 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1512 		return;
1513 
1514 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1515 		u32 val;
1516 
1517 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1518 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1519 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1520 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1521 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1522 		}
1523 
1524 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1525 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1526 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1527 
1528 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1529 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1530 		if (!err)
1531 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1532 	} else {
1533 		return;
1534 	}
1535 
1536 	if (err)
1537 		return;
1538 
1539 	if (bp->autoneg & AUTONEG_SPEED)
1540 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1541 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1542 }
1543 
1544 static void
1545 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1546 {
1547 	u32 val;
1548 
1549 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1550 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1551 	if (start)
1552 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1553 	else
1554 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1555 }
1556 
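/* Re-read the PHY and bring the driver's link state, flow control and
 * MAC configuration in sync with the hardware.
 */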
1557 static int
1558 bnx2_set_link(struct bnx2 *bp)
1559 {
1560 	u32 bmsr;
1561 	u8 link_up;
1562 
1563 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1564 		bp->link_up = 1;
1565 		return 0;
1566 	}
1567 
1568 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1569 		return 0;
1570 
1571 	link_up = bp->link_up;
1572 
1573 	bnx2_enable_bmsr1(bp);
1574 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1575 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1576 	bnx2_disable_bmsr1(bp);
1577 
1578 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1579 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1580 		u32 val, an_dbg;
1581 
1582 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1583 			bnx2_5706s_force_link_dn(bp, 0);
1584 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1585 		}
1586 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1587 
1588 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1589 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1590 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1591 
1592 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1593 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1594 			bmsr |= BMSR_LSTATUS;
1595 		else
1596 			bmsr &= ~BMSR_LSTATUS;
1597 	}
1598 
1599 	if (bmsr & BMSR_LSTATUS) {
1600 		bp->link_up = 1;
1601 
1602 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1603 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1604 				bnx2_5706s_linkup(bp);
1605 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1606 				bnx2_5708s_linkup(bp);
1607 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1608 				bnx2_5709s_linkup(bp);
1609 		}
1610 		else {
1611 			bnx2_copper_linkup(bp);
1612 		}
1613 		bnx2_resolve_flow_ctrl(bp);
1614 	}
1615 	else {
1616 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1617 		    (bp->autoneg & AUTONEG_SPEED))
1618 			bnx2_disable_forced_2g5(bp);
1619 
1620 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1621 			u32 bmcr;
1622 
1623 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1624 			bmcr |= BMCR_ANENABLE;
1625 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1626 
1627 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1628 		}
1629 		bp->link_up = 0;
1630 	}
1631 
1632 	if (bp->link_up != link_up) {
1633 		bnx2_report_link(bp);
1634 	}
1635 
1636 	bnx2_set_mac_link(bp);
1637 
1638 	return 0;
1639 }
1640 
1641 static int
1642 bnx2_reset_phy(struct bnx2 *bp)
1643 {
1644 	int i;
1645 	u32 reg;
1646 
1647 	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1648 
1649 #define PHY_RESET_MAX_WAIT 100
1650 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1651 		udelay(10);
1652 
1653 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1654 		if (!(reg & BMCR_RESET)) {
1655 			udelay(20);
1656 			break;
1657 		}
1658 	}
1659 	if (i == PHY_RESET_MAX_WAIT) {
1660 		return -EBUSY;
1661 	}
1662 	return 0;
1663 }
1664 
1665 static u32
1666 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1667 {
1668 	u32 adv = 0;
1669 
1670 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1671 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1672 
1673 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1674 			adv = ADVERTISE_1000XPAUSE;
1675 		}
1676 		else {
1677 			adv = ADVERTISE_PAUSE_CAP;
1678 		}
1679 	}
1680 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1681 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1682 			adv = ADVERTISE_1000XPSE_ASYM;
1683 		}
1684 		else {
1685 			adv = ADVERTISE_PAUSE_ASYM;
1686 		}
1687 	}
1688 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1689 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1690 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1691 		}
1692 		else {
1693 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1694 		}
1695 	}
1696 	return adv;
1697 }
1698 
1699 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1700 
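/* Hand the link request off to the firmware-managed (remote) PHY.
 * phy_lock is dropped around the firmware handshake, hence the sparse
 * annotations below.
 */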
1701 static int
1702 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1703 __releases(&bp->phy_lock)
1704 __acquires(&bp->phy_lock)
1705 {
1706 	u32 speed_arg = 0, pause_adv;
1707 
1708 	pause_adv = bnx2_phy_get_pause_adv(bp);
1709 
1710 	if (bp->autoneg & AUTONEG_SPEED) {
1711 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1712 		if (bp->advertising & ADVERTISED_10baseT_Half)
1713 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1714 		if (bp->advertising & ADVERTISED_10baseT_Full)
1715 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1716 		if (bp->advertising & ADVERTISED_100baseT_Half)
1717 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1718 		if (bp->advertising & ADVERTISED_100baseT_Full)
1719 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1720 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1721 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1722 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1723 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1724 	} else {
1725 		if (bp->req_line_speed == SPEED_2500)
1726 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1727 		else if (bp->req_line_speed == SPEED_1000)
1728 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1729 		else if (bp->req_line_speed == SPEED_100) {
1730 			if (bp->req_duplex == DUPLEX_FULL)
1731 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1732 			else
1733 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1734 		} else if (bp->req_line_speed == SPEED_10) {
1735 			if (bp->req_duplex == DUPLEX_FULL)
1736 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1737 			else
1738 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1739 		}
1740 	}
1741 
1742 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1743 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1744 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1745 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1746 
1747 	if (port == PORT_TP)
1748 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1749 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1750 
1751 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1752 
1753 	spin_unlock_bh(&bp->phy_lock);
1754 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1755 	spin_lock_bh(&bp->phy_lock);
1756 
1757 	return 0;
1758 }
1759 
1760 static int
1761 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1762 __releases(&bp->phy_lock)
1763 __acquires(&bp->phy_lock)
1764 {
1765 	u32 adv, bmcr;
1766 	u32 new_adv = 0;
1767 
1768 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1769 		return bnx2_setup_remote_phy(bp, port);
1770 
1771 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1772 		u32 new_bmcr;
1773 		int force_link_down = 0;
1774 
1775 		if (bp->req_line_speed == SPEED_2500) {
1776 			if (!bnx2_test_and_enable_2g5(bp))
1777 				force_link_down = 1;
1778 		} else if (bp->req_line_speed == SPEED_1000) {
1779 			if (bnx2_test_and_disable_2g5(bp))
1780 				force_link_down = 1;
1781 		}
1782 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1783 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1784 
1785 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1786 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1787 		new_bmcr |= BMCR_SPEED1000;
1788 
1789 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1790 			if (bp->req_line_speed == SPEED_2500)
1791 				bnx2_enable_forced_2g5(bp);
1792 			else if (bp->req_line_speed == SPEED_1000) {
1793 				bnx2_disable_forced_2g5(bp);
1794 				new_bmcr &= ~0x2000;
1795 			}
1796 
1797 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1798 			if (bp->req_line_speed == SPEED_2500)
1799 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1800 			else
1801 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1802 		}
1803 
1804 		if (bp->req_duplex == DUPLEX_FULL) {
1805 			adv |= ADVERTISE_1000XFULL;
1806 			new_bmcr |= BMCR_FULLDPLX;
1807 		}
1808 		else {
1809 			adv |= ADVERTISE_1000XHALF;
1810 			new_bmcr &= ~BMCR_FULLDPLX;
1811 		}
1812 		if ((new_bmcr != bmcr) || (force_link_down)) {
1813 			/* Force a link down visible on the other side */
1814 			if (bp->link_up) {
1815 				bnx2_write_phy(bp, bp->mii_adv, adv &
1816 					       ~(ADVERTISE_1000XFULL |
1817 						 ADVERTISE_1000XHALF));
1818 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1819 					BMCR_ANRESTART | BMCR_ANENABLE);
1820 
1821 				bp->link_up = 0;
1822 				netif_carrier_off(bp->dev);
1823 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1824 				bnx2_report_link(bp);
1825 			}
1826 			bnx2_write_phy(bp, bp->mii_adv, adv);
1827 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1828 		} else {
1829 			bnx2_resolve_flow_ctrl(bp);
1830 			bnx2_set_mac_link(bp);
1831 		}
1832 		return 0;
1833 	}
1834 
1835 	bnx2_test_and_enable_2g5(bp);
1836 
1837 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1838 		new_adv |= ADVERTISE_1000XFULL;
1839 
1840 	new_adv |= bnx2_phy_get_pause_adv(bp);
1841 
1842 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1843 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1844 
1845 	bp->serdes_an_pending = 0;
1846 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1847 		/* Force a link down visible on the other side */
1848 		if (bp->link_up) {
1849 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1850 			spin_unlock_bh(&bp->phy_lock);
1851 			msleep(20);
1852 			spin_lock_bh(&bp->phy_lock);
1853 		}
1854 
1855 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1856 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1857 			BMCR_ANENABLE);
1858 		/* Speed up link-up time when the link partner
1859 		 * does not autonegotiate, which is very common
1860 		 * in blade servers. Some blade servers use
1861 		 * IPMI for keyboard input, and it's important
1862 		 * to minimize link disruptions. Autonegotiation
1863 		 * involves exchanging base pages plus 3 next
1864 		 * pages and normally completes in about 120 msec.
1865 		 */
1866 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1867 		bp->serdes_an_pending = 1;
1868 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1869 	} else {
1870 		bnx2_resolve_flow_ctrl(bp);
1871 		bnx2_set_mac_link(bp);
1872 	}
1873 
1874 	return 0;
1875 }
1876 
1877 #define ETHTOOL_ALL_FIBRE_SPEED						\
1878 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1879 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1880 		(ADVERTISED_1000baseT_Full)
1881 
1882 #define ETHTOOL_ALL_COPPER_SPEED					\
1883 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1884 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1885 	ADVERTISED_1000baseT_Full)
1886 
1887 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1888 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1889 
1890 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1891 
1892 static void
1893 bnx2_set_default_remote_link(struct bnx2 *bp)
1894 {
1895 	u32 link;
1896 
1897 	if (bp->phy_port == PORT_TP)
1898 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1899 	else
1900 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1901 
1902 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1903 		bp->req_line_speed = 0;
1904 		bp->autoneg |= AUTONEG_SPEED;
1905 		bp->advertising = ADVERTISED_Autoneg;
1906 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1907 			bp->advertising |= ADVERTISED_10baseT_Half;
1908 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1909 			bp->advertising |= ADVERTISED_10baseT_Full;
1910 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1911 			bp->advertising |= ADVERTISED_100baseT_Half;
1912 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1913 			bp->advertising |= ADVERTISED_100baseT_Full;
1914 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1915 			bp->advertising |= ADVERTISED_1000baseT_Full;
1916 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1917 			bp->advertising |= ADVERTISED_2500baseX_Full;
1918 	} else {
1919 		bp->autoneg = 0;
1920 		bp->advertising = 0;
1921 		bp->req_duplex = DUPLEX_FULL;
1922 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1923 			bp->req_line_speed = SPEED_10;
1924 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1925 				bp->req_duplex = DUPLEX_HALF;
1926 		}
1927 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1928 			bp->req_line_speed = SPEED_100;
1929 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1930 				bp->req_duplex = DUPLEX_HALF;
1931 		}
1932 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1933 			bp->req_line_speed = SPEED_1000;
1934 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1935 			bp->req_line_speed = SPEED_2500;
1936 	}
1937 }
1938 
1939 static void
1940 bnx2_set_default_link(struct bnx2 *bp)
1941 {
1942 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1943 		bnx2_set_default_remote_link(bp);
1944 		return;
1945 	}
1946 
1947 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1948 	bp->req_line_speed = 0;
1949 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1950 		u32 reg;
1951 
1952 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1953 
1954 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1955 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1956 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1957 			bp->autoneg = 0;
1958 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1959 			bp->req_duplex = DUPLEX_FULL;
1960 		}
1961 	} else
1962 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1963 }
1964 
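/* Advance the driver pulse sequence number and write it to the pulse
 * mailbox so the firmware knows the driver is still alive.
 */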
1965 static void
1966 bnx2_send_heart_beat(struct bnx2 *bp)
1967 {
1968 	u32 msg;
1969 	u32 addr;
1970 
1971 	spin_lock(&bp->indirect_lock);
1972 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1973 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1974 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1975 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1976 	spin_unlock(&bp->indirect_lock);
1977 }
1978 
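/* Handle a link event posted by the firmware on remote-PHY devices:
 * decode the link status word into speed, duplex and flow control,
 * and report any link change.
 */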
1979 static void
1980 bnx2_remote_phy_event(struct bnx2 *bp)
1981 {
1982 	u32 msg;
1983 	u8 link_up = bp->link_up;
1984 	u8 old_port;
1985 
1986 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1987 
1988 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1989 		bnx2_send_heart_beat(bp);
1990 
1991 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1992 
1993 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1994 		bp->link_up = 0;
1995 	else {
1996 		u32 speed;
1997 
1998 		bp->link_up = 1;
1999 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
2000 		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}
2031 
2032 		bp->flow_ctrl = 0;
2033 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2034 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2035 			if (bp->duplex == DUPLEX_FULL)
2036 				bp->flow_ctrl = bp->req_flow_ctrl;
2037 		} else {
2038 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2039 				bp->flow_ctrl |= FLOW_CTRL_TX;
2040 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2041 				bp->flow_ctrl |= FLOW_CTRL_RX;
2042 		}
2043 
2044 		old_port = bp->phy_port;
2045 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2046 			bp->phy_port = PORT_FIBRE;
2047 		else
2048 			bp->phy_port = PORT_TP;
2049 
2050 		if (old_port != bp->phy_port)
2051 			bnx2_set_default_link(bp);
2052 
2053 	}
2054 	if (bp->link_up != link_up)
2055 		bnx2_report_link(bp);
2056 
2057 	bnx2_set_mac_link(bp);
2058 }
2059 
2060 static int
2061 bnx2_set_remote_link(struct bnx2 *bp)
2062 {
2063 	u32 evt_code;
2064 
2065 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
2075 	return 0;
2076 }
2077 
2078 static int
2079 bnx2_setup_copper_phy(struct bnx2 *bp)
2080 __releases(&bp->phy_lock)
2081 __acquires(&bp->phy_lock)
2082 {
2083 	u32 bmcr, adv_reg, new_adv = 0;
2084 	u32 new_bmcr;
2085 
2086 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2087 
2088 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2089 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2090 		    ADVERTISE_PAUSE_ASYM);
2091 
2092 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2093 
2094 	if (bp->autoneg & AUTONEG_SPEED) {
2095 		u32 adv1000_reg;
2096 		u32 new_adv1000 = 0;
2097 
2098 		new_adv |= bnx2_phy_get_pause_adv(bp);
2099 
2100 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2101 		adv1000_reg &= PHY_ALL_1000_SPEED;
2102 
2103 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		if ((adv1000_reg != new_adv1000) ||
		    (adv_reg != new_adv) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {
2107 
2108 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2109 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2110 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2111 				BMCR_ANENABLE);
		} else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
2116 
2117 			bnx2_resolve_flow_ctrl(bp);
2118 			bnx2_set_mac_link(bp);
2119 		}
2120 		return 0;
2121 	}
2122 
2123 	/* advertise nothing when forcing speed */
2124 	if (adv_reg != new_adv)
2125 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2126 
2127 	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100)
		new_bmcr |= BMCR_SPEED100;
	if (bp->req_duplex == DUPLEX_FULL)
		new_bmcr |= BMCR_FULLDPLX;
2134 	if (new_bmcr != bmcr) {
2135 		u32 bmsr;
2136 
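		/* The BMSR latches link-down events; read it twice to
		 * get the current link state.
		 */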
2137 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2139 
2140 		if (bmsr & BMSR_LSTATUS) {
2141 			/* Force link down */
2142 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2143 			spin_unlock_bh(&bp->phy_lock);
2144 			msleep(50);
2145 			spin_lock_bh(&bp->phy_lock);
2146 
2147 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2148 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2149 		}
2150 
2151 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2152 
		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
2157 		if (bmsr & BMSR_LSTATUS) {
2158 			bp->line_speed = bp->req_line_speed;
2159 			bp->duplex = bp->req_duplex;
2160 			bnx2_resolve_flow_ctrl(bp);
2161 			bnx2_set_mac_link(bp);
2162 		}
2163 	} else {
2164 		bnx2_resolve_flow_ctrl(bp);
2165 		bnx2_set_mac_link(bp);
2166 	}
2167 	return 0;
2168 }
2169 
2170 static int
2171 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2172 __releases(&bp->phy_lock)
2173 __acquires(&bp->phy_lock)
2174 {
2175 	if (bp->loopback == MAC_LOOPBACK)
2176 		return 0;
2177 
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
		return bnx2_setup_serdes_phy(bp, port);
	else
		return bnx2_setup_copper_phy(bp);
2184 }
2185 
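/* The 5709 SerDes PHY uses block-addressed registers; the standard
 * MII registers are remapped at an offset of 0x10.
 */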
2186 static int
2187 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2188 {
2189 	u32 val;
2190 
2191 	bp->mii_bmcr = MII_BMCR + 0x10;
2192 	bp->mii_bmsr = MII_BMSR + 0x10;
2193 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2194 	bp->mii_adv = MII_ADVERTISE + 0x10;
2195 	bp->mii_lpa = MII_LPA + 0x10;
2196 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2199 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2200 
2201 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2202 	if (reset_phy)
2203 		bnx2_reset_phy(bp);
2204 
2205 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2206 
2207 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2208 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2209 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2210 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2211 
2212 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2213 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2214 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2215 		val |= BCM5708S_UP1_2G5;
2216 	else
2217 		val &= ~BCM5708S_UP1_2G5;
2218 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2219 
2220 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2221 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2222 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2223 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2224 
2225 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2226 
2227 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2228 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2229 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2230 
2231 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2232 
2233 	return 0;
2234 }
2235 
2236 static int
2237 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2238 {
2239 	u32 val;
2240 
2241 	if (reset_phy)
2242 		bnx2_reset_phy(bp);
2243 
2244 	bp->mii_up1 = BCM5708S_UP1;
2245 
2246 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2247 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2248 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2249 
2250 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2251 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2252 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2253 
2254 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2255 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2256 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2257 
2258 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2259 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2260 		val |= BCM5708S_UP1_2G5;
2261 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2262 	}
2263 
2264 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2265 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2266 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2267 		/* increase tx signal amplitude */
2268 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2269 			       BCM5708S_BLK_ADDR_TX_MISC);
2270 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2271 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2272 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2273 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2274 	}
2275 
2276 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2277 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2278 
2279 	if (val) {
2280 		u32 is_backplane;
2281 
2282 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2283 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2284 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2285 				       BCM5708S_BLK_ADDR_TX_MISC);
2286 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2287 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2288 				       BCM5708S_BLK_ADDR_DIG);
2289 		}
2290 	}
2291 	return 0;
2292 }
2293 
2294 static int
2295 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2296 {
2297 	if (reset_phy)
2298 		bnx2_reset_phy(bp);
2299 
2300 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2301 
2302 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2303 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2304 
2305 	if (bp->dev->mtu > ETH_DATA_LEN) {
2306 		u32 val;
2307 
2308 		/* Set extended packet length bit */
2309 		bnx2_write_phy(bp, 0x18, 0x7);
2310 		bnx2_read_phy(bp, 0x18, &val);
2311 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2312 
2313 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2314 		bnx2_read_phy(bp, 0x1c, &val);
2315 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	} else {
2318 		u32 val;
2319 
2320 		bnx2_write_phy(bp, 0x18, 0x7);
2321 		bnx2_read_phy(bp, 0x18, &val);
2322 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2323 
2324 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2325 		bnx2_read_phy(bp, 0x1c, &val);
2326 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2327 	}
2328 
2329 	return 0;
2330 }
2331 
2332 static int
2333 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2334 {
2335 	u32 val;
2336 
2337 	if (reset_phy)
2338 		bnx2_reset_phy(bp);
2339 
2340 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2341 		bnx2_write_phy(bp, 0x18, 0x0c00);
2342 		bnx2_write_phy(bp, 0x17, 0x000a);
2343 		bnx2_write_phy(bp, 0x15, 0x310b);
2344 		bnx2_write_phy(bp, 0x17, 0x201f);
2345 		bnx2_write_phy(bp, 0x15, 0x9506);
2346 		bnx2_write_phy(bp, 0x17, 0x401f);
2347 		bnx2_write_phy(bp, 0x15, 0x14e2);
2348 		bnx2_write_phy(bp, 0x18, 0x0400);
2349 	}
2350 
2351 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2352 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2353 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2354 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2355 		val &= ~(1 << 8);
2356 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2357 	}
2358 
2359 	if (bp->dev->mtu > ETH_DATA_LEN) {
2360 		/* Set extended packet length bit */
2361 		bnx2_write_phy(bp, 0x18, 0x7);
2362 		bnx2_read_phy(bp, 0x18, &val);
2363 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2364 
2365 		bnx2_read_phy(bp, 0x10, &val);
2366 		bnx2_write_phy(bp, 0x10, val | 0x1);
	} else {
2369 		bnx2_write_phy(bp, 0x18, 0x7);
2370 		bnx2_read_phy(bp, 0x18, &val);
2371 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2372 
2373 		bnx2_read_phy(bp, 0x10, &val);
2374 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2375 	}
2376 
2377 	/* ethernet@wirespeed */
2378 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2379 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2380 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2381 
2382 	/* auto-mdix */
2383 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2384 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2385 
2386 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2387 	return 0;
2388 }
2389 
2391 static int
2392 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2393 __releases(&bp->phy_lock)
2394 __acquires(&bp->phy_lock)
2395 {
2396 	u32 val;
2397 	int rc = 0;
2398 
2399 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2400 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2401 
2402 	bp->mii_bmcr = MII_BMCR;
2403 	bp->mii_bmsr = MII_BMSR;
2404 	bp->mii_bmsr1 = MII_BMSR;
2405 	bp->mii_adv = MII_ADVERTISE;
2406 	bp->mii_lpa = MII_LPA;
2407 
2408 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2409 
2410 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2411 		goto setup_phy;
2412 
2413 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2414 	bp->phy_id = val << 16;
2415 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2416 	bp->phy_id |= val & 0xffff;
2417 
2418 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2419 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2420 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2421 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2422 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2423 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2424 			rc = bnx2_init_5709s_phy(bp, reset_phy);
	} else {
2427 		rc = bnx2_init_copper_phy(bp, reset_phy);
2428 	}
2429 
2430 setup_phy:
2431 	if (!rc)
2432 		rc = bnx2_setup_phy(bp, bp->phy_port);
2433 
2434 	return rc;
2435 }
2436 
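/* Configure the EMAC for internal MAC loopback with a forced link. */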
2437 static int
2438 bnx2_set_mac_loopback(struct bnx2 *bp)
2439 {
2440 	u32 mac_mode;
2441 
2442 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2443 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2444 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2445 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2446 	bp->link_up = 1;
2447 	return 0;
2448 }
2449 
2450 static int bnx2_test_link(struct bnx2 *);
2451 
2452 static int
2453 bnx2_set_phy_loopback(struct bnx2 *bp)
2454 {
2455 	u32 mac_mode;
2456 	int rc, i;
2457 
2458 	spin_lock_bh(&bp->phy_lock);
2459 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2460 			    BMCR_SPEED1000);
2461 	spin_unlock_bh(&bp->phy_lock);
2462 	if (rc)
2463 		return rc;
2464 
2465 	for (i = 0; i < 10; i++) {
2466 		if (bnx2_test_link(bp) == 0)
2467 			break;
2468 		msleep(100);
2469 	}
2470 
2471 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2472 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2473 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2474 		      BNX2_EMAC_MODE_25G_MODE);
2475 
2476 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2477 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2478 	bp->link_up = 1;
2479 	return 0;
2480 }
2481 
2482 static void
2483 bnx2_dump_mcp_state(struct bnx2 *bp)
2484 {
2485 	struct net_device *dev = bp->dev;
2486 	u32 mcp_p0, mcp_p1;
2487 
2488 	netdev_err(dev, "<--- start MCP states dump --->\n");
2489 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2490 		mcp_p0 = BNX2_MCP_STATE_P0;
2491 		mcp_p1 = BNX2_MCP_STATE_P1;
2492 	} else {
2493 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2494 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2495 	}
2496 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2497 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2498 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2499 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2500 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2501 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
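	/* The program counter is read twice so the dump shows whether
	 * the MCP is making progress.
	 */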
2502 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2503 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2504 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2505 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2506 	netdev_err(dev, "DEBUG: shmem states:\n");
2507 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2508 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2509 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2510 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2511 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2512 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2513 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2514 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2515 	pr_cont(" condition[%08x]\n",
2516 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2517 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2518 	DP_SHMEM_LINE(bp, 0x3cc);
2519 	DP_SHMEM_LINE(bp, 0x3dc);
2520 	DP_SHMEM_LINE(bp, 0x3ec);
2521 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2522 	netdev_err(dev, "<--- end MCP states dump --->\n");
2523 }
2524 
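/* Write a message to the firmware mailbox and optionally poll for an
 * ACK with the matching sequence number.  On timeout, a FW_TIMEOUT
 * code is written back so the firmware knows the handshake failed.
 */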
2525 static int
2526 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2527 {
2528 	int i;
2529 	u32 val;
2530 
2531 	bp->fw_wr_seq++;
2532 	msg_data |= bp->fw_wr_seq;
2533 	bp->fw_last_msg = msg_data;
2534 
2535 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2536 
2537 	if (!ack)
2538 		return 0;
2539 
2540 	/* wait for an acknowledgement. */
2541 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2542 		msleep(10);
2543 
2544 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2545 
2546 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2547 			break;
2548 	}
2549 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2550 		return 0;
2551 
2552 	/* If we timed out, inform the firmware that this is the case. */
2553 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2554 		msg_data &= ~BNX2_DRV_MSG_CODE;
2555 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2556 
2557 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2558 		if (!silent) {
2559 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2560 			bnx2_dump_mcp_state(bp);
2561 		}
2562 
2563 		return -EBUSY;
2564 	}
2565 
2566 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2567 		return -EIO;
2568 
2569 	return 0;
2570 }
2571 
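/* The 5709 keeps its context memory in host pages.  Program the
 * chip's host page table with each context block and wait for every
 * write request to be accepted.
 */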
2572 static int
2573 bnx2_init_5709_context(struct bnx2 *bp)
2574 {
2575 	int i, ret = 0;
2576 	u32 val;
2577 
2578 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2579 	val |= (BNX2_PAGE_BITS - 8) << 16;
2580 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2581 	for (i = 0; i < 10; i++) {
2582 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2583 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2584 			break;
2585 		udelay(2);
2586 	}
2587 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2588 		return -EBUSY;
2589 
2590 	for (i = 0; i < bp->ctx_pages; i++) {
2591 		int j;
2592 
2593 		if (bp->ctx_blk[i])
2594 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2595 		else
2596 			return -ENOMEM;
2597 
2598 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2599 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2600 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2601 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2602 			(u64) bp->ctx_blk_mapping[i] >> 32);
2603 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2604 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2605 		for (j = 0; j < 10; j++) {
2606 
2607 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2608 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2609 				break;
2610 			udelay(5);
2611 		}
2612 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2613 			ret = -EBUSY;
2614 			break;
2615 		}
2616 	}
2617 	return ret;
2618 }
2619 
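/* Older chips keep the context in on-chip memory.  Zero all 96
 * contexts, applying the virtual-to-physical CID remapping needed on
 * the 5706 A0.
 */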
2620 static void
2621 bnx2_init_context(struct bnx2 *bp)
2622 {
2623 	u32 vcid;
2624 
2625 	vcid = 96;
2626 	while (vcid) {
2627 		u32 vcid_addr, pcid_addr, offset;
2628 		int i;
2629 
2630 		vcid--;
2631 
2632 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2633 			u32 new_vcid;
2634 
2635 			vcid_addr = GET_PCID_ADDR(vcid);
2636 			if (vcid & 0x8) {
2637 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			} else {
2640 				new_vcid = vcid;
2641 			}
2642 			pcid_addr = GET_PCID_ADDR(new_vcid);
		} else {
			vcid_addr = GET_CID_ADDR(vcid);
2646 			pcid_addr = vcid_addr;
2647 		}
2648 
2649 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2650 			vcid_addr += (i << PHY_CTX_SHIFT);
2651 			pcid_addr += (i << PHY_CTX_SHIFT);
2652 
2653 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2654 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2655 
2656 			/* Zero out the context. */
2657 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2658 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2659 		}
2660 	}
2661 }
2662 
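/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the firmware pool, then free back only the good ones so the
 * bad blocks remain allocated and are never used again.
 */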
2663 static int
2664 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2665 {
2666 	u16 *good_mbuf;
2667 	u32 good_mbuf_cnt;
2668 	u32 val;
2669 
2670 	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2671 	if (!good_mbuf)
2672 		return -ENOMEM;
2673 
2674 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2675 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2676 
2677 	good_mbuf_cnt = 0;
2678 
2679 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2680 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2681 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2682 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2683 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2684 
2685 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2686 
2687 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2688 
2689 		/* The addresses with Bit 9 set are bad memory blocks. */
2690 		if (!(val & (1 << 9))) {
2691 			good_mbuf[good_mbuf_cnt] = (u16) val;
2692 			good_mbuf_cnt++;
2693 		}
2694 
2695 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2696 	}
2697 
	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones.
	 */
2700 	while (good_mbuf_cnt) {
2701 		good_mbuf_cnt--;
2702 
2703 		val = good_mbuf[good_mbuf_cnt];
2704 		val = (val << 9) | val | 1;
2705 
2706 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2707 	}
2708 	kfree(good_mbuf);
2709 	return 0;
2710 }
2711 
2712 static void
2713 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2714 {
2715 	u32 val;
2716 
2717 	val = (mac_addr[0] << 8) | mac_addr[1];
2718 
2719 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2720 
2721 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2722 		(mac_addr[4] << 8) | mac_addr[5];
2723 
2724 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2725 }
2726 
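/* Allocate and DMA-map one page for the RX page ring, which backs the
 * non-header portion of split or jumbo packets.
 */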
2727 static inline int
2728 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 {
2730 	dma_addr_t mapping;
2731 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2732 	struct bnx2_rx_bd *rxbd =
2733 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2734 	struct page *page = alloc_page(gfp);
2735 
2736 	if (!page)
2737 		return -ENOMEM;
2738 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2739 			       PCI_DMA_FROMDEVICE);
2740 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2741 		__free_page(page);
2742 		return -EIO;
2743 	}
2744 
2745 	rx_pg->page = page;
2746 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2747 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2748 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2749 	return 0;
2750 }
2751 
2752 static void
2753 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2754 {
2755 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2756 	struct page *page = rx_pg->page;
2757 
2758 	if (!page)
2759 		return;
2760 
2761 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2762 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2763 
2764 	__free_page(page);
2765 	rx_pg->page = NULL;
2766 }
2767 
2768 static inline int
2769 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2770 {
2771 	u8 *data;
2772 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2773 	dma_addr_t mapping;
2774 	struct bnx2_rx_bd *rxbd =
2775 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2776 
2777 	data = kmalloc(bp->rx_buf_size, gfp);
2778 	if (!data)
2779 		return -ENOMEM;
2780 
2781 	mapping = dma_map_single(&bp->pdev->dev,
2782 				 get_l2_fhdr(data),
2783 				 bp->rx_buf_use_size,
2784 				 PCI_DMA_FROMDEVICE);
2785 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2786 		kfree(data);
2787 		return -EIO;
2788 	}
2789 
2790 	rx_buf->data = data;
2791 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2792 
2793 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2794 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2795 
2796 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2797 
2798 	return 0;
2799 }
2800 
2801 static int
2802 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2803 {
2804 	struct status_block *sblk = bnapi->status_blk.msi;
2805 	u32 new_link_state, old_link_state;
2806 	int is_set = 1;
2807 
2808 	new_link_state = sblk->status_attn_bits & event;
2809 	old_link_state = sblk->status_attn_bits_ack & event;
2810 	if (new_link_state != old_link_state) {
2811 		if (new_link_state)
2812 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2813 		else
2814 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2815 	} else
2816 		is_set = 0;
2817 
2818 	return is_set;
2819 }
2820 
2821 static void
2822 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2823 {
2824 	spin_lock(&bp->phy_lock);
2825 
2826 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2827 		bnx2_set_link(bp);
2828 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2829 		bnx2_set_remote_link(bp);
2830 
	spin_unlock(&bp->phy_lock);
}
2834 
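/* The last BD of each ring page is a pointer to the next page, so a
 * hardware consumer index that lands on it must be skipped over.
 */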
2835 static inline u16
2836 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2837 {
2838 	u16 cons;
2839 
2840 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2841 
2842 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2843 		cons++;
2844 	return cons;
2845 }
2846 
2847 static int
2848 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2849 {
2850 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2851 	u16 hw_cons, sw_cons, sw_ring_cons;
2852 	int tx_pkt = 0, index;
2853 	unsigned int tx_bytes = 0;
2854 	struct netdev_queue *txq;
2855 
2856 	index = (bnapi - bp->bnx2_napi);
2857 	txq = netdev_get_tx_queue(bp->dev, index);
2858 
2859 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2860 	sw_cons = txr->tx_cons;
2861 
2862 	while (sw_cons != hw_cons) {
2863 		struct bnx2_sw_tx_bd *tx_buf;
2864 		struct sk_buff *skb;
2865 		int i, last;
2866 
2867 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2868 
2869 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2870 		skb = tx_buf->skb;
2871 
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2873 		prefetch(&skb->end);
2874 
2875 		/* partial BD completions possible with TSO packets */
2876 		if (tx_buf->is_gso) {
2877 			u16 last_idx, last_ring_idx;
2878 
2879 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2880 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2881 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2882 				last_idx++;
2883 			}
2884 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2885 				break;
2886 			}
2887 		}
2888 
2889 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2890 			skb_headlen(skb), PCI_DMA_TODEVICE);
2891 
2892 		tx_buf->skb = NULL;
2893 		last = tx_buf->nr_frags;
2894 
2895 		for (i = 0; i < last; i++) {
2896 			struct bnx2_sw_tx_bd *tx_buf;
2897 
2898 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2899 
2900 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2901 			dma_unmap_page(&bp->pdev->dev,
2902 				dma_unmap_addr(tx_buf, mapping),
2903 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2904 				PCI_DMA_TODEVICE);
2905 		}
2906 
2907 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2908 
2909 		tx_bytes += skb->len;
2910 		dev_kfree_skb_any(skb);
2911 		tx_pkt++;
2912 		if (tx_pkt == budget)
2913 			break;
2914 
2915 		if (hw_cons == sw_cons)
2916 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2917 	}
2918 
2919 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2920 	txr->hw_tx_cons = hw_cons;
2921 	txr->tx_cons = sw_cons;
2922 
2923 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2924 	 * before checking for netif_tx_queue_stopped().  Without the
2925 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2926 	 * will miss it and cause the queue to be stopped forever.
2927 	 */
2928 	smp_mb();
2929 
2930 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2931 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2932 		__netif_tx_lock(txq, smp_processor_id());
2933 		if ((netif_tx_queue_stopped(txq)) &&
2934 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2935 			netif_tx_wake_queue(txq);
2936 		__netif_tx_unlock(txq);
2937 	}
2938 
2939 	return tx_pkt;
2940 }
2941 
2942 static void
2943 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2944 			struct sk_buff *skb, int count)
2945 {
2946 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2947 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2948 	int i;
2949 	u16 hw_prod, prod;
2950 	u16 cons = rxr->rx_pg_cons;
2951 
2952 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2953 
2954 	/* The caller was unable to allocate a new page to replace the
2955 	 * last one in the frags array, so we need to recycle that page
2956 	 * and then free the skb.
2957 	 */
2958 	if (skb) {
2959 		struct page *page;
2960 		struct skb_shared_info *shinfo;
2961 
2962 		shinfo = skb_shinfo(skb);
2963 		shinfo->nr_frags--;
2964 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2965 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2966 
2967 		cons_rx_pg->page = page;
2968 		dev_kfree_skb(skb);
2969 	}
2970 
2971 	hw_prod = rxr->rx_pg_prod;
2972 
2973 	for (i = 0; i < count; i++) {
2974 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2975 
2976 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2977 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2978 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2979 						[BNX2_RX_IDX(cons)];
2980 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2981 						[BNX2_RX_IDX(prod)];
2982 
2983 		if (prod != cons) {
2984 			prod_rx_pg->page = cons_rx_pg->page;
2985 			cons_rx_pg->page = NULL;
2986 			dma_unmap_addr_set(prod_rx_pg, mapping,
2987 				dma_unmap_addr(cons_rx_pg, mapping));
2988 
2989 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2990 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2991 
2992 		}
2993 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2994 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2995 	}
2996 	rxr->rx_pg_prod = hw_prod;
2997 	rxr->rx_pg_cons = cons;
2998 }
2999 
3000 static inline void
3001 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3002 		   u8 *data, u16 cons, u16 prod)
3003 {
3004 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3005 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3006 
3007 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3008 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3009 
3010 	dma_sync_single_for_device(&bp->pdev->dev,
3011 		dma_unmap_addr(cons_rx_buf, mapping),
3012 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3013 
3014 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3015 
3016 	prod_rx_buf->data = data;
3017 
3018 	if (cons == prod)
3019 		return;
3020 
3021 	dma_unmap_addr_set(prod_rx_buf, mapping,
3022 			dma_unmap_addr(cons_rx_buf, mapping));
3023 
3024 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3025 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3026 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3027 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3028 }
3029 
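/* Build an skb for a packet larger than the copy threshold.  The
 * header comes from the small RX buffer; for split or jumbo packets,
 * the rest of the data is attached as page fragments.
 */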
3030 static struct sk_buff *
3031 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3032 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3033 	    u32 ring_idx)
3034 {
3035 	int err;
3036 	u16 prod = ring_idx & 0xffff;
3037 	struct sk_buff *skb;
3038 
3039 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3040 	if (unlikely(err)) {
3041 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3042 error:
3043 		if (hdr_len) {
3044 			unsigned int raw_len = len + 4;
3045 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3046 
3047 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3048 		}
3049 		return NULL;
3050 	}
3051 
3052 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3053 			 PCI_DMA_FROMDEVICE);
3054 	skb = build_skb(data, 0);
3055 	if (!skb) {
3056 		kfree(data);
3057 		goto error;
3058 	}
3059 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3060 	if (hdr_len == 0) {
3061 		skb_put(skb, len);
3062 		return skb;
3063 	} else {
3064 		unsigned int i, frag_len, frag_size, pages;
3065 		struct bnx2_sw_pg *rx_pg;
3066 		u16 pg_cons = rxr->rx_pg_cons;
3067 		u16 pg_prod = rxr->rx_pg_prod;
3068 
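		/* len excludes the 4-byte CRC at this point, but the
		 * page data still contains it; account for the extra
		 * bytes here and trim them from the last fragment.
		 */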
3069 		frag_size = len + 4 - hdr_len;
3070 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3071 		skb_put(skb, hdr_len);
3072 
3073 		for (i = 0; i < pages; i++) {
3074 			dma_addr_t mapping_old;
3075 
3076 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3077 			if (unlikely(frag_len <= 4)) {
3078 				unsigned int tail = 4 - frag_len;
3079 
3080 				rxr->rx_pg_cons = pg_cons;
3081 				rxr->rx_pg_prod = pg_prod;
3082 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3083 							pages - i);
3084 				skb->len -= tail;
3085 				if (i == 0) {
3086 					skb->tail -= tail;
3087 				} else {
3088 					skb_frag_t *frag =
3089 						&skb_shinfo(skb)->frags[i - 1];
3090 					skb_frag_size_sub(frag, tail);
3091 					skb->data_len -= tail;
3092 				}
3093 				return skb;
3094 			}
3095 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3096 
3097 			/* Don't unmap yet.  If we're unable to allocate a new
3098 			 * page, we need to recycle the page and the DMA addr.
3099 			 */
3100 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3101 			if (i == pages - 1)
3102 				frag_len -= 4;
3103 
3104 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3105 			rx_pg->page = NULL;
3106 
3107 			err = bnx2_alloc_rx_page(bp, rxr,
3108 						 BNX2_RX_PG_RING_IDX(pg_prod),
3109 						 GFP_ATOMIC);
3110 			if (unlikely(err)) {
3111 				rxr->rx_pg_cons = pg_cons;
3112 				rxr->rx_pg_prod = pg_prod;
3113 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3114 							pages - i);
3115 				return NULL;
3116 			}
3117 
3118 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3119 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3120 
3121 			frag_size -= frag_len;
3122 			skb->data_len += frag_len;
3123 			skb->truesize += PAGE_SIZE;
3124 			skb->len += frag_len;
3125 
3126 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3127 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3128 		}
3129 		rxr->rx_pg_prod = pg_prod;
3130 		rxr->rx_pg_cons = pg_cons;
3131 	}
3132 	return skb;
3133 }
3134 
3135 static inline u16
3136 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3137 {
3138 	u16 cons;
3139 
3140 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3141 
3142 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3143 		cons++;
3144 	return cons;
3145 }
3146 
3147 static int
3148 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3149 {
3150 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3151 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3152 	struct l2_fhdr *rx_hdr;
3153 	int rx_pkt = 0, pg_ring_used = 0;
3154 
3155 	if (budget <= 0)
3156 		return rx_pkt;
3157 
3158 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3159 	sw_cons = rxr->rx_cons;
3160 	sw_prod = rxr->rx_prod;
3161 
3162 	/* Memory barrier necessary as speculative reads of the rx
3163 	 * buffer can be ahead of the index in the status block
3164 	 */
3165 	rmb();
3166 	while (sw_cons != hw_cons) {
3167 		unsigned int len, hdr_len;
3168 		u32 status;
3169 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3170 		struct sk_buff *skb;
3171 		dma_addr_t dma_addr;
3172 		u8 *data;
3173 		u16 next_ring_idx;
3174 
3175 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3176 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3177 
3178 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3179 		data = rx_buf->data;
3180 		rx_buf->data = NULL;
3181 
3182 		rx_hdr = get_l2_fhdr(data);
3183 		prefetch(rx_hdr);
3184 
3185 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3186 
3187 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3188 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3189 			PCI_DMA_FROMDEVICE);
3190 
3191 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3192 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3193 		prefetch(get_l2_fhdr(next_rx_buf->data));
3194 
3195 		len = rx_hdr->l2_fhdr_pkt_len;
3196 		status = rx_hdr->l2_fhdr_status;
3197 
3198 		hdr_len = 0;
3199 		if (status & L2_FHDR_STATUS_SPLIT) {
3200 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3201 			pg_ring_used = 1;
3202 		} else if (len > bp->rx_jumbo_thresh) {
3203 			hdr_len = bp->rx_jumbo_thresh;
3204 			pg_ring_used = 1;
3205 		}
3206 
3207 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3208 				       L2_FHDR_ERRORS_PHY_DECODE |
3209 				       L2_FHDR_ERRORS_ALIGNMENT |
3210 				       L2_FHDR_ERRORS_TOO_SHORT |
3211 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3212 
3213 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3214 					  sw_ring_prod);
3215 			if (pg_ring_used) {
3216 				int pages;
3217 
3218 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3219 
3220 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3221 			}
3222 			goto next_rx;
3223 		}
3224 
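		/* The hardware packet length includes the 4-byte CRC;
		 * trim it.
		 */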
3225 		len -= 4;
3226 
3227 		if (len <= bp->rx_copy_thresh) {
3228 			skb = netdev_alloc_skb(bp->dev, len + 6);
3229 			if (!skb) {
3230 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3231 						  sw_ring_prod);
3232 				goto next_rx;
3233 			}
3234 
3235 			/* aligned copy */
3236 			memcpy(skb->data,
3237 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3238 			       len + 6);
3239 			skb_reserve(skb, 6);
3240 			skb_put(skb, len);
3241 
3242 			bnx2_reuse_rx_data(bp, rxr, data,
3243 				sw_ring_cons, sw_ring_prod);
3244 
3245 		} else {
3246 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3247 					  (sw_ring_cons << 16) | sw_ring_prod);
3248 			if (!skb)
3249 				goto next_rx;
3250 		}
3251 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3252 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3253 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3254 
3255 		skb->protocol = eth_type_trans(skb, bp->dev);
3256 
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb(skb);
			goto next_rx;
		}
3265 
3266 		skb_checksum_none_assert(skb);
3267 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3268 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3269 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3270 
3271 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3272 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3273 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3274 		}
3275 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3276 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3277 		     L2_FHDR_STATUS_USE_RXHASH))
3278 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3279 				     PKT_HASH_TYPE_L3);
3280 
3281 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3282 		napi_gro_receive(&bnapi->napi, skb);
3283 		rx_pkt++;
3284 
3285 next_rx:
3286 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3287 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3288 
3289 		if (rx_pkt == budget)
3290 			break;
3291 
3292 		/* Refresh hw_cons to see if there is new work */
3293 		if (sw_cons == hw_cons) {
3294 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3295 			rmb();
3296 		}
3297 	}
3298 	rxr->rx_cons = sw_cons;
3299 	rxr->rx_prod = sw_prod;
3300 
3301 	if (pg_ring_used)
3302 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3303 
3304 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3305 
3306 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3307 
	return rx_pkt;
}
3311 
3312 /* MSI ISR - The only difference between this and the INTx ISR
3313  * is that the MSI interrupt is always serviced.
3314  */
3315 static irqreturn_t
3316 bnx2_msi(int irq, void *dev_instance)
3317 {
3318 	struct bnx2_napi *bnapi = dev_instance;
3319 	struct bnx2 *bp = bnapi->bp;
3320 
3321 	prefetch(bnapi->status_blk.msi);
3322 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3323 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3324 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3325 
3326 	/* Return here if interrupt is disabled. */
3327 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3328 		return IRQ_HANDLED;
3329 
3330 	napi_schedule(&bnapi->napi);
3331 
3332 	return IRQ_HANDLED;
3333 }
3334 
3335 static irqreturn_t
3336 bnx2_msi_1shot(int irq, void *dev_instance)
3337 {
3338 	struct bnx2_napi *bnapi = dev_instance;
3339 	struct bnx2 *bp = bnapi->bp;
3340 
3341 	prefetch(bnapi->status_blk.msi);
3342 
3343 	/* Return here if interrupt is disabled. */
3344 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3345 		return IRQ_HANDLED;
3346 
3347 	napi_schedule(&bnapi->napi);
3348 
3349 	return IRQ_HANDLED;
3350 }
3351 
3352 static irqreturn_t
3353 bnx2_interrupt(int irq, void *dev_instance)
3354 {
3355 	struct bnx2_napi *bnapi = dev_instance;
3356 	struct bnx2 *bp = bnapi->bp;
3357 	struct status_block *sblk = bnapi->status_blk.msi;
3358 
3359 	/* When using INTx, it is possible for the interrupt to arrive
3360 	 * at the CPU before the status block posted prior to the
3361 	 * interrupt. Reading a register will flush the status block.
3362 	 * When using MSI, the MSI message will always complete after
3363 	 * the status block write.
3364 	 */
3365 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3366 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3367 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3368 		return IRQ_NONE;
3369 
3370 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3371 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3372 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3373 
3374 	/* Read back to deassert IRQ immediately to avoid too many
3375 	 * spurious interrupts.
3376 	 */
3377 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3378 
3379 	/* Return here if interrupt is shared and is disabled. */
3380 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3381 		return IRQ_HANDLED;
3382 
3383 	if (napi_schedule_prep(&bnapi->napi)) {
3384 		bnapi->last_status_idx = sblk->status_idx;
3385 		__napi_schedule(&bnapi->napi);
3386 	}
3387 
3388 	return IRQ_HANDLED;
3389 }
3390 
3391 static inline int
3392 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3393 {
3394 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3395 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3396 
3397 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3398 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3399 		return 1;
3400 	return 0;
3401 }
3402 
3403 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3404 				 STATUS_ATTN_BITS_TIMER_ABORT)
3405 
3406 static inline int
3407 bnx2_has_work(struct bnx2_napi *bnapi)
3408 {
3409 	struct status_block *sblk = bnapi->status_blk.msi;
3410 
3411 	if (bnx2_has_fast_work(bnapi))
3412 		return 1;
3413 
3414 #ifdef BCM_CNIC
3415 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3416 		return 1;
3417 #endif
3418 
3419 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3420 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3421 		return 1;
3422 
3423 	return 0;
3424 }
3425 
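/* Work around a missed MSI: if work is pending but the status index
 * has not moved since the last idle check, toggle the MSI enable bit
 * and call the interrupt handler directly.
 */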
3426 static void
3427 bnx2_chk_missed_msi(struct bnx2 *bp)
3428 {
3429 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3430 	u32 msi_ctrl;
3431 
3432 	if (bnx2_has_work(bnapi)) {
3433 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3434 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3435 			return;
3436 
3437 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3438 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3439 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3440 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3441 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3442 		}
3443 	}
3444 
3445 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3446 }
3447 
3448 #ifdef BCM_CNIC
3449 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3450 {
3451 	struct cnic_ops *c_ops;
3452 
3453 	if (!bnapi->cnic_present)
3454 		return;
3455 
3456 	rcu_read_lock();
3457 	c_ops = rcu_dereference(bp->cnic_ops);
3458 	if (c_ops)
3459 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3460 						      bnapi->status_blk.msi);
3461 	rcu_read_unlock();
3462 }
3463 #endif
3464 
3465 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3466 {
3467 	struct status_block *sblk = bnapi->status_blk.msi;
3468 	u32 status_attn_bits = sblk->status_attn_bits;
3469 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3470 
3471 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3472 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3473 
3474 		bnx2_phy_int(bp, bnapi);
3475 
3476 		/* This is needed to take care of transient status
3477 		 * during link changes.
3478 		 */
3479 		BNX2_WR(bp, BNX2_HC_COMMAND,
3480 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3481 		BNX2_RD(bp, BNX2_HC_COMMAND);
3482 	}
3483 }
3484 
3485 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3486 			  int work_done, int budget)
3487 {
3488 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3489 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3490 
3491 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3492 		bnx2_tx_int(bp, bnapi, 0);
3493 
3494 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3495 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3496 
3497 	return work_done;
3498 }
3499 
3500 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3501 {
3502 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3503 	struct bnx2 *bp = bnapi->bp;
3504 	int work_done = 0;
3505 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3506 
3507 	while (1) {
3508 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3509 		if (unlikely(work_done >= budget))
3510 			break;
3511 
3512 		bnapi->last_status_idx = sblk->status_idx;
3513 		/* status idx must be read before checking for more work. */
3514 		rmb();
3515 		if (likely(!bnx2_has_fast_work(bnapi))) {
3516 
3517 			napi_complete_done(napi, work_done);
3518 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3519 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3520 				bnapi->last_status_idx);
3521 			break;
3522 		}
3523 	}
3524 	return work_done;
3525 }
3526 
3527 static int bnx2_poll(struct napi_struct *napi, int budget)
3528 {
3529 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3530 	struct bnx2 *bp = bnapi->bp;
3531 	int work_done = 0;
3532 	struct status_block *sblk = bnapi->status_blk.msi;
3533 
3534 	while (1) {
3535 		bnx2_poll_link(bp, bnapi);
3536 
3537 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3538 
3539 #ifdef BCM_CNIC
3540 		bnx2_poll_cnic(bp, bnapi);
3541 #endif
3542 
3543 		/* bnapi->last_status_idx is used below to tell the hw how
3544 		 * much work has been processed, so we must read it before
3545 		 * checking for more work.
3546 		 */
3547 		bnapi->last_status_idx = sblk->status_idx;
3548 
3549 		if (unlikely(work_done >= budget))
3550 			break;
3551 
3552 		rmb();
3553 		if (likely(!bnx2_has_work(bnapi))) {
3554 			napi_complete_done(napi, work_done);
3555 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3556 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3557 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3558 					bnapi->last_status_idx);
3559 				break;
3560 			}
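			/* INTx: ack with the interrupt still masked,
			 * then unmask with a second write.
			 */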
3561 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3562 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3563 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3564 				bnapi->last_status_idx);
3565 
3566 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3567 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3568 				bnapi->last_status_idx);
3569 			break;
3570 		}
3571 	}
3572 
3573 	return work_done;
3574 }
3575 
3576 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3577  * from set_multicast.
3578  */
3579 static void
3580 bnx2_set_rx_mode(struct net_device *dev)
3581 {
3582 	struct bnx2 *bp = netdev_priv(dev);
3583 	u32 rx_mode, sort_mode;
3584 	struct netdev_hw_addr *ha;
3585 	int i;
3586 
3587 	if (!netif_running(dev))
3588 		return;
3589 
3590 	spin_lock_bh(&bp->phy_lock);
3591 
3592 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3593 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3594 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3595 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3596 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3597 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3598 	if (dev->flags & IFF_PROMISC) {
3599 		/* Promiscuous mode. */
3600 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3601 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3602 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	} else {
3612 		/* Accept one or more multicast(s). */
3613 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3614 		u32 regidx;
3615 		u32 bit;
3616 		u32 crc;
3617 
3618 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3619 
3620 		netdev_for_each_mc_addr(ha, dev) {
3621 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3622 			bit = crc & 0xff;
3623 			regidx = (bit & 0xe0) >> 5;
3624 			bit &= 0x1f;
3625 			mc_filter[regidx] |= (1 << bit);
3626 		}
3627 
3628 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3629 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3630 				mc_filter[i]);
3631 		}
3632 
3633 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3634 	}
3635 
3636 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3637 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3638 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3639 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3640 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3642 		i = 0;
3643 		netdev_for_each_uc_addr(ha, dev) {
3644 			bnx2_set_mac_addr(bp, ha->addr,
3645 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3646 			sort_mode |= (1 <<
3647 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3648 			i++;
		}
	}
3652 
3653 	if (rx_mode != bp->rx_mode) {
3654 		bp->rx_mode = rx_mode;
3655 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3656 	}
3657 
3658 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3659 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3660 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3661 
3662 	spin_unlock_bh(&bp->phy_lock);
3663 }
3664 
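/* Validate one firmware file section header: the offset and length
 * must lie within the file and satisfy the required alignment.
 */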
3665 static int
3666 check_fw_section(const struct firmware *fw,
3667 		 const struct bnx2_fw_file_section *section,
3668 		 u32 alignment, bool non_empty)
3669 {
3670 	u32 offset = be32_to_cpu(section->offset);
3671 	u32 len = be32_to_cpu(section->len);
3672 
3673 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3674 		return -EINVAL;
3675 	if ((non_empty && len == 0) || len > fw->size - offset ||
3676 	    len & (alignment - 1))
3677 		return -EINVAL;
3678 	return 0;
3679 }
3680 
3681 static int
3682 check_mips_fw_entry(const struct firmware *fw,
3683 		    const struct bnx2_mips_fw_file_entry *entry)
3684 {
3685 	if (check_fw_section(fw, &entry->text, 4, true) ||
3686 	    check_fw_section(fw, &entry->data, 4, false) ||
3687 	    check_fw_section(fw, &entry->rodata, 4, false))
3688 		return -EINVAL;
3689 	return 0;
3690 }
3691 
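/* The MIPS firmware is always requested before the RV2P firmware, so
 * a non-NULL rv2p_firmware implies both are loaded.
 */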
3692 static void bnx2_release_firmware(struct bnx2 *bp)
3693 {
3694 	if (bp->rv2p_firmware) {
3695 		release_firmware(bp->mips_firmware);
3696 		release_firmware(bp->rv2p_firmware);
3697 		bp->rv2p_firmware = NULL;
3698 	}
3699 }
3700 
3701 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3702 {
3703 	const char *mips_fw_file, *rv2p_fw_file;
3704 	const struct bnx2_mips_fw_file *mips_fw;
3705 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3706 	int rc;
3707 
3708 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3709 		mips_fw_file = FW_MIPS_FILE_09;
3710 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3711 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3712 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3713 		else
3714 			rv2p_fw_file = FW_RV2P_FILE_09;
3715 	} else {
3716 		mips_fw_file = FW_MIPS_FILE_06;
3717 		rv2p_fw_file = FW_RV2P_FILE_06;
3718 	}
3719 
3720 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3721 	if (rc) {
3722 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3723 		goto out;
3724 	}
3725 
3726 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3727 	if (rc) {
3728 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3729 		goto err_release_mips_firmware;
3730 	}
3731 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3732 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3733 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3734 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3735 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3736 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3737 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3738 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3739 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3740 		rc = -EINVAL;
3741 		goto err_release_firmware;
3742 	}
3743 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3744 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3745 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3746 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3747 		rc = -EINVAL;
3748 		goto err_release_firmware;
3749 	}
3750 out:
3751 	return rc;
3752 
3753 err_release_firmware:
3754 	release_firmware(bp->rv2p_firmware);
3755 	bp->rv2p_firmware = NULL;
3756 err_release_mips_firmware:
3757 	release_firmware(bp->mips_firmware);
3758 	goto out;
3759 }
3760 
3761 static int bnx2_request_firmware(struct bnx2 *bp)
3762 {
3763 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3764 }
3765 
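/* Patch page-size-dependent words in the RV2P code before loading. */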
3766 static u32
3767 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3768 {
3769 	switch (idx) {
3770 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3771 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3772 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3773 		break;
3774 	}
3775 	return rv2p_code;
3776 }
3777 
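/* Load one RV2P processor image.  Each 64-bit instruction is written
 * as a high/low word pair followed by a write command addressed by
 * the instruction index; the fixup table then patches single words.
 */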
3778 static int
3779 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3780 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3781 {
3782 	u32 rv2p_code_len, file_offset;
3783 	__be32 *rv2p_code;
3784 	int i;
3785 	u32 val, cmd, addr;
3786 
3787 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3788 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3789 
3790 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3791 
3792 	if (rv2p_proc == RV2P_PROC1) {
3793 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3794 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3795 	} else {
3796 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3797 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3798 	}
3799 
3800 	for (i = 0; i < rv2p_code_len; i += 8) {
3801 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3802 		rv2p_code++;
3803 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3804 		rv2p_code++;
3805 
3806 		val = (i / 8) | cmd;
3807 		BNX2_WR(bp, addr, val);
3808 	}
3809 
3810 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3811 	for (i = 0; i < 8; i++) {
3812 		u32 loc, code;
3813 
3814 		loc = be32_to_cpu(fw_entry->fixup[i]);
3815 		if (loc && ((loc * 4) < rv2p_code_len)) {
3816 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3817 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3818 			code = be32_to_cpu(*(rv2p_code + loc));
3819 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3820 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3821 
3822 			val = (loc / 2) | cmd;
3823 			BNX2_WR(bp, addr, val);
3824 		}
3825 	}
3826 
	/* Reset the processor; un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3834 
3835 	return 0;
3836 }
3837 
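/* Standard MIPS CPU download: halt the CPU, copy the text, data and
 * read-only sections into its scratchpad window, clear the prefetch
 * instruction, point the PC at the image entry point, and clear the
 * halt bit to start execution.
 */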
3838 static int
3839 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3840 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3841 {
3842 	u32 addr, len, file_offset;
3843 	__be32 *data;
3844 	u32 offset;
3845 	u32 val;
3846 
3847 	/* Halt the CPU. */
3848 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3849 	val |= cpu_reg->mode_value_halt;
3850 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3851 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3852 
3853 	/* Load the Text area. */
3854 	addr = be32_to_cpu(fw_entry->text.addr);
3855 	len = be32_to_cpu(fw_entry->text.len);
3856 	file_offset = be32_to_cpu(fw_entry->text.offset);
3857 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3858 
3859 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3860 	if (len) {
3861 		int j;
3862 
3863 		for (j = 0; j < (len / 4); j++, offset += 4)
3864 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3865 	}
3866 
3867 	/* Load the Data area. */
3868 	addr = be32_to_cpu(fw_entry->data.addr);
3869 	len = be32_to_cpu(fw_entry->data.len);
3870 	file_offset = be32_to_cpu(fw_entry->data.offset);
3871 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3872 
3873 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3874 	if (len) {
3875 		int j;
3876 
3877 		for (j = 0; j < (len / 4); j++, offset += 4)
3878 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3879 	}
3880 
3881 	/* Load the Read-Only area. */
3882 	addr = be32_to_cpu(fw_entry->rodata.addr);
3883 	len = be32_to_cpu(fw_entry->rodata.len);
3884 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3885 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3886 
3887 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3888 	if (len) {
3889 		int j;
3890 
3891 		for (j = 0; j < (len / 4); j++, offset += 4)
3892 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3893 	}
3894 
3895 	/* Clear the pre-fetch instruction. */
3896 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3897 
3898 	val = be32_to_cpu(fw_entry->start_addr);
3899 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3900 
3901 	/* Start the CPU. */
3902 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3903 	val &= ~cpu_reg->mode_value_halt;
3904 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3905 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3906 
3907 	return 0;
3908 }
3909 
3910 static int
3911 bnx2_init_cpus(struct bnx2 *bp)
3912 {
3913 	const struct bnx2_mips_fw_file *mips_fw =
3914 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3915 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3916 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3917 	int rc;
3918 
3919 	/* Initialize the RV2P processor. */
3920 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3921 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3922 
3923 	/* Initialize the RX Processor. */
3924 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3925 	if (rc)
3926 		goto init_cpu_err;
3927 
3928 	/* Initialize the TX Processor. */
3929 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3930 	if (rc)
3931 		goto init_cpu_err;
3932 
3933 	/* Initialize the TX Patch-up Processor. */
3934 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3935 	if (rc)
3936 		goto init_cpu_err;
3937 
3938 	/* Initialize the Completion Processor. */
3939 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3940 	if (rc)
3941 		goto init_cpu_err;
3942 
3943 	/* Initialize the Command Processor. */
3944 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3945 
3946 init_cpu_err:
3947 	return rc;
3948 }
3949 
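/* Prepare the MAC for Wake-on-LAN: force a WOL-compatible 10/100 link
 * on copper ports, enable magic-packet and ACPI-pattern reception,
 * accept all broadcast/multicast, and report the chosen suspend mode
 * to the firmware.
 */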
3950 static void
3951 bnx2_setup_wol(struct bnx2 *bp)
3952 {
3953 	int i;
3954 	u32 val, wol_msg;
3955 
3956 	if (bp->wol) {
3957 		u32 advertising;
3958 		u8 autoneg;
3959 
3960 		autoneg = bp->autoneg;
3961 		advertising = bp->advertising;
3962 
3963 		if (bp->phy_port == PORT_TP) {
3964 			bp->autoneg = AUTONEG_SPEED;
3965 			bp->advertising = ADVERTISED_10baseT_Half |
3966 				ADVERTISED_10baseT_Full |
3967 				ADVERTISED_100baseT_Half |
3968 				ADVERTISED_100baseT_Full |
3969 				ADVERTISED_Autoneg;
3970 		}
3971 
3972 		spin_lock_bh(&bp->phy_lock);
3973 		bnx2_setup_phy(bp, bp->phy_port);
3974 		spin_unlock_bh(&bp->phy_lock);
3975 
3976 		bp->autoneg = autoneg;
3977 		bp->advertising = advertising;
3978 
3979 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3980 
3981 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3982 
3983 		/* Enable port mode. */
3984 		val &= ~BNX2_EMAC_MODE_PORT;
3985 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3986 		       BNX2_EMAC_MODE_ACPI_RCVD |
3987 		       BNX2_EMAC_MODE_MPKT;
3988 		if (bp->phy_port == PORT_TP) {
3989 			val |= BNX2_EMAC_MODE_PORT_MII;
3990 		} else {
3991 			val |= BNX2_EMAC_MODE_PORT_GMII;
3992 			if (bp->line_speed == SPEED_2500)
3993 				val |= BNX2_EMAC_MODE_25G_MODE;
3994 		}
3995 
3996 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3997 
3998 		/* receive all multicast */
3999 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4000 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4001 				0xffffffff);
4002 		}
4003 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4004 
4005 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4006 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4007 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4008 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4009 
4010 		/* Need to enable EMAC and RPM for WOL. */
4011 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4012 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4013 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4014 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4015 
4016 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4017 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4018 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4019 
4020 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}
4024 
4025 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4026 		u32 val;
4027 
4028 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4029 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4030 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4031 			return;
4032 		}
4033 		/* Tell firmware not to power down the PHY yet, otherwise
4034 		 * the chip will take a long time to respond to MMIO reads.
4035 		 */
4036 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4037 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4038 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4039 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4040 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4041 	}
}
4044 
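/* Move the device between D0 and D3hot.  Entering D3hot arms WOL
 * first; once the final pci_set_power_state() call is made, chip
 * registers must not be touched until the device is back in D0.
 */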
4045 static int
4046 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4047 {
4048 	switch (state) {
4049 	case PCI_D0: {
4050 		u32 val;
4051 
4052 		pci_enable_wake(bp->pdev, PCI_D0, false);
4053 		pci_set_power_state(bp->pdev, PCI_D0);
4054 
4055 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4056 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4057 		val &= ~BNX2_EMAC_MODE_MPKT;
4058 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4059 
4060 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4061 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4062 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4063 		break;
4064 	}
4065 	case PCI_D3hot: {
4066 		bnx2_setup_wol(bp);
4067 		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;
		}
4076 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4077 			u32 val;
4078 
4079 			/* Tell firmware not to power down the PHY yet,
4080 			 * otherwise the other port may not respond to
4081 			 * MMIO reads.
4082 			 */
4083 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4084 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4085 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4086 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4087 		}
4088 		pci_set_power_state(bp->pdev, PCI_D3hot);
4089 
4090 		/* No more memory access after this point until
4091 		 * device is brought back to D0.
4092 		 */
4093 		break;
4094 	}
4095 	default:
4096 		return -EINVAL;
4097 	}
4098 	return 0;
4099 }
4100 
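/* The NVRAM interface is shared with the management firmware; SW_ARB
 * acts as a hardware semaphore.  Set the request bit and poll until
 * arbitration is granted or NVRAM_TIMEOUT_COUNT polls expire.
 */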
4101 static int
4102 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4103 {
4104 	u32 val;
4105 	int j;
4106 
4107 	/* Request access to the flash interface. */
4108 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4109 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4110 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4111 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4112 			break;
4113 
4114 		udelay(5);
4115 	}
4116 
4117 	if (j >= NVRAM_TIMEOUT_COUNT)
4118 		return -EBUSY;
4119 
4120 	return 0;
4121 }
4122 
4123 static int
4124 bnx2_release_nvram_lock(struct bnx2 *bp)
4125 {
4126 	int j;
4127 	u32 val;
4128 
4129 	/* Relinquish nvram interface. */
4130 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4131 
4132 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4133 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4134 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4135 			break;
4136 
4137 		udelay(5);
4138 	}
4139 
4140 	if (j >= NVRAM_TIMEOUT_COUNT)
4141 		return -EBUSY;
4142 
4143 	return 0;
4144 }
4145 
4147 static int
4148 bnx2_enable_nvram_write(struct bnx2 *bp)
4149 {
4150 	u32 val;
4151 
4152 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4153 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4154 
4155 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4156 		int j;
4157 
4158 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4159 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4160 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4161 
4162 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4163 			udelay(5);
4164 
4165 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4166 			if (val & BNX2_NVM_COMMAND_DONE)
4167 				break;
4168 		}
4169 
4170 		if (j >= NVRAM_TIMEOUT_COUNT)
4171 			return -EBUSY;
4172 	}
4173 	return 0;
4174 }
4175 
4176 static void
4177 bnx2_disable_nvram_write(struct bnx2 *bp)
4178 {
4179 	u32 val;
4180 
4181 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4182 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4183 }
4184 
4186 static void
4187 bnx2_enable_nvram_access(struct bnx2 *bp)
4188 {
4189 	u32 val;
4190 
4191 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4192 	/* Enable both bits, even on read. */
4193 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4194 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4195 }
4196 
4197 static void
4198 bnx2_disable_nvram_access(struct bnx2 *bp)
4199 {
4200 	u32 val;
4201 
4202 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4203 	/* Disable both bits, even after read. */
4204 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4205 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4206 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4207 }
4208 
4209 static int
4210 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4211 {
4212 	u32 cmd;
4213 	int j;
4214 
4215 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4216 		/* Buffered flash, no erase needed */
4217 		return 0;
4218 
4219 	/* Build an erase command */
4220 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4221 	      BNX2_NVM_COMMAND_DOIT;
4222 
4223 	/* Need to clear DONE bit separately. */
4224 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4225 
	/* Address of the NVRAM page to erase. */
4227 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4228 
4229 	/* Issue an erase command. */
4230 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4231 
4232 	/* Wait for completion. */
4233 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4234 		u32 val;
4235 
4236 		udelay(5);
4237 
4238 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4239 		if (val & BNX2_NVM_COMMAND_DONE)
4240 			break;
4241 	}
4242 
4243 	if (j >= NVRAM_TIMEOUT_COUNT)
4244 		return -EBUSY;
4245 
4246 	return 0;
4247 }
4248 
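/* Read one 32-bit word from NVRAM into ret_val.  Multi-word bursts
 * are framed by passing BNX2_NVM_COMMAND_FIRST with the first word
 * and BNX2_NVM_COMMAND_LAST with the final one in cmd_flags.
 */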
4249 static int
4250 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4251 {
4252 	u32 cmd;
4253 	int j;
4254 
4255 	/* Build the command word. */
4256 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4257 
	/* Translate the offset for buffered flash; not needed on the 5709. */
4259 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4260 		offset = ((offset / bp->flash_info->page_size) <<
4261 			   bp->flash_info->page_bits) +
4262 			  (offset % bp->flash_info->page_size);
4263 	}
4264 
4265 	/* Need to clear DONE bit separately. */
4266 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4267 
4268 	/* Address of the NVRAM to read from. */
4269 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4270 
4271 	/* Issue a read command. */
4272 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4273 
4274 	/* Wait for completion. */
4275 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4276 		u32 val;
4277 
4278 		udelay(5);
4279 
4280 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4281 		if (val & BNX2_NVM_COMMAND_DONE) {
4282 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4283 			memcpy(ret_val, &v, 4);
4284 			break;
4285 		}
4286 	}
4287 	if (j >= NVRAM_TIMEOUT_COUNT)
4288 		return -EBUSY;
4289 
4290 	return 0;
4291 }
4292 
4294 static int
4295 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4296 {
4297 	u32 cmd;
4298 	__be32 val32;
4299 	int j;
4300 
4301 	/* Build the command word. */
4302 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4303 
	/* Translate the offset for buffered flash; not needed on the 5709. */
4305 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4306 		offset = ((offset / bp->flash_info->page_size) <<
4307 			  bp->flash_info->page_bits) +
4308 			 (offset % bp->flash_info->page_size);
4309 	}
4310 
4311 	/* Need to clear DONE bit separately. */
4312 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4313 
4314 	memcpy(&val32, val, 4);
4315 
4316 	/* Write the data. */
4317 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4318 
4319 	/* Address of the NVRAM to write to. */
4320 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4321 
4322 	/* Issue the write command. */
4323 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4324 
4325 	/* Wait for completion. */
4326 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4327 		udelay(5);
4328 
4329 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4330 			break;
4331 	}
4332 	if (j >= NVRAM_TIMEOUT_COUNT)
4333 		return -EBUSY;
4334 
4335 	return 0;
4336 }
4337 
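/* Identify the attached flash/EEPROM.  The 5709 always uses
 * flash_5709; older chips are matched against flash_table either by
 * the saved configuration (if the interface was already reconfigured)
 * or by strapping pins, in which case the matching entry is
 * programmed into the NVM config registers.
 */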
4338 static int
4339 bnx2_init_nvram(struct bnx2 *bp)
4340 {
4341 	u32 val;
4342 	int j, entry_count, rc = 0;
4343 	const struct flash_spec *flash;
4344 
4345 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4346 		bp->flash_info = &flash_5709;
4347 		goto get_flash_size;
4348 	}
4349 
4350 	/* Determine the selected interface. */
4351 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4352 
4353 	entry_count = ARRAY_SIZE(flash_table);
4354 
4355 	if (val & 0x40000000) {
4356 
4357 		/* Flash interface has been reconfigured */
4358 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4359 		     j++, flash++) {
4360 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4361 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4362 				bp->flash_info = flash;
4363 				break;
4364 			}
4365 		}
	} else {
		u32 mask;

		/* Flash interface has not been reconfigured yet. */
4370 
4371 		if (val & (1 << 23))
4372 			mask = FLASH_BACKUP_STRAP_MASK;
4373 		else
4374 			mask = FLASH_STRAP_MASK;
4375 
4376 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4377 			j++, flash++) {
4378 
4379 			if ((val & mask) == (flash->strapping & mask)) {
4380 				bp->flash_info = flash;
4381 
4382 				/* Request access to the flash interface. */
4383 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4384 					return rc;
4385 
4386 				/* Enable access to flash interface */
4387 				bnx2_enable_nvram_access(bp);
4388 
4389 				/* Reconfigure the flash interface */
4390 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4391 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4392 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4393 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4394 
4395 				/* Disable access to flash interface */
4396 				bnx2_disable_nvram_access(bp);
4397 				bnx2_release_nvram_lock(bp);
4398 
4399 				break;
4400 			}
4401 		}
4402 	} /* if (val & 0x40000000) */
4403 
4404 	if (j == entry_count) {
4405 		bp->flash_info = NULL;
4406 		pr_alert("Unknown flash/EEPROM type\n");
4407 		return -ENODEV;
4408 	}
4409 
4410 get_flash_size:
4411 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4412 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4413 	if (val)
4414 		bp->flash_size = val;
4415 	else
4416 		bp->flash_size = bp->flash_info->total_size;
4417 
4418 	return rc;
4419 }
4420 
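/* Read an arbitrary byte range from NVRAM.  The hardware only
 * performs aligned 32-bit accesses, so an unaligned head and a ragged
 * tail are each read into a scratch dword and copied out piecewise.
 */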
4421 static int
4422 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4423 		int buf_size)
4424 {
4425 	int rc = 0;
4426 	u32 cmd_flags, offset32, len32, extra;
4427 
4428 	if (buf_size == 0)
4429 		return 0;
4430 
4431 	/* Request access to the flash interface. */
4432 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4433 		return rc;
4434 
4435 	/* Enable access to flash interface */
4436 	bnx2_enable_nvram_access(bp);
4437 
4438 	len32 = buf_size;
4439 	offset32 = offset;
4440 	extra = 0;
4441 
4442 	cmd_flags = 0;
4443 
4444 	if (offset32 & 3) {
4445 		u8 buf[4];
4446 		u32 pre_len;
4447 
4448 		offset32 &= ~3;
4449 		pre_len = 4 - (offset & 3);
4450 
4451 		if (pre_len >= len32) {
4452 			pre_len = len32;
4453 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4454 				    BNX2_NVM_COMMAND_LAST;
		} else {
4457 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4458 		}
4459 
4460 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4461 
4462 		if (rc)
4463 			return rc;
4464 
4465 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4466 
4467 		offset32 += 4;
4468 		ret_buf += pre_len;
4469 		len32 -= pre_len;
4470 	}
4471 	if (len32 & 3) {
4472 		extra = 4 - (len32 & 3);
4473 		len32 = (len32 + 4) & ~3;
4474 	}
4475 
4476 	if (len32 == 4) {
4477 		u8 buf[4];
4478 
4479 		if (cmd_flags)
4480 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4481 		else
4482 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4483 				    BNX2_NVM_COMMAND_LAST;
4484 
4485 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4486 
4487 		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
4490 		u8 buf[4];
4491 
4492 		/* Read the first word. */
4493 		if (cmd_flags)
4494 			cmd_flags = 0;
4495 		else
4496 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4497 
4498 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4499 
4500 		/* Advance to the next dword. */
4501 		offset32 += 4;
4502 		ret_buf += 4;
4503 		len32 -= 4;
4504 
4505 		while (len32 > 4 && rc == 0) {
4506 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4507 
4508 			/* Advance to the next dword. */
4509 			offset32 += 4;
4510 			ret_buf += 4;
4511 			len32 -= 4;
4512 		}
4513 
4514 		if (rc)
4515 			return rc;
4516 
4517 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4518 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4519 
4520 		memcpy(ret_buf, buf, 4 - extra);
4521 	}
4522 
4523 	/* Disable access to flash interface */
4524 	bnx2_disable_nvram_access(bp);
4525 
4526 	bnx2_release_nvram_lock(bp);
4527 
4528 	return rc;
4529 }
4530 
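/* Write an arbitrary byte range to NVRAM.  Unaligned edges are merged
 * with the current contents, and for non-buffered flash every
 * affected page is read in full, erased, then rewritten around the
 * new data.
 */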
4531 static int
4532 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4533 		int buf_size)
4534 {
4535 	u32 written, offset32, len32;
4536 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4537 	int rc = 0;
4538 	int align_start, align_end;
4539 
4540 	buf = data_buf;
4541 	offset32 = offset;
4542 	len32 = buf_size;
4543 	align_start = align_end = 0;
4544 
4545 	if ((align_start = (offset32 & 3))) {
4546 		offset32 &= ~3;
4547 		len32 += align_start;
4548 		if (len32 < 4)
4549 			len32 = 4;
4550 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4551 			return rc;
4552 	}
4553 
4554 	if (len32 & 3) {
4555 		align_end = 4 - (len32 & 3);
4556 		len32 += align_end;
4557 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4558 			return rc;
4559 	}
4560 
4561 	if (align_start || align_end) {
4562 		align_buf = kmalloc(len32, GFP_KERNEL);
4563 		if (!align_buf)
4564 			return -ENOMEM;
4565 		if (align_start) {
4566 			memcpy(align_buf, start, 4);
4567 		}
4568 		if (align_end) {
4569 			memcpy(align_buf + len32 - 4, end, 4);
4570 		}
4571 		memcpy(align_buf + align_start, data_buf, buf_size);
4572 		buf = align_buf;
4573 	}
4574 
4575 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4576 		flash_buffer = kmalloc(264, GFP_KERNEL);
4577 		if (!flash_buffer) {
4578 			rc = -ENOMEM;
4579 			goto nvram_write_end;
4580 		}
4581 	}
4582 
4583 	written = 0;
4584 	while ((written < len32) && (rc == 0)) {
4585 		u32 page_start, page_end, data_start, data_end;
4586 		u32 addr, cmd_flags;
4587 		int i;
4588 
		/* Find the page_start addr */
4590 		page_start = offset32 + written;
4591 		page_start -= (page_start % bp->flash_info->page_size);
4592 		/* Find the page_end addr */
4593 		page_end = page_start + bp->flash_info->page_size;
4594 		/* Find the data_start addr */
4595 		data_start = (written == 0) ? offset32 : page_start;
4596 		/* Find the data_end addr */
4597 		data_end = (page_end > offset32 + len32) ?
4598 			(offset32 + len32) : page_end;
4599 
4600 		/* Request access to the flash interface. */
4601 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4602 			goto nvram_write_end;
4603 
4604 		/* Enable access to flash interface */
4605 		bnx2_enable_nvram_access(bp);
4606 
4607 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4608 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4609 			int j;
4610 
			/* Read the whole page into the buffer
			 * (non-buffered flash only).
			 */
4613 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4614 				if (j == (bp->flash_info->page_size - 4)) {
4615 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4616 				}
4617 				rc = bnx2_nvram_read_dword(bp,
4618 					page_start + j,
4619 					&flash_buffer[j],
4620 					cmd_flags);
4621 
4622 				if (rc)
4623 					goto nvram_write_end;
4624 
4625 				cmd_flags = 0;
4626 			}
4627 		}
4628 
4629 		/* Enable writes to flash interface (unlock write-protect) */
4630 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4631 			goto nvram_write_end;
4632 
4633 		/* Loop to write back the buffer data from page_start to
4634 		 * data_start */
4635 		i = 0;
4636 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4637 			/* Erase the page */
4638 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4639 				goto nvram_write_end;
4640 
4641 			/* Re-enable the write again for the actual write */
4642 			bnx2_enable_nvram_write(bp);
4643 
4644 			for (addr = page_start; addr < data_start;
4645 				addr += 4, i += 4) {
4646 
4647 				rc = bnx2_nvram_write_dword(bp, addr,
4648 					&flash_buffer[i], cmd_flags);
4649 
4650 				if (rc != 0)
4651 					goto nvram_write_end;
4652 
4653 				cmd_flags = 0;
4654 			}
4655 		}
4656 
4657 		/* Loop to write the new data from data_start to data_end */
4658 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4659 			if ((addr == page_end - 4) ||
4660 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4661 				 (addr == data_end - 4))) {
4662 
4663 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4664 			}
4665 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4666 				cmd_flags);
4667 
4668 			if (rc != 0)
4669 				goto nvram_write_end;
4670 
4671 			cmd_flags = 0;
4672 			buf += 4;
4673 		}
4674 
4675 		/* Loop to write back the buffer data from data_end
4676 		 * to page_end */
4677 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4678 			for (addr = data_end; addr < page_end;
4679 				addr += 4, i += 4) {
4680 
				if (addr == page_end - 4)
					cmd_flags = BNX2_NVM_COMMAND_LAST;
4684 				rc = bnx2_nvram_write_dword(bp, addr,
4685 					&flash_buffer[i], cmd_flags);
4686 
4687 				if (rc != 0)
4688 					goto nvram_write_end;
4689 
4690 				cmd_flags = 0;
4691 			}
4692 		}
4693 
4694 		/* Disable writes to flash interface (lock write-protect) */
4695 		bnx2_disable_nvram_write(bp);
4696 
4697 		/* Disable access to flash interface */
4698 		bnx2_disable_nvram_access(bp);
4699 		bnx2_release_nvram_lock(bp);
4700 
4701 		/* Increment written */
4702 		written += data_end - data_start;
4703 	}
4704 
4705 nvram_write_end:
4706 	kfree(flash_buffer);
4707 	kfree(align_buf);
4708 	return rc;
4709 }
4710 
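/* Negotiate optional features with the bootcode: keep-VLAN mode and
 * remote PHY support are advertised in the firmware capability
 * mailbox and acknowledged back with a driver signature.
 */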
4711 static void
4712 bnx2_init_fw_cap(struct bnx2 *bp)
4713 {
4714 	u32 val, sig = 0;
4715 
4716 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4717 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4718 
4719 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4720 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4721 
4722 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4723 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4724 		return;
4725 
4726 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4727 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4728 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4729 	}
4730 
4731 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4732 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4733 		u32 link;
4734 
4735 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4736 
4737 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4738 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4739 			bp->phy_port = PORT_FIBRE;
4740 		else
4741 			bp->phy_port = PORT_TP;
4742 
4743 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4744 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4745 	}
4746 
4747 	if (netif_running(bp->dev) && sig)
4748 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4749 }
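
/* Point GRC windows 2 and 3 at the MSI-X vector table and pending-bit
 * array so both are reachable through the register BAR.
 */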
4750 
4751 static void
4752 bnx2_setup_msix_tbl(struct bnx2 *bp)
4753 {
4754 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4755 
4756 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4757 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4758 }
4759 
4760 static void
4761 bnx2_wait_dma_complete(struct bnx2 *bp)
4762 {
4763 	u32 val;
4764 	int i;
4765 
4766 	/*
4767 	 * Wait for the current PCI transaction to complete before
4768 	 * issuing a reset.
4769 	 */
4770 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4771 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4772 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4773 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4774 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4775 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4776 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4777 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4778 		udelay(5);
4779 	} else {  /* 5709 */
4780 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4781 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4782 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4783 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4784 
4785 		for (i = 0; i < 100; i++) {
4786 			msleep(1);
4787 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4788 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4789 				break;
4790 		}
4791 	}
4794 }
4795 
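/* Soft-reset the chip.  The 5709 uses the dedicated SW_RESET command;
 * older chips request a core reset through PCICFG_MISC_CONFIG and are
 * polled for completion.
 */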
4797 static int
4798 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4799 {
4800 	u32 val;
4801 	int i, rc = 0;
4802 	u8 old_port;
4803 
4804 	/* Wait for the current PCI transaction to complete before
4805 	 * issuing a reset. */
4806 	bnx2_wait_dma_complete(bp);
4807 
4808 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4809 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4810 
4811 	/* Deposit a driver reset signature so the firmware knows that
4812 	 * this is a soft reset. */
4813 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4814 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4815 
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset.
	 */
4818 	val = BNX2_RD(bp, BNX2_MISC_ID);
4819 
4820 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4821 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4822 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4823 		udelay(5);
4824 
4825 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4826 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4827 
4828 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4829 
4830 	} else {
4831 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4832 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4833 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4834 
4835 		/* Chip reset. */
4836 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4837 
4838 		/* Reading back any register after chip reset will hang the
4839 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4840 		 * of margin for write posting.
4841 		 */
4842 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4843 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4844 			msleep(20);
4845 
		/* Reset takes approximately 30 usec */
4847 		for (i = 0; i < 10; i++) {
4848 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4849 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4850 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4851 				break;
4852 			udelay(10);
4853 		}
4854 
4855 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4856 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4857 			pr_err("Chip reset did not complete\n");
4858 			return -EBUSY;
4859 		}
4860 	}
4861 
4862 	/* Make sure byte swapping is properly configured. */
4863 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4864 	if (val != 0x01020304) {
4865 		pr_err("Chip not in correct endian mode\n");
4866 		return -ENODEV;
4867 	}
4868 
4869 	/* Wait for the firmware to finish its initialization. */
4870 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4871 	if (rc)
4872 		return rc;
4873 
4874 	spin_lock_bh(&bp->phy_lock);
4875 	old_port = bp->phy_port;
4876 	bnx2_init_fw_cap(bp);
4877 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4878 	    old_port != bp->phy_port)
4879 		bnx2_set_default_remote_link(bp);
4880 	spin_unlock_bh(&bp->phy_lock);
4881 
4882 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The
		 * default value of this register is 0x0000000e.
		 */
4885 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4886 
4887 		/* Remove bad rbuf memory from the free pool. */
4888 		rc = bnx2_alloc_bad_rbuf(bp);
4889 	}
4890 
4891 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4892 		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4894 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4895 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4896 	}
4897 
4898 	return rc;
4899 }
4900 
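/* Bring the freshly reset chip to an operational state: program DMA
 * byte ordering, load the on-chip CPUs, configure the MTU and host
 * coalescing parameters, clear statistics, and tell the firmware the
 * driver is ready.
 */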
4901 static int
4902 bnx2_init_chip(struct bnx2 *bp)
4903 {
4904 	u32 val, mtu;
4905 	int rc, i;
4906 
4907 	/* Make sure the interrupt is not active. */
4908 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4909 
4910 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4911 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4912 #ifdef __BIG_ENDIAN
4913 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4914 #endif
4915 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4916 	      DMA_READ_CHANS << 12 |
4917 	      DMA_WRITE_CHANS << 16;
4918 
4919 	val |= (0x2 << 20) | (1 << 11);
4920 
4921 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4922 		val |= (1 << 23);
4923 
4924 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4925 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4926 	    !(bp->flags & BNX2_FLAG_PCIX))
4927 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4928 
4929 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4930 
4931 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4932 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4933 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4934 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4935 	}
4936 
4937 	if (bp->flags & BNX2_FLAG_PCIX) {
4938 		u16 val16;
4939 
4940 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4941 				     &val16);
4942 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4943 				      val16 & ~PCI_X_CMD_ERO);
4944 	}
4945 
4946 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4947 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4948 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4949 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4950 
4951 	/* Initialize context mapping and zero out the quick contexts.  The
4952 	 * context block must have already been enabled. */
4953 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4954 		rc = bnx2_init_5709_context(bp);
4955 		if (rc)
4956 			return rc;
	} else {
		bnx2_init_context(bp);
	}
4959 
4960 	if ((rc = bnx2_init_cpus(bp)) != 0)
4961 		return rc;
4962 
4963 	bnx2_init_nvram(bp);
4964 
4965 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4966 
4967 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4968 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4969 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4970 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4971 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4972 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4973 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4974 	}
4975 
4976 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4977 
4978 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4979 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4980 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4981 
4982 	val = (BNX2_PAGE_BITS - 8) << 24;
4983 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4984 
4985 	/* Configure page size. */
4986 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4987 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4988 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4989 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4990 
4991 	val = bp->mac_addr[0] +
4992 	      (bp->mac_addr[1] << 8) +
4993 	      (bp->mac_addr[2] << 16) +
4994 	      bp->mac_addr[3] +
4995 	      (bp->mac_addr[4] << 8) +
4996 	      (bp->mac_addr[5] << 16);
4997 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4998 
4999 	/* Program the MTU.  Also include 4 bytes for CRC32. */
5000 	mtu = bp->dev->mtu;
5001 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
5002 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
5003 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5004 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5005 
5006 	if (mtu < ETH_DATA_LEN)
5007 		mtu = ETH_DATA_LEN;
5008 
5009 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5010 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5011 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5012 
5013 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5014 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5015 		bp->bnx2_napi[i].last_status_idx = 0;
5016 
5017 	bp->idle_chk_status_idx = 0xffff;
5018 
5019 	/* Set up how to generate a link change interrupt. */
5020 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5021 
5022 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5023 		(u64) bp->status_blk_mapping & 0xffffffff);
5024 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5025 
5026 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5027 		(u64) bp->stats_blk_mapping & 0xffffffff);
5028 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5029 		(u64) bp->stats_blk_mapping >> 32);
5030 
5031 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5032 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5033 
5034 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5035 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5036 
5037 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5038 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5039 
5040 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5041 
5042 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5043 
5044 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5045 		(bp->com_ticks_int << 16) | bp->com_ticks);
5046 
5047 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5048 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5049 
5050 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5051 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5052 	else
5053 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5054 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5055 
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	} else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
5062 
5063 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5064 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5065 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5066 
5067 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5068 	}
5069 
5070 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5071 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5072 
5073 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5074 
5075 	if (bp->rx_ticks < 25)
5076 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5077 	else
5078 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5079 
5080 	for (i = 1; i < bp->irq_nvecs; i++) {
5081 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5082 			   BNX2_HC_SB_CONFIG_1;
5083 
5084 		BNX2_WR(bp, base,
5085 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5086 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5087 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5088 
5089 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5090 			(bp->tx_quick_cons_trip_int << 16) |
5091 			 bp->tx_quick_cons_trip);
5092 
5093 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5094 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5095 
5096 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5097 			(bp->rx_quick_cons_trip_int << 16) |
5098 			bp->rx_quick_cons_trip);
5099 
5100 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5101 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5102 	}
5103 
5104 	/* Clear internal stats counters. */
5105 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5106 
5107 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5108 
5109 	/* Initialize the receive filter. */
5110 	bnx2_set_rx_mode(bp->dev);
5111 
5112 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5113 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5114 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5115 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5116 	}
5117 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5118 			  1, 0);
5119 
5120 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5121 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5122 
5123 	udelay(20);
5124 
5125 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5126 
5127 	return rc;
5128 }
5129 
5130 static void
5131 bnx2_clear_ring_states(struct bnx2 *bp)
5132 {
5133 	struct bnx2_napi *bnapi;
5134 	struct bnx2_tx_ring_info *txr;
5135 	struct bnx2_rx_ring_info *rxr;
5136 	int i;
5137 
5138 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5139 		bnapi = &bp->bnx2_napi[i];
5140 		txr = &bnapi->tx_ring;
5141 		rxr = &bnapi->rx_ring;
5142 
5143 		txr->tx_cons = 0;
5144 		txr->hw_tx_cons = 0;
5145 		rxr->rx_prod_bseq = 0;
5146 		rxr->rx_prod = 0;
5147 		rxr->rx_cons = 0;
5148 		rxr->rx_pg_prod = 0;
5149 		rxr->rx_pg_cons = 0;
5150 	}
5151 }
5152 
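/* Write the TX ring context: the L2 context type, the command type,
 * and the 64-bit TX descriptor ring address.  The 5709 keeps these
 * fields at different context offsets than older chips.
 */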
5153 static void
5154 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5155 {
5156 	u32 val, offset0, offset1, offset2, offset3;
5157 	u32 cid_addr = GET_CID_ADDR(cid);
5158 
5159 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5160 		offset0 = BNX2_L2CTX_TYPE_XI;
5161 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5162 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5163 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5164 	} else {
5165 		offset0 = BNX2_L2CTX_TYPE;
5166 		offset1 = BNX2_L2CTX_CMD_TYPE;
5167 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5168 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5169 	}
5170 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5171 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5172 
5173 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5174 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5175 
5176 	val = (u64) txr->tx_desc_mapping >> 32;
5177 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5178 
5179 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5180 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5181 }
5182 
5183 static void
5184 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5185 {
5186 	struct bnx2_tx_bd *txbd;
	u32 cid;
5188 	struct bnx2_napi *bnapi;
5189 	struct bnx2_tx_ring_info *txr;
5190 
5191 	bnapi = &bp->bnx2_napi[ring_num];
5192 	txr = &bnapi->tx_ring;
5193 
5194 	if (ring_num == 0)
5195 		cid = TX_CID;
5196 	else
5197 		cid = TX_TSS_CID + ring_num - 1;
5198 
5199 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5200 
5201 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5202 
5203 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5204 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5205 
5206 	txr->tx_prod = 0;
5207 	txr->tx_prod_bseq = 0;
5208 
5209 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5210 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5211 
5212 	bnx2_init_tx_context(bp, cid, txr);
5213 }
5214 
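/* Initialize every BD in each RX ring page and chain the pages
 * together: the last BD of each page holds the DMA address of the
 * next page, with the final page wrapping back to the first.
 */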
5215 static void
5216 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5217 		     u32 buf_size, int num_rings)
5218 {
5219 	int i;
5220 	struct bnx2_rx_bd *rxbd;
5221 
5222 	for (i = 0; i < num_rings; i++) {
5223 		int j;
5224 
5225 		rxbd = &rx_ring[i][0];
5226 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5227 			rxbd->rx_bd_len = buf_size;
5228 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5229 		}
5230 		if (i == (num_rings - 1))
5231 			j = 0;
5232 		else
5233 			j = i + 1;
5234 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5235 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5236 	}
5237 }
5238 
5239 static void
5240 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5241 {
5242 	int i;
5243 	u16 prod, ring_prod;
5244 	u32 cid, rx_cid_addr, val;
5245 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5246 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5247 
5248 	if (ring_num == 0)
5249 		cid = RX_CID;
5250 	else
5251 		cid = RX_RSS_CID + ring_num - 1;
5252 
5253 	rx_cid_addr = GET_CID_ADDR(cid);
5254 
5255 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5256 			     bp->rx_buf_use_size, bp->rx_max_ring);
5257 
5258 	bnx2_init_rx_context(bp, cid);
5259 
5260 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5261 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5262 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5263 	}
5264 
5265 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5266 	if (bp->rx_pg_ring_size) {
5267 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5268 				     rxr->rx_pg_desc_mapping,
5269 				     PAGE_SIZE, bp->rx_max_pg_ring);
5270 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5271 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5272 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5273 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5274 
5275 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5276 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5277 
5278 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5279 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5280 
5281 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5282 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5283 	}
5284 
5285 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5286 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5287 
5288 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5289 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5290 
5291 	ring_prod = prod = rxr->rx_pg_prod;
5292 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5293 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5294 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5295 				    ring_num, i, bp->rx_pg_ring_size);
5296 			break;
5297 		}
5298 		prod = BNX2_NEXT_RX_BD(prod);
5299 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5300 	}
5301 	rxr->rx_pg_prod = prod;
5302 
5303 	ring_prod = prod = rxr->rx_prod;
5304 	for (i = 0; i < bp->rx_ring_size; i++) {
5305 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5306 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5307 				    ring_num, i, bp->rx_ring_size);
5308 			break;
5309 		}
5310 		prod = BNX2_NEXT_RX_BD(prod);
5311 		ring_prod = BNX2_RX_RING_IDX(prod);
5312 	}
5313 	rxr->rx_prod = prod;
5314 
5315 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5316 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5317 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5318 
5319 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5320 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5321 
5322 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5323 }
5324 
5325 static void
5326 bnx2_init_all_rings(struct bnx2 *bp)
5327 {
5328 	int i;
5329 	u32 val;
5330 
5331 	bnx2_clear_ring_states(bp);
5332 
5333 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5334 	for (i = 0; i < bp->num_tx_rings; i++)
5335 		bnx2_init_tx_ring(bp, i);
5336 
5337 	if (bp->num_tx_rings > 1)
5338 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5339 			(TX_TSS_CID << 7));
5340 
5341 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5342 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5343 
5344 	for (i = 0; i < bp->num_rx_rings; i++)
5345 		bnx2_init_rx_ring(bp, i);
5346 
5347 	if (bp->num_rx_rings > 1) {
5348 		u32 tbl_32 = 0;
5349 
5350 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5351 			int shift = (i % 8) << 2;
5352 
5353 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5354 			if ((i % 8) == 7) {
5355 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5356 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5357 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5358 					BNX2_RLUP_RSS_COMMAND_WRITE |
5359 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5360 				tbl_32 = 0;
5361 			}
5362 		}
5363 
5364 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5365 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5366 
5367 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
5370 }
5371 
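/* Convert a buffer count into a number of BD ring pages, rounded up
 * to the next power of two; max_size is expected to be a power of two
 * and seeds the search for the highest bit.
 */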
5372 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5373 {
5374 	u32 max, num_rings = 1;
5375 
5376 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5377 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5378 		num_rings++;
5379 	}
5380 	/* round to next power of 2 */
5381 	max = max_size;
5382 	while ((max & num_rings) == 0)
5383 		max >>= 1;
5384 
5385 	if (num_rings != max)
5386 		max <<= 1;
5387 
5388 	return max;
5389 }
5390 
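/* Size the RX rings for the current MTU.  When one buffer would no
 * longer fit in a page (and jumbo pages work on this chip), the ring
 * is split: small data buffers keep the packet headers and the
 * remaining payload spills into the page ring.
 */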
5391 static void
5392 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5393 {
5394 	u32 rx_size, rx_space, jumbo_size;
5395 
5396 	/* 8 for CRC and VLAN */
5397 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5398 
5399 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5400 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5401 
5402 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5403 	bp->rx_pg_ring_size = 0;
5404 	bp->rx_max_pg_ring = 0;
5405 	bp->rx_max_pg_ring_idx = 0;
5406 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5407 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5408 
5409 		jumbo_size = size * pages;
5410 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5411 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5412 
5413 		bp->rx_pg_ring_size = jumbo_size;
5414 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5415 							BNX2_MAX_RX_PG_RINGS);
5416 		bp->rx_max_pg_ring_idx =
5417 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5418 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5419 		bp->rx_copy_thresh = 0;
5420 	}
5421 
5422 	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5424 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5425 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5426 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5427 	bp->rx_ring_size = size;
5428 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5429 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5430 }
5431 
5432 static void
5433 bnx2_free_tx_skbs(struct bnx2 *bp)
5434 {
5435 	int i;
5436 
5437 	for (i = 0; i < bp->num_tx_rings; i++) {
5438 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5439 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5440 		int j;
5441 
5442 		if (!txr->tx_buf_ring)
5443 			continue;
5444 
5445 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5446 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5447 			struct sk_buff *skb = tx_buf->skb;
5448 			int k, last;
5449 
5450 			if (!skb) {
5451 				j = BNX2_NEXT_TX_BD(j);
5452 				continue;
5453 			}
5454 
5455 			dma_unmap_single(&bp->pdev->dev,
5456 					 dma_unmap_addr(tx_buf, mapping),
5457 					 skb_headlen(skb),
5458 					 PCI_DMA_TODEVICE);
5459 
5460 			tx_buf->skb = NULL;
5461 
5462 			last = tx_buf->nr_frags;
5463 			j = BNX2_NEXT_TX_BD(j);
5464 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5465 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5466 				dma_unmap_page(&bp->pdev->dev,
5467 					dma_unmap_addr(tx_buf, mapping),
5468 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5469 					PCI_DMA_TODEVICE);
5470 			}
5471 			dev_kfree_skb(skb);
5472 		}
5473 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5474 	}
5475 }
5476 
5477 static void
5478 bnx2_free_rx_skbs(struct bnx2 *bp)
5479 {
5480 	int i;
5481 
5482 	for (i = 0; i < bp->num_rx_rings; i++) {
5483 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5484 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5485 		int j;
5486 
5487 		if (!rxr->rx_buf_ring)
5488 			return;
5489 
5490 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5491 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5492 			u8 *data = rx_buf->data;
5493 
5494 			if (!data)
5495 				continue;
5496 
5497 			dma_unmap_single(&bp->pdev->dev,
5498 					 dma_unmap_addr(rx_buf, mapping),
5499 					 bp->rx_buf_use_size,
5500 					 PCI_DMA_FROMDEVICE);
5501 
5502 			rx_buf->data = NULL;
5503 
5504 			kfree(data);
5505 		}
5506 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5507 			bnx2_free_rx_page(bp, rxr, j);
5508 	}
5509 }
5510 
5511 static void
5512 bnx2_free_skbs(struct bnx2 *bp)
5513 {
5514 	bnx2_free_tx_skbs(bp);
5515 	bnx2_free_rx_skbs(bp);
5516 }
5517 
5518 static int
5519 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5520 {
5521 	int rc;
5522 
5523 	rc = bnx2_reset_chip(bp, reset_code);
5524 	bnx2_free_skbs(bp);
5525 	if (rc)
5526 		return rc;
5527 
5528 	if ((rc = bnx2_init_chip(bp)) != 0)
5529 		return rc;
5530 
5531 	bnx2_init_all_rings(bp);
5532 	return 0;
5533 }
5534 
5535 static int
5536 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5537 {
5538 	int rc;
5539 
5540 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5541 		return rc;
5542 
5543 	spin_lock_bh(&bp->phy_lock);
5544 	bnx2_init_phy(bp, reset_phy);
5545 	bnx2_set_link(bp);
5546 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5547 		bnx2_remote_phy_event(bp);
5548 	spin_unlock_bh(&bp->phy_lock);
5549 	return 0;
5550 }
5551 
5552 static int
5553 bnx2_shutdown_chip(struct bnx2 *bp)
5554 {
5555 	u32 reset_code;
5556 
5557 	if (bp->flags & BNX2_FLAG_NO_WOL)
5558 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5559 	else if (bp->wol)
5560 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5561 	else
5562 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5563 
5564 	return bnx2_reset_chip(bp, reset_code);
5565 }
5566 
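/* Ethtool register self-test.  Each table entry supplies a read/write
 * mask and a read-only mask; all-zeros and all-ones patterns are
 * written and the test verifies that writable bits change while
 * read-only bits keep their saved value, restoring the original
 * contents afterwards.
 */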
5567 static int
5568 bnx2_test_registers(struct bnx2 *bp)
5569 {
5570 	int ret;
5571 	int i, is_5709;
5572 	static const struct {
5573 		u16   offset;
5574 		u16   flags;
5575 #define BNX2_FL_NOT_5709	1
5576 		u32   rw_mask;
5577 		u32   ro_mask;
5578 	} reg_tbl[] = {
5579 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5580 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5581 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5582 
5583 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5584 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5585 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5587 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5588 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5589 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5590 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5591 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5592 
5593 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5594 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5595 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5596 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5597 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5598 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5599 
5600 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5601 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5602 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5603 
5604 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5605 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5606 
5607 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5608 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5609 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5610 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5611 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5612 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5613 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5614 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5615 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5616 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5617 
5618 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5619 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5620 
5621 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5622 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5623 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5624 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5625 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5626 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5627 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5628 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5629 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5630 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5631 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5632 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5633 
5634 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5635 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5636 
5637 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5638 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5639 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5640 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5641 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5642 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5643 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5644 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5645 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5646 
5647 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5648 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5649 
5650 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5651 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5652 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5653 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5654 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5655 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5656 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5657 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5658 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5659 
5660 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5661 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5662 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5663 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5664 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5665 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5666 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5667 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5668 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5669 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5670 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5671 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5672 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5673 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5674 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5675 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5676 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5677 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5678 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5679 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5680 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5681 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5682 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5683 
5684 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5685 	};
5686 
5687 	ret = 0;
5688 	is_5709 = 0;
5689 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5690 		is_5709 = 1;
5691 
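	/* For each register, write 0 and then all ones: the read/write
	 * bits must take the written value while the read-only bits
	 * must keep their saved contents.
	 */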
5692 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5693 		u32 offset, rw_mask, ro_mask, save_val, val;
5694 		u16 flags = reg_tbl[i].flags;
5695 
5696 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5697 			continue;
5698 
5699 		offset = (u32) reg_tbl[i].offset;
5700 		rw_mask = reg_tbl[i].rw_mask;
5701 		ro_mask = reg_tbl[i].ro_mask;
5702 
5703 		save_val = readl(bp->regview + offset);
5704 
5705 		writel(0, bp->regview + offset);
5706 
5707 		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;
5715 
5716 		writel(0xffffffff, bp->regview + offset);
5717 
5718 		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;
5726 
5727 		writel(save_val, bp->regview + offset);
5728 		continue;
5729 
5730 reg_test_err:
5731 		writel(save_val, bp->regview + offset);
5732 		ret = -ENODEV;
5733 		break;
5734 	}
5735 	return ret;
5736 }
5737 
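/* Indirect memory test: write each pattern to every 32-bit word in the
 * region through the indirect register interface and read it back,
 * failing on the first mismatch.
 */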
5738 static int
5739 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5740 {
5741 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5742 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5743 	int i;
5744 
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5746 		u32 offset;
5747 
5748 		for (offset = 0; offset < size; offset += 4) {
5749 
5750 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5751 
5752 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5753 				test_pattern[i]) {
5754 				return -ENODEV;
5755 			}
5756 		}
5757 	}
5758 	return 0;
5759 }
5760 
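/* Run the pattern test over the per-chip table of internal memory
 * regions.  The 5709 table omits the last region of the 5706 table.
 */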
5761 static int
5762 bnx2_test_memory(struct bnx2 *bp)
5763 {
5764 	int ret = 0;
5765 	int i;
5766 	static struct mem_entry {
5767 		u32   offset;
5768 		u32   len;
5769 	} mem_tbl_5706[] = {
5770 		{ 0x60000,  0x4000 },
5771 		{ 0xa0000,  0x3000 },
5772 		{ 0xe0000,  0x4000 },
5773 		{ 0x120000, 0x4000 },
5774 		{ 0x1a0000, 0x4000 },
5775 		{ 0x160000, 0x4000 },
5776 		{ 0xffffffff, 0    },
5777 	},
5778 	mem_tbl_5709[] = {
5779 		{ 0x60000,  0x4000 },
5780 		{ 0xa0000,  0x3000 },
5781 		{ 0xe0000,  0x4000 },
5782 		{ 0x120000, 0x4000 },
5783 		{ 0x1a0000, 0x4000 },
5784 		{ 0xffffffff, 0    },
5785 	};
5786 	struct mem_entry *mem_tbl;
5787 
5788 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5789 		mem_tbl = mem_tbl_5709;
5790 	else
5791 		mem_tbl = mem_tbl_5706;
5792 
5793 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
				       mem_tbl[i].len);
		if (ret)
			return ret;
5798 	}
5799 
5800 	return ret;
5801 }
5802 
5803 #define BNX2_MAC_LOOPBACK	0
5804 #define BNX2_PHY_LOOPBACK	1
5805 
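/* Send one self-addressed packet through the selected loopback path on
 * ring 0 and verify it comes back intact: the TX and RX indices must
 * advance, the frame header must show no errors, and the payload must
 * match byte for byte.
 */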
5806 static int
5807 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5808 {
5809 	unsigned int pkt_size, num_pkts, i;
5810 	struct sk_buff *skb;
5811 	u8 *data;
5812 	unsigned char *packet;
5813 	u16 rx_start_idx, rx_idx;
5814 	dma_addr_t map;
5815 	struct bnx2_tx_bd *txbd;
5816 	struct bnx2_sw_bd *rx_buf;
5817 	struct l2_fhdr *rx_hdr;
5818 	int ret = -ENODEV;
5819 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5820 	struct bnx2_tx_ring_info *txr;
5821 	struct bnx2_rx_ring_info *rxr;
5822 
5823 	tx_napi = bnapi;
5824 
5825 	txr = &tx_napi->tx_ring;
5826 	rxr = &bnapi->rx_ring;
5827 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5828 		bp->loopback = MAC_LOOPBACK;
5829 		bnx2_set_mac_loopback(bp);
5830 	}
5831 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5832 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5833 			return 0;
5834 
5835 		bp->loopback = PHY_LOOPBACK;
5836 		bnx2_set_phy_loopback(bp);
5837 	}
5838 	else
5839 		return -EINVAL;
5840 
5841 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5842 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5843 	if (!skb)
5844 		return -ENOMEM;
5845 	packet = skb_put(skb, pkt_size);
5846 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5847 	memset(packet + ETH_ALEN, 0x0, 8);
5848 	for (i = 14; i < pkt_size; i++)
5849 		packet[i] = (unsigned char) (i & 0xff);
5850 
5851 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
5853 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5854 		dev_kfree_skb(skb);
5855 		return -EIO;
5856 	}
5857 
5858 	BNX2_WR(bp, BNX2_HC_COMMAND,
5859 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5860 
5861 	BNX2_RD(bp, BNX2_HC_COMMAND);
5862 
5863 	udelay(5);
5864 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5865 
5866 	num_pkts = 0;
5867 
5868 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5869 
5870 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5871 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5872 	txbd->tx_bd_mss_nbytes = pkt_size;
5873 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5874 
5875 	num_pkts++;
5876 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5877 	txr->tx_prod_bseq += pkt_size;
5878 
5879 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5880 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5881 
5882 	udelay(100);
5883 
5884 	BNX2_WR(bp, BNX2_HC_COMMAND,
5885 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5886 
5887 	BNX2_RD(bp, BNX2_HC_COMMAND);
5888 
5889 	udelay(5);
5890 
	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5892 	dev_kfree_skb(skb);
5893 
5894 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5895 		goto loopback_test_done;
5896 
5897 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts)
		goto loopback_test_done;
5901 
5902 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5903 	data = rx_buf->data;
5904 
5905 	rx_hdr = get_l2_fhdr(data);
5906 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5907 
5908 	dma_sync_single_for_cpu(&bp->pdev->dev,
5909 		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5911 
5912 	if (rx_hdr->l2_fhdr_status &
5913 		(L2_FHDR_ERRORS_BAD_CRC |
5914 		L2_FHDR_ERRORS_PHY_DECODE |
5915 		L2_FHDR_ERRORS_ALIGNMENT |
5916 		L2_FHDR_ERRORS_TOO_SHORT |
5917 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5918 
5919 		goto loopback_test_done;
5920 	}
5921 
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size)
		goto loopback_test_done;
5925 
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff))
			goto loopback_test_done;
	}
5931 
5932 	ret = 0;
5933 
5934 loopback_test_done:
5935 	bp->loopback = 0;
5936 	return ret;
5937 }
5938 
5939 #define BNX2_MAC_LOOPBACK_FAILED	1
5940 #define BNX2_PHY_LOOPBACK_FAILED	2
5941 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5942 					 BNX2_PHY_LOOPBACK_FAILED)
5943 
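/* Run both loopback modes after a fresh chip reset and PHY init and
 * return a bitmask of the modes that failed.
 */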
5944 static int
5945 bnx2_test_loopback(struct bnx2 *bp)
5946 {
5947 	int rc = 0;
5948 
5949 	if (!netif_running(bp->dev))
5950 		return BNX2_LOOPBACK_FAILED;
5951 
5952 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5953 	spin_lock_bh(&bp->phy_lock);
5954 	bnx2_init_phy(bp, 1);
5955 	spin_unlock_bh(&bp->phy_lock);
5956 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5957 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5958 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5959 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5960 	return rc;
5961 }
5962 
5963 #define NVRAM_SIZE 0x200
5964 #define CRC32_RESIDUAL 0xdebb20e3
5965 
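/* Each 0x100-byte block of the NVRAM header carries its CRC32 in the
 * last four bytes, so running ether_crc_le() over the whole block
 * (data plus stored CRC) leaves the constant CRC-32 residual
 * 0xdebb20e3 when the block is intact.
 */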
5966 static int
5967 bnx2_test_nvram(struct bnx2 *bp)
5968 {
5969 	__be32 buf[NVRAM_SIZE / 4];
5970 	u8 *data = (u8 *) buf;
5971 	int rc = 0;
5972 	u32 magic, csum;
5973 
	rc = bnx2_nvram_read(bp, 0, data, 4);
	if (rc)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
5978 	if (magic != 0x669955aa) {
5979 		rc = -ENODEV;
5980 		goto test_nvram_done;
5981 	}
5982 
	rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE);
	if (rc)
		goto test_nvram_done;
5985 
5986 	csum = ether_crc_le(0x100, data);
5987 	if (csum != CRC32_RESIDUAL) {
5988 		rc = -ENODEV;
5989 		goto test_nvram_done;
5990 	}
5991 
5992 	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL)
		rc = -ENODEV;
5996 
5997 test_nvram_done:
5998 	return rc;
5999 }
6000 
6001 static int
6002 bnx2_test_link(struct bnx2 *bp)
6003 {
6004 	u32 bmsr;
6005 
6006 	if (!netif_running(bp->dev))
6007 		return -ENODEV;
6008 
6009 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6010 		if (bp->link_up)
6011 			return 0;
6012 		return -ENODEV;
6013 	}
6014 	spin_lock_bh(&bp->phy_lock);
6015 	bnx2_enable_bmsr1(bp);
6016 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6017 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6018 	bnx2_disable_bmsr1(bp);
6019 	spin_unlock_bh(&bp->phy_lock);
6020 
	if (bmsr & BMSR_LSTATUS)
		return 0;
6024 	return -ENODEV;
6025 }
6026 
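/* Check that the chip can generate an interrupt: force a coalescing
 * event and poll the status index in the interrupt ack register for up
 * to ~100 ms for a change.
 */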
6027 static int
6028 bnx2_test_intr(struct bnx2 *bp)
6029 {
6030 	int i;
6031 	u16 status_idx;
6032 
6033 	if (!netif_running(bp->dev))
6034 		return -ENODEV;
6035 
6036 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6037 
6038 	/* This register is not touched during run-time. */
6039 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6040 	BNX2_RD(bp, BNX2_HC_COMMAND);
6041 
6042 	for (i = 0; i < 10; i++) {
6043 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6044 			status_idx) {
6045 
6046 			break;
6047 		}
6048 
6049 		msleep_interruptible(10);
6050 	}
6051 	if (i < 10)
6052 		return 0;
6053 
6054 	return -ENODEV;
6055 }
6056 
6057 /* Determining link for parallel detection. */
6058 static int
6059 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6060 {
6061 	u32 mode_ctl, an_dbg, exp;
6062 
6063 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6064 		return 0;
6065 
6066 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6067 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6068 
6069 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6070 		return 0;
6071 
6072 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6073 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6074 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6075 
6076 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6077 		return 0;
6078 
6079 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6080 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6081 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6082 
6083 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6084 		return 0;
6085 
6086 	return 1;
6087 }
6088 
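/* Periodic 5706 SerDes state machine: while autoneg fails to bring the
 * link up, use parallel detection to force a 1 Gb/s full-duplex link,
 * and re-enable autoneg once the partner is seen negotiating again
 * (vendor-specific PHY register check).  Also force the link down when
 * sync is lost so that bnx2_set_link() can renegotiate.
 */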
6089 static void
6090 bnx2_5706_serdes_timer(struct bnx2 *bp)
6091 {
6092 	int check_link = 1;
6093 
6094 	spin_lock(&bp->phy_lock);
6095 	if (bp->serdes_an_pending) {
6096 		bp->serdes_an_pending--;
6097 		check_link = 0;
6098 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6099 		u32 bmcr;
6100 
6101 		bp->current_interval = BNX2_TIMER_INTERVAL;
6102 
6103 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6104 
6105 		if (bmcr & BMCR_ANENABLE) {
6106 			if (bnx2_5706_serdes_has_link(bp)) {
6107 				bmcr &= ~BMCR_ANENABLE;
6108 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6109 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6110 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6111 			}
6112 		}
6113 	}
6114 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6115 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6116 		u32 phy2;
6117 
6118 		bnx2_write_phy(bp, 0x17, 0x0f01);
6119 		bnx2_read_phy(bp, 0x15, &phy2);
6120 		if (phy2 & 0x20) {
6121 			u32 bmcr;
6122 
6123 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6124 			bmcr |= BMCR_ANENABLE;
6125 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6126 
6127 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6128 		}
6129 	} else
6130 		bp->current_interval = BNX2_TIMER_INTERVAL;
6131 
6132 	if (check_link) {
6133 		u32 val;
6134 
6135 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6136 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6137 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6138 
6139 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6140 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6141 				bnx2_5706s_force_link_dn(bp, 1);
6142 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6143 			} else
6144 				bnx2_set_link(bp);
6145 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6146 			bnx2_set_link(bp);
6147 	}
6148 	spin_unlock(&bp->phy_lock);
6149 }
6150 
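/* Periodic 5708 SerDes state machine: on a 2.5G-capable link that will
 * not come up, alternate between forced 2.5 Gb/s and autonegotiation
 * until the link is established.
 */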
6151 static void
6152 bnx2_5708_serdes_timer(struct bnx2 *bp)
6153 {
6154 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6155 		return;
6156 
6157 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6158 		bp->serdes_an_pending = 0;
6159 		return;
6160 	}
6161 
6162 	spin_lock(&bp->phy_lock);
6163 	if (bp->serdes_an_pending)
6164 		bp->serdes_an_pending--;
6165 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6166 		u32 bmcr;
6167 
6168 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6169 		if (bmcr & BMCR_ANENABLE) {
6170 			bnx2_enable_forced_2g5(bp);
6171 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6172 		} else {
6173 			bnx2_disable_forced_2g5(bp);
6174 			bp->serdes_an_pending = 2;
6175 			bp->current_interval = BNX2_TIMER_INTERVAL;
6176 		}
6177 
6178 	} else
6179 		bp->current_interval = BNX2_TIMER_INTERVAL;
6180 
6181 	spin_unlock(&bp->phy_lock);
6182 }
6183 
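/* Per-device heartbeat timer: send the keep-alive to the firmware,
 * refresh the firmware RX drop count, apply the missed-MSI and
 * broken-statistics workarounds where needed, and run the SerDes state
 * machines.
 */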
6184 static void
6185 bnx2_timer(struct timer_list *t)
6186 {
6187 	struct bnx2 *bp = from_timer(bp, t, timer);
6188 
6189 	if (!netif_running(bp->dev))
6190 		return;
6191 
6192 	if (atomic_read(&bp->intr_sem) != 0)
6193 		goto bnx2_restart_timer;
6194 
6195 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6196 	     BNX2_FLAG_USING_MSI)
6197 		bnx2_chk_missed_msi(bp);
6198 
6199 	bnx2_send_heart_beat(bp);
6200 
6201 	bp->stats_blk->stat_FwRxDrop =
6202 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6203 
	/* Work around occasionally corrupted counters. */
6205 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6206 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6207 			BNX2_HC_COMMAND_STATS_NOW);
6208 
6209 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6210 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6211 			bnx2_5706_serdes_timer(bp);
6212 		else
6213 			bnx2_5708_serdes_timer(bp);
6214 	}
6215 
6216 bnx2_restart_timer:
6217 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6218 }
6219 
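/* Request one IRQ per vector in use; INTx is the only mode that may be
 * shared.
 */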
6220 static int
6221 bnx2_request_irq(struct bnx2 *bp)
6222 {
6223 	unsigned long flags;
6224 	struct bnx2_irq *irq;
6225 	int rc = 0, i;
6226 
6227 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6228 		flags = 0;
6229 	else
6230 		flags = IRQF_SHARED;
6231 
6232 	for (i = 0; i < bp->irq_nvecs; i++) {
6233 		irq = &bp->irq_tbl[i];
6234 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6235 				 &bp->bnx2_napi[i]);
6236 		if (rc)
6237 			break;
6238 		irq->requested = 1;
6239 	}
6240 	return rc;
6241 }
6242 
6243 static void
6244 __bnx2_free_irq(struct bnx2 *bp)
6245 {
6246 	struct bnx2_irq *irq;
6247 	int i;
6248 
6249 	for (i = 0; i < bp->irq_nvecs; i++) {
6250 		irq = &bp->irq_tbl[i];
6251 		if (irq->requested)
6252 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6253 		irq->requested = 0;
6254 	}
6255 }
6256 
6257 static void
6258 bnx2_free_irq(struct bnx2 *bp)
6259 {
6260 
6261 	__bnx2_free_irq(bp);
6262 	if (bp->flags & BNX2_FLAG_USING_MSI)
6263 		pci_disable_msi(bp->pdev);
6264 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6265 		pci_disable_msix(bp->pdev);
6266 
6267 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6268 }
6269 
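/* Point the chip's MSI-X table and PBA at GRC windows 2 and 3, then
 * try to allocate between BNX2_MIN_MSIX_VEC and the requested number
 * of vectors (plus one extra for CNIC when it is built in).
 */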
6270 static void
6271 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6272 {
6273 	int i, total_vecs;
6274 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6275 	struct net_device *dev = bp->dev;
6276 	const int len = sizeof(bp->irq_tbl[0].name);
6277 
6278 	bnx2_setup_msix_tbl(bp);
6279 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6280 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6281 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6282 
	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly.
	 */
6285 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6286 
6287 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6288 		msix_ent[i].entry = i;
6289 		msix_ent[i].vector = 0;
6290 	}
6291 
6292 	total_vecs = msix_vecs;
6293 #ifdef BCM_CNIC
6294 	total_vecs++;
6295 #endif
6296 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6297 					   BNX2_MIN_MSIX_VEC, total_vecs);
6298 	if (total_vecs < 0)
6299 		return;
6300 
6301 	msix_vecs = total_vecs;
6302 #ifdef BCM_CNIC
6303 	msix_vecs--;
6304 #endif
6305 	bp->irq_nvecs = msix_vecs;
6306 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6307 	for (i = 0; i < total_vecs; i++) {
6308 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6309 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6310 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6311 	}
6312 }
6313 
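/* Select the interrupt mode in order of preference (MSI-X, MSI, INTx)
 * and size the TX/RX ring counts to the vectors actually granted.
 */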
6314 static int
6315 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6316 {
6317 	int cpus = netif_get_num_default_rss_queues();
6318 	int msix_vecs;
6319 
6320 	if (!bp->num_req_rx_rings)
6321 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6322 	else if (!bp->num_req_tx_rings)
6323 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6324 	else
6325 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6326 
6327 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6328 
6329 	bp->irq_tbl[0].handler = bnx2_interrupt;
6330 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6331 	bp->irq_nvecs = 1;
6332 	bp->irq_tbl[0].vector = bp->pdev->irq;
6333 
6334 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6335 		bnx2_enable_msix(bp, msix_vecs);
6336 
6337 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6338 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6339 		if (pci_enable_msi(bp->pdev) == 0) {
6340 			bp->flags |= BNX2_FLAG_USING_MSI;
6341 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6342 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6343 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6344 			} else
6345 				bp->irq_tbl[0].handler = bnx2_msi;
6346 
6347 			bp->irq_tbl[0].vector = bp->pdev->irq;
6348 		}
6349 	}
6350 
6351 	if (!bp->num_req_tx_rings)
6352 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6353 	else
6354 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6355 
6356 	if (!bp->num_req_rx_rings)
6357 		bp->num_rx_rings = bp->irq_nvecs;
6358 	else
6359 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6360 
6361 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6362 
6363 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6364 }
6365 
6366 /* Called with rtnl_lock */
6367 static int
6368 bnx2_open(struct net_device *dev)
6369 {
6370 	struct bnx2 *bp = netdev_priv(dev);
6371 	int rc;
6372 
6373 	rc = bnx2_request_firmware(bp);
6374 	if (rc < 0)
6375 		goto out;
6376 
6377 	netif_carrier_off(dev);
6378 
6379 	bnx2_disable_int(bp);
6380 
6381 	rc = bnx2_setup_int_mode(bp, disable_msi);
6382 	if (rc)
6383 		goto open_err;
6384 	bnx2_init_napi(bp);
6385 	bnx2_napi_enable(bp);
6386 	rc = bnx2_alloc_mem(bp);
6387 	if (rc)
6388 		goto open_err;
6389 
6390 	rc = bnx2_request_irq(bp);
6391 	if (rc)
6392 		goto open_err;
6393 
6394 	rc = bnx2_init_nic(bp, 1);
6395 	if (rc)
6396 		goto open_err;
6397 
6398 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6399 
6400 	atomic_set(&bp->intr_sem, 0);
6401 
6402 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6403 
6404 	bnx2_enable_int(bp);
6405 
6406 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6407 		/* Test MSI to make sure it is working
6408 		 * If MSI test fails, go back to INTx mode
6409 		 */
6410 		if (bnx2_test_intr(bp) != 0) {
6411 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6412 
6413 			bnx2_disable_int(bp);
6414 			bnx2_free_irq(bp);
6415 
6416 			bnx2_setup_int_mode(bp, 1);
6417 
6418 			rc = bnx2_init_nic(bp, 0);
6419 
6420 			if (!rc)
6421 				rc = bnx2_request_irq(bp);
6422 
6423 			if (rc) {
6424 				del_timer_sync(&bp->timer);
6425 				goto open_err;
6426 			}
6427 			bnx2_enable_int(bp);
6428 		}
6429 	}
6430 	if (bp->flags & BNX2_FLAG_USING_MSI)
6431 		netdev_info(dev, "using MSI\n");
6432 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6433 		netdev_info(dev, "using MSIX\n");
6434 
6435 	netif_tx_start_all_queues(dev);
6436 out:
6437 	return rc;
6438 
6439 open_err:
6440 	bnx2_napi_disable(bp);
6441 	bnx2_free_skbs(bp);
6442 	bnx2_free_irq(bp);
6443 	bnx2_free_mem(bp);
6444 	bnx2_del_napi(bp);
6445 	bnx2_release_firmware(bp);
6446 	goto out;
6447 }
6448 
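/* Workqueue handler behind the TX watchdog: restore the PCI state if
 * the PCI block itself was reset, then reinitialize the NIC, closing
 * the device if that fails.
 */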
6449 static void
6450 bnx2_reset_task(struct work_struct *work)
6451 {
6452 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6453 	int rc;
6454 	u16 pcicmd;
6455 
6456 	rtnl_lock();
6457 	if (!netif_running(bp->dev)) {
6458 		rtnl_unlock();
6459 		return;
6460 	}
6461 
6462 	bnx2_netif_stop(bp, true);
6463 
6464 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6465 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6466 		/* in case PCI block has reset */
6467 		pci_restore_state(bp->pdev);
6468 		pci_save_state(bp->pdev);
6469 	}
6470 	rc = bnx2_init_nic(bp, 1);
6471 	if (rc) {
6472 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6473 		bnx2_napi_enable(bp);
6474 		dev_close(bp->dev);
6475 		rtnl_unlock();
6476 		return;
6477 	}
6478 
6479 	atomic_set(&bp->intr_sem, 1);
6480 	bnx2_netif_start(bp, true);
6481 	rtnl_unlock();
6482 }
6483 
6484 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6485 
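/* Dump the flow-through queue control registers and the CPU states for
 * TX timeout debugging.  Each CPU's program counter is read twice so
 * that a changing value distinguishes a running CPU from a stuck one.
 */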
6486 static void
6487 bnx2_dump_ftq(struct bnx2 *bp)
6488 {
6489 	int i;
6490 	u32 reg, bdidx, cid, valid;
6491 	struct net_device *dev = bp->dev;
6492 	static const struct ftq_reg {
6493 		char *name;
6494 		u32 off;
6495 	} ftq_arr[] = {
6496 		BNX2_FTQ_ENTRY(RV2P_P),
6497 		BNX2_FTQ_ENTRY(RV2P_T),
6498 		BNX2_FTQ_ENTRY(RV2P_M),
6499 		BNX2_FTQ_ENTRY(TBDR_),
6500 		BNX2_FTQ_ENTRY(TDMA_),
6501 		BNX2_FTQ_ENTRY(TXP_),
6502 		BNX2_FTQ_ENTRY(TXP_),
6503 		BNX2_FTQ_ENTRY(TPAT_),
6504 		BNX2_FTQ_ENTRY(RXP_C),
6505 		BNX2_FTQ_ENTRY(RXP_),
6506 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6507 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6508 		BNX2_FTQ_ENTRY(COM_COMQ_),
6509 		BNX2_FTQ_ENTRY(CP_CPQ_),
6510 	};
6511 
6512 	netdev_err(dev, "<--- start FTQ dump --->\n");
6513 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6514 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6515 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6516 
6517 	netdev_err(dev, "CPU states:\n");
6518 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6519 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6520 			   reg, bnx2_reg_rd_ind(bp, reg),
6521 			   bnx2_reg_rd_ind(bp, reg + 4),
6522 			   bnx2_reg_rd_ind(bp, reg + 8),
6523 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6524 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6525 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6526 
6527 	netdev_err(dev, "<--- end FTQ dump --->\n");
6528 	netdev_err(dev, "<--- start TBDC dump --->\n");
6529 	netdev_err(dev, "TBDC free cnt: %ld\n",
6530 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6531 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6532 	for (i = 0; i < 0x20; i++) {
6533 		int j = 0;
6534 
6535 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6536 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6537 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6538 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6539 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6540 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6541 			j++;
6542 
6543 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6544 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6545 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6546 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6547 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6548 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6549 	}
6550 	netdev_err(dev, "<--- end TBDC dump --->\n");
6551 }
6552 
6553 static void
6554 bnx2_dump_state(struct bnx2 *bp)
6555 {
6556 	struct net_device *dev = bp->dev;
6557 	u32 val1, val2;
6558 
6559 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6560 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6561 		   atomic_read(&bp->intr_sem), val1);
6562 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6563 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6564 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6565 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6566 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6567 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6568 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6569 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6570 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6571 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6572 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6573 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6574 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6575 }
6576 
6577 static void
6578 bnx2_tx_timeout(struct net_device *dev)
6579 {
6580 	struct bnx2 *bp = netdev_priv(dev);
6581 
6582 	bnx2_dump_ftq(bp);
6583 	bnx2_dump_state(bp);
6584 	bnx2_dump_mcp_state(bp);
6585 
	/* This allows the netif to be shut down gracefully before resetting */
6587 	schedule_work(&bp->reset_task);
6588 }
6589 
6590 /* Called with netif_tx_lock.
6591  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6592  * netif_wake_queue().
6593  */
6594 static netdev_tx_t
6595 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6596 {
6597 	struct bnx2 *bp = netdev_priv(dev);
6598 	dma_addr_t mapping;
6599 	struct bnx2_tx_bd *txbd;
6600 	struct bnx2_sw_tx_bd *tx_buf;
6601 	u32 len, vlan_tag_flags, last_frag, mss;
6602 	u16 prod, ring_prod;
6603 	int i;
6604 	struct bnx2_napi *bnapi;
6605 	struct bnx2_tx_ring_info *txr;
6606 	struct netdev_queue *txq;
6607 
	/* Determine which TX ring this skb will be placed on */
6609 	i = skb_get_queue_mapping(skb);
6610 	bnapi = &bp->bnx2_napi[i];
6611 	txr = &bnapi->tx_ring;
6612 	txq = netdev_get_tx_queue(dev, i);
6613 
6614 	if (unlikely(bnx2_tx_avail(bp, txr) <
6615 	    (skb_shinfo(skb)->nr_frags + 1))) {
6616 		netif_tx_stop_queue(txq);
6617 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6618 
6619 		return NETDEV_TX_BUSY;
6620 	}
6621 	len = skb_headlen(skb);
6622 	prod = txr->tx_prod;
6623 	ring_prod = BNX2_TX_RING_IDX(prod);
6624 
6625 	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6629 
6630 	if (skb_vlan_tag_present(skb)) {
6631 		vlan_tag_flags |=
6632 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6633 	}
6634 
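	/* For TSO, encode the IP and TCP option lengths (and, for IPv6,
	 * any non-zero transport header offset) into the BD flags and
	 * mss field for the hardware segmentation engine.
	 */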
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
6636 		u32 tcp_opt_len;
6637 		struct iphdr *iph;
6638 
6639 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6640 
6641 		tcp_opt_len = tcp_optlen(skb);
6642 
6643 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6644 			u32 tcp_off = skb_transport_offset(skb) -
6645 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6646 
6647 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6648 					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0)) {
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			} else {
6652 				tcp_off >>= 3;
6653 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6654 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6655 						  ((tcp_off & 0x10) <<
6656 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6657 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6658 			}
6659 		} else {
6660 			iph = ip_hdr(skb);
6661 			if (tcp_opt_len || (iph->ihl > 5)) {
6662 				vlan_tag_flags |= ((iph->ihl - 5) +
6663 						   (tcp_opt_len >> 2)) << 8;
6664 			}
6665 		}
	}
6668 
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
6670 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6671 		dev_kfree_skb_any(skb);
6672 		return NETDEV_TX_OK;
6673 	}
6674 
6675 	tx_buf = &txr->tx_buf_ring[ring_prod];
6676 	tx_buf->skb = skb;
6677 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6678 
6679 	txbd = &txr->tx_desc_ring[ring_prod];
6680 
6681 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6682 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6683 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6684 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6685 
6686 	last_frag = skb_shinfo(skb)->nr_frags;
6687 	tx_buf->nr_frags = last_frag;
6688 	tx_buf->is_gso = skb_is_gso(skb);
6689 
6690 	for (i = 0; i < last_frag; i++) {
6691 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6692 
6693 		prod = BNX2_NEXT_TX_BD(prod);
6694 		ring_prod = BNX2_TX_RING_IDX(prod);
6695 		txbd = &txr->tx_desc_ring[ring_prod];
6696 
6697 		len = skb_frag_size(frag);
6698 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6699 					   DMA_TO_DEVICE);
6700 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6701 			goto dma_error;
6702 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6703 				   mapping);
6704 
6705 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6706 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6707 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
6711 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6712 
6713 	/* Sync BD data before updating TX mailbox */
6714 	wmb();
6715 
6716 	netdev_tx_sent_queue(txq, skb->len);
6717 
6718 	prod = BNX2_NEXT_TX_BD(prod);
6719 	txr->tx_prod_bseq += skb->len;
6720 
6721 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6722 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6723 
6724 	txr->tx_prod = prod;
6725 
6726 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6727 		netif_tx_stop_queue(txq);
6728 
6729 		/* netif_tx_stop_queue() must be done before checking
6730 		 * tx index in bnx2_tx_avail() below, because in
6731 		 * bnx2_tx_int(), we update tx index before checking for
6732 		 * netif_tx_queue_stopped().
6733 		 */
6734 		smp_mb();
6735 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6736 			netif_tx_wake_queue(txq);
6737 	}
6738 
6739 	return NETDEV_TX_OK;
6740 dma_error:
6741 	/* save value of frag that failed */
6742 	last_frag = i;
6743 
6744 	/* start back at beginning and unmap skb */
6745 	prod = txr->tx_prod;
6746 	ring_prod = BNX2_TX_RING_IDX(prod);
6747 	tx_buf = &txr->tx_buf_ring[ring_prod];
6748 	tx_buf->skb = NULL;
6749 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
6751 
6752 	/* unmap remaining mapped pages */
6753 	for (i = 0; i < last_frag; i++) {
6754 		prod = BNX2_NEXT_TX_BD(prod);
6755 		ring_prod = BNX2_TX_RING_IDX(prod);
6756 		tx_buf = &txr->tx_buf_ring[ring_prod];
6757 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6758 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
6760 	}
6761 
6762 	dev_kfree_skb_any(skb);
6763 	return NETDEV_TX_OK;
6764 }
6765 
6766 /* Called with rtnl_lock */
6767 static int
6768 bnx2_close(struct net_device *dev)
6769 {
6770 	struct bnx2 *bp = netdev_priv(dev);
6771 
6772 	bnx2_disable_int_sync(bp);
6773 	bnx2_napi_disable(bp);
6774 	netif_tx_disable(dev);
6775 	del_timer_sync(&bp->timer);
6776 	bnx2_shutdown_chip(bp);
6777 	bnx2_free_irq(bp);
6778 	bnx2_free_skbs(bp);
6779 	bnx2_free_mem(bp);
6780 	bnx2_del_napi(bp);
6781 	bp->link_up = 0;
6782 	netif_carrier_off(bp->dev);
6783 	return 0;
6784 }
6785 
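/* Accumulate the chip's statistics block into temp_stats_blk before a
 * reset clears it.  The 64-bit counters are kept as hi/lo word pairs,
 * so carry overflow from the low word into the high word by hand.
 */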
6786 static void
6787 bnx2_save_stats(struct bnx2 *bp)
6788 {
6789 	u32 *hw_stats = (u32 *) bp->stats_blk;
6790 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6791 	int i;
6792 
6793 	/* The 1st 10 counters are 64-bit counters */
6794 	for (i = 0; i < 20; i += 2) {
6795 		u32 hi;
6796 		u64 lo;
6797 
6798 		hi = temp_stats[i] + hw_stats[i];
6799 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6800 		if (lo > 0xffffffff)
6801 			hi++;
6802 		temp_stats[i] = hi;
6803 		temp_stats[i + 1] = lo & 0xffffffff;
6804 	}
6805 
6806 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6807 		temp_stats[i] += hw_stats[i];
6808 }
6809 
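/* Helpers that fold the counters saved across resets in temp_stats_blk
 * into the live hardware counters.
 */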
6810 #define GET_64BIT_NET_STATS64(ctr)		\
6811 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6812 
6813 #define GET_64BIT_NET_STATS(ctr)				\
6814 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6815 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6816 
6817 #define GET_32BIT_NET_STATS(ctr)				\
6818 	(unsigned long) (bp->stats_blk->ctr +			\
6819 			 bp->temp_stats_blk->ctr)
6820 
6821 static void
6822 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6823 {
6824 	struct bnx2 *bp = netdev_priv(dev);
6825 
6826 	if (!bp->stats_blk)
6827 		return;
6828 
6829 	net_stats->rx_packets =
6830 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6831 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6832 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6833 
6834 	net_stats->tx_packets =
6835 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6836 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6837 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6838 
6839 	net_stats->rx_bytes =
6840 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6841 
6842 	net_stats->tx_bytes =
6843 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6844 
6845 	net_stats->multicast =
6846 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6847 
6848 	net_stats->collisions =
6849 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6850 
6851 	net_stats->rx_length_errors =
6852 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6853 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6854 
6855 	net_stats->rx_over_errors =
6856 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6857 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6858 
6859 	net_stats->rx_frame_errors =
6860 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6861 
6862 	net_stats->rx_crc_errors =
6863 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6864 
6865 	net_stats->rx_errors = net_stats->rx_length_errors +
6866 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6867 		net_stats->rx_crc_errors;
6868 
6869 	net_stats->tx_aborted_errors =
6870 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6871 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6872 
6873 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6874 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6875 		net_stats->tx_carrier_errors = 0;
	else
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6880 
6881 	net_stats->tx_errors =
6882 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6883 		net_stats->tx_aborted_errors +
6884 		net_stats->tx_carrier_errors;
6885 
	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);
}
6892 
6893 /* All ethtool functions called with rtnl_lock */
6894 
6895 static int
6896 bnx2_get_link_ksettings(struct net_device *dev,
6897 			struct ethtool_link_ksettings *cmd)
6898 {
6899 	struct bnx2 *bp = netdev_priv(dev);
6900 	int support_serdes = 0, support_copper = 0;
6901 	u32 supported, advertising;
6902 
6903 	supported = SUPPORTED_Autoneg;
6904 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6905 		support_serdes = 1;
6906 		support_copper = 1;
6907 	} else if (bp->phy_port == PORT_FIBRE)
6908 		support_serdes = 1;
6909 	else
6910 		support_copper = 1;
6911 
6912 	if (support_serdes) {
6913 		supported |= SUPPORTED_1000baseT_Full |
6914 			SUPPORTED_FIBRE;
6915 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6916 			supported |= SUPPORTED_2500baseX_Full;
6917 	}
6918 	if (support_copper) {
6919 		supported |= SUPPORTED_10baseT_Half |
6920 			SUPPORTED_10baseT_Full |
6921 			SUPPORTED_100baseT_Half |
6922 			SUPPORTED_100baseT_Full |
6923 			SUPPORTED_1000baseT_Full |
6924 			SUPPORTED_TP;
6925 	}
6926 
6927 	spin_lock_bh(&bp->phy_lock);
6928 	cmd->base.port = bp->phy_port;
6929 	advertising = bp->advertising;
6930 
	if (bp->autoneg & AUTONEG_SPEED)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;
6936 
6937 	if (netif_carrier_ok(dev)) {
6938 		cmd->base.speed = bp->line_speed;
6939 		cmd->base.duplex = bp->duplex;
6940 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6941 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6942 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6943 			else
6944 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6945 		}
6946 	}
6947 	else {
6948 		cmd->base.speed = SPEED_UNKNOWN;
6949 		cmd->base.duplex = DUPLEX_UNKNOWN;
6950 	}
6951 	spin_unlock_bh(&bp->phy_lock);
6952 
6953 	cmd->base.phy_address = bp->phy_addr;
6954 
6955 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6956 						supported);
6957 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6958 						advertising);
6959 
6960 	return 0;
6961 }
6962 
6963 static int
6964 bnx2_set_link_ksettings(struct net_device *dev,
6965 			const struct ethtool_link_ksettings *cmd)
6966 {
6967 	struct bnx2 *bp = netdev_priv(dev);
6968 	u8 autoneg = bp->autoneg;
6969 	u8 req_duplex = bp->req_duplex;
6970 	u16 req_line_speed = bp->req_line_speed;
6971 	u32 advertising = bp->advertising;
6972 	int err = -EINVAL;
6973 
6974 	spin_lock_bh(&bp->phy_lock);
6975 
6976 	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6977 		goto err_out_unlock;
6978 
6979 	if (cmd->base.port != bp->phy_port &&
6980 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6981 		goto err_out_unlock;
6982 
6983 	/* If device is down, we can store the settings only if the user
6984 	 * is setting the currently active port.
6985 	 */
6986 	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6987 		goto err_out_unlock;
6988 
6989 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6990 		autoneg |= AUTONEG_SPEED;
6991 
6992 		ethtool_convert_link_mode_to_legacy_u32(
6993 			&advertising, cmd->link_modes.advertising);
6994 
6995 		if (cmd->base.port == PORT_TP) {
6996 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6997 			if (!advertising)
6998 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6999 		} else {
7000 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
7001 			if (!advertising)
7002 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
7003 		}
7004 		advertising |= ADVERTISED_Autoneg;
7005 	}
7006 	else {
7007 		u32 speed = cmd->base.speed;
7008 
7009 		if (cmd->base.port == PORT_FIBRE) {
7010 			if ((speed != SPEED_1000 &&
7011 			     speed != SPEED_2500) ||
7012 			    (cmd->base.duplex != DUPLEX_FULL))
7013 				goto err_out_unlock;
7014 
7015 			if (speed == SPEED_2500 &&
7016 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7017 				goto err_out_unlock;
7018 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7019 			goto err_out_unlock;
7020 
7021 		autoneg &= ~AUTONEG_SPEED;
7022 		req_line_speed = speed;
7023 		req_duplex = cmd->base.duplex;
7024 		advertising = 0;
7025 	}
7026 
7027 	bp->autoneg = autoneg;
7028 	bp->advertising = advertising;
7029 	bp->req_line_speed = req_line_speed;
7030 	bp->req_duplex = req_duplex;
7031 
7032 	err = 0;
7033 	/* If device is down, the new settings will be picked up when it is
7034 	 * brought up.
7035 	 */
7036 	if (netif_running(dev))
7037 		err = bnx2_setup_phy(bp, cmd->base.port);
7038 
7039 err_out_unlock:
7040 	spin_unlock_bh(&bp->phy_lock);
7041 
7042 	return err;
7043 }
7044 
7045 static void
7046 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7047 {
7048 	struct bnx2 *bp = netdev_priv(dev);
7049 
7050 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7051 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7052 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7053 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7054 }
7055 
7056 #define BNX2_REGDUMP_LEN		(32 * 1024)
7057 
7058 static int
7059 bnx2_get_regs_len(struct net_device *dev)
7060 {
7061 	return BNX2_REGDUMP_LEN;
7062 }
7063 
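/* Copy the readable register ranges into the dump buffer.  The
 * reg_boundaries[] entries are consecutive (start, end) offsets of
 * each readable window; the gaps in between are left zeroed.
 */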
7064 static void
7065 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7066 {
7067 	u32 *p = _p, i, offset;
7068 	u8 *orig_p = _p;
7069 	struct bnx2 *bp = netdev_priv(dev);
7070 	static const u32 reg_boundaries[] = {
7071 		0x0000, 0x0098, 0x0400, 0x045c,
7072 		0x0800, 0x0880, 0x0c00, 0x0c10,
7073 		0x0c30, 0x0d08, 0x1000, 0x101c,
7074 		0x1040, 0x1048, 0x1080, 0x10a4,
7075 		0x1400, 0x1490, 0x1498, 0x14f0,
7076 		0x1500, 0x155c, 0x1580, 0x15dc,
7077 		0x1600, 0x1658, 0x1680, 0x16d8,
7078 		0x1800, 0x1820, 0x1840, 0x1854,
7079 		0x1880, 0x1894, 0x1900, 0x1984,
7080 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7081 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7082 		0x2000, 0x2030, 0x23c0, 0x2400,
7083 		0x2800, 0x2820, 0x2830, 0x2850,
7084 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7085 		0x3c00, 0x3c94, 0x4000, 0x4010,
7086 		0x4080, 0x4090, 0x43c0, 0x4458,
7087 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7088 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7089 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7090 		0x5fc0, 0x6000, 0x6400, 0x6428,
7091 		0x6800, 0x6848, 0x684c, 0x6860,
7092 		0x6888, 0x6910, 0x8000
7093 	};
7094 
7095 	regs->version = 0;
7096 
7097 	memset(p, 0, BNX2_REGDUMP_LEN);
7098 
7099 	if (!netif_running(bp->dev))
7100 		return;
7101 
7102 	i = 0;
7103 	offset = reg_boundaries[0];
7104 	p += offset;
7105 	while (offset < BNX2_REGDUMP_LEN) {
7106 		*p++ = BNX2_RD(bp, offset);
7107 		offset += 4;
7108 		if (offset == reg_boundaries[i + 1]) {
7109 			offset = reg_boundaries[i + 2];
7110 			p = (u32 *) (orig_p + offset);
7111 			i += 2;
7112 		}
7113 	}
7114 }
7115 
7116 static void
7117 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7118 {
7119 	struct bnx2 *bp = netdev_priv(dev);
7120 
7121 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7122 		wol->supported = 0;
7123 		wol->wolopts = 0;
7124 	}
7125 	else {
7126 		wol->supported = WAKE_MAGIC;
7127 		if (bp->wol)
7128 			wol->wolopts = WAKE_MAGIC;
7129 		else
7130 			wol->wolopts = 0;
7131 	}
7132 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7133 }
7134 
7135 static int
7136 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7137 {
7138 	struct bnx2 *bp = netdev_priv(dev);
7139 
7140 	if (wol->wolopts & ~WAKE_MAGIC)
7141 		return -EINVAL;
7142 
7143 	if (wol->wolopts & WAKE_MAGIC) {
7144 		if (bp->flags & BNX2_FLAG_NO_WOL)
7145 			return -EINVAL;
7146 
7147 		bp->wol = 1;
7148 	}
7149 	else {
7150 		bp->wol = 0;
7151 	}
7152 
7153 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7154 
7155 	return 0;
7156 }
7157 
7158 static int
7159 bnx2_nway_reset(struct net_device *dev)
7160 {
7161 	struct bnx2 *bp = netdev_priv(dev);
7162 	u32 bmcr;
7163 
7164 	if (!netif_running(dev))
7165 		return -EAGAIN;
7166 
	if (!(bp->autoneg & AUTONEG_SPEED))
		return -EINVAL;
7170 
7171 	spin_lock_bh(&bp->phy_lock);
7172 
7173 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7174 		int rc;
7175 
7176 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7177 		spin_unlock_bh(&bp->phy_lock);
7178 		return rc;
7179 	}
7180 
7181 	/* Force a link down visible on the other side */
7182 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7183 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7184 		spin_unlock_bh(&bp->phy_lock);
7185 
7186 		msleep(20);
7187 
7188 		spin_lock_bh(&bp->phy_lock);
7189 
7190 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7191 		bp->serdes_an_pending = 1;
7192 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7193 	}
7194 
7195 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7196 	bmcr &= ~BMCR_LOOPBACK;
7197 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7198 
7199 	spin_unlock_bh(&bp->phy_lock);
7200 
7201 	return 0;
7202 }
7203 
7204 static u32
7205 bnx2_get_link(struct net_device *dev)
7206 {
7207 	struct bnx2 *bp = netdev_priv(dev);
7208 
7209 	return bp->link_up;
7210 }
7211 
7212 static int
7213 bnx2_get_eeprom_len(struct net_device *dev)
7214 {
7215 	struct bnx2 *bp = netdev_priv(dev);
7216 
7217 	if (!bp->flash_info)
7218 		return 0;
7219 
7220 	return (int) bp->flash_size;
7221 }
7222 
7223 static int
7224 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7225 		u8 *eebuf)
7226 {
7227 	struct bnx2 *bp = netdev_priv(dev);
7228 	int rc;
7229 
7230 	/* parameters already validated in ethtool_get_eeprom */
7231 
7232 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7233 
7234 	return rc;
7235 }
7236 
7237 static int
7238 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7239 		u8 *eebuf)
7240 {
7241 	struct bnx2 *bp = netdev_priv(dev);
7242 	int rc;
7243 
7244 	/* parameters already validated in ethtool_set_eeprom */
7245 
7246 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7247 
7248 	return rc;
7249 }
7250 
7251 static int
7252 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7253 {
7254 	struct bnx2 *bp = netdev_priv(dev);
7255 
7256 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7257 
7258 	coal->rx_coalesce_usecs = bp->rx_ticks;
7259 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7260 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7261 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7262 
7263 	coal->tx_coalesce_usecs = bp->tx_ticks;
7264 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7265 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7266 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7267 
7268 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7269 
7270 	return 0;
7271 }
7272 
7273 static int
7274 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7275 {
7276 	struct bnx2 *bp = netdev_priv(dev);
7277 
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff)
		bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff)
		bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff)
		bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff)
		bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff)
		bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff)
		bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
7303 
7304 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7305 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7306 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7307 			bp->stats_ticks = USEC_PER_SEC;
7308 	}
7309 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7310 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7311 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7312 
7313 	if (netif_running(bp->dev)) {
7314 		bnx2_netif_stop(bp, true);
7315 		bnx2_init_nic(bp, 0);
7316 		bnx2_netif_start(bp, true);
7317 	}
7318 
7319 	return 0;
7320 }
7321 
7322 static void
7323 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7324 {
7325 	struct bnx2 *bp = netdev_priv(dev);
7326 
7327 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7328 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7329 
7330 	ering->rx_pending = bp->rx_ring_size;
7331 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7332 
7333 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7334 	ering->tx_pending = bp->tx_ring_size;
7335 }
7336 
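/* Resize the rings: quiesce the NIC, free the old buffers (and, when
 * reset_irq is set, the vectors), then reallocate and restart with the
 * new sizes.  The statistics are saved first because the chip reset
 * clears them.
 */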
7337 static int
7338 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7339 {
7340 	if (netif_running(bp->dev)) {
7341 		/* Reset will erase chipset stats; save them */
7342 		bnx2_save_stats(bp);
7343 
7344 		bnx2_netif_stop(bp, true);
7345 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7346 		if (reset_irq) {
7347 			bnx2_free_irq(bp);
7348 			bnx2_del_napi(bp);
7349 		} else {
7350 			__bnx2_free_irq(bp);
7351 		}
7352 		bnx2_free_skbs(bp);
7353 		bnx2_free_mem(bp);
7354 	}
7355 
7356 	bnx2_set_rx_ring_size(bp, rx);
7357 	bp->tx_ring_size = tx;
7358 
7359 	if (netif_running(bp->dev)) {
7360 		int rc = 0;
7361 
7362 		if (reset_irq) {
7363 			rc = bnx2_setup_int_mode(bp, disable_msi);
7364 			bnx2_init_napi(bp);
7365 		}
7366 
7367 		if (!rc)
7368 			rc = bnx2_alloc_mem(bp);
7369 
7370 		if (!rc)
7371 			rc = bnx2_request_irq(bp);
7372 
7373 		if (!rc)
7374 			rc = bnx2_init_nic(bp, 0);
7375 
7376 		if (rc) {
7377 			bnx2_napi_enable(bp);
7378 			dev_close(bp->dev);
7379 			return rc;
7380 		}
7381 #ifdef BCM_CNIC
7382 		mutex_lock(&bp->cnic_lock);
7383 		/* Let cnic know about the new status block. */
7384 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7385 			bnx2_setup_cnic_irq_info(bp);
7386 		mutex_unlock(&bp->cnic_lock);
7387 #endif
7388 		bnx2_netif_start(bp, true);
7389 	}
7390 	return 0;
7391 }
7392 
7393 static int
7394 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7395 {
7396 	struct bnx2 *bp = netdev_priv(dev);
7397 	int rc;
7398 
7399 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7400 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7401 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7402 
7403 		return -EINVAL;
7404 	}
7405 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7406 				   false);
7407 	return rc;
7408 }
7409 
7410 static void
7411 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7412 {
7413 	struct bnx2 *bp = netdev_priv(dev);
7414 
7415 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7416 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7417 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7418 }
7419 
7420 static int
7421 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7422 {
7423 	struct bnx2 *bp = netdev_priv(dev);
7424 
7425 	bp->req_flow_ctrl = 0;
7426 	if (epause->rx_pause)
7427 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7428 	if (epause->tx_pause)
7429 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7430 
	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7437 
7438 	if (netif_running(dev)) {
7439 		spin_lock_bh(&bp->phy_lock);
7440 		bnx2_setup_phy(bp, bp->phy_port);
7441 		spin_unlock_bh(&bp->phy_lock);
7442 	}
7443 
7444 	return 0;
7445 }
7446 
7447 static struct {
7448 	char string[ETH_GSTRING_LEN];
7449 } bnx2_stats_str_arr[] = {
7450 	{ "rx_bytes" },
7451 	{ "rx_error_bytes" },
7452 	{ "tx_bytes" },
7453 	{ "tx_error_bytes" },
7454 	{ "rx_ucast_packets" },
7455 	{ "rx_mcast_packets" },
7456 	{ "rx_bcast_packets" },
7457 	{ "tx_ucast_packets" },
7458 	{ "tx_mcast_packets" },
7459 	{ "tx_bcast_packets" },
7460 	{ "tx_mac_errors" },
7461 	{ "tx_carrier_errors" },
7462 	{ "rx_crc_errors" },
7463 	{ "rx_align_errors" },
7464 	{ "tx_single_collisions" },
7465 	{ "tx_multi_collisions" },
7466 	{ "tx_deferred" },
7467 	{ "tx_excess_collisions" },
7468 	{ "tx_late_collisions" },
7469 	{ "tx_total_collisions" },
7470 	{ "rx_fragments" },
7471 	{ "rx_jabbers" },
7472 	{ "rx_undersize_packets" },
7473 	{ "rx_oversize_packets" },
7474 	{ "rx_64_byte_packets" },
7475 	{ "rx_65_to_127_byte_packets" },
7476 	{ "rx_128_to_255_byte_packets" },
7477 	{ "rx_256_to_511_byte_packets" },
7478 	{ "rx_512_to_1023_byte_packets" },
7479 	{ "rx_1024_to_1522_byte_packets" },
7480 	{ "rx_1523_to_9022_byte_packets" },
7481 	{ "tx_64_byte_packets" },
7482 	{ "tx_65_to_127_byte_packets" },
7483 	{ "tx_128_to_255_byte_packets" },
7484 	{ "tx_256_to_511_byte_packets" },
7485 	{ "tx_512_to_1023_byte_packets" },
7486 	{ "tx_1024_to_1522_byte_packets" },
7487 	{ "tx_1523_to_9022_byte_packets" },
7488 	{ "rx_xon_frames" },
7489 	{ "rx_xoff_frames" },
7490 	{ "tx_xon_frames" },
7491 	{ "tx_xoff_frames" },
7492 	{ "rx_mac_ctrl_frames" },
7493 	{ "rx_filtered_packets" },
7494 	{ "rx_ftq_discards" },
7495 	{ "rx_discards" },
7496 	{ "rx_fw_discards" },
7497 };
7498 
7499 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7500 
7501 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7502 
7503 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7504     STATS_OFFSET32(stat_IfHCInOctets_hi),
7505     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7506     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7507     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7508     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7509     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7510     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7511     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7512     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7513     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7514     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7515     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7516     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7517     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7518     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7519     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7520     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7521     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7522     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7523     STATS_OFFSET32(stat_EtherStatsCollisions),
7524     STATS_OFFSET32(stat_EtherStatsFragments),
7525     STATS_OFFSET32(stat_EtherStatsJabbers),
7526     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7527     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7528     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7529     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7530     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7531     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7532     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7533     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7534     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7535     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7536     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7537     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7538     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7539     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7540     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7541     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7542     STATS_OFFSET32(stat_XonPauseFramesReceived),
7543     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7544     STATS_OFFSET32(stat_OutXonSent),
7545     STATS_OFFSET32(stat_OutXoffSent),
7546     STATS_OFFSET32(stat_MacControlFramesReceived),
7547     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7548     STATS_OFFSET32(stat_IfInFTQDiscards),
7549     STATS_OFFSET32(stat_IfInMBUFDiscards),
7550     STATS_OFFSET32(stat_FwRxDrop),
7551 };
7552 
7553 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7554  * skipped because of errata.
7555  */
7556 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7557 	8,0,8,8,8,8,8,8,8,8,
7558 	4,0,4,4,4,4,4,4,4,4,
7559 	4,4,4,4,4,4,4,4,4,4,
7560 	4,4,4,4,4,4,4,4,4,4,
7561 	4,4,4,4,4,4,4,
7562 };
7563 
7564 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7565 	8,0,8,8,8,8,8,8,8,8,
7566 	4,4,4,4,4,4,4,4,4,4,
7567 	4,4,4,4,4,4,4,4,4,4,
7568 	4,4,4,4,4,4,4,4,4,4,
7569 	4,4,4,4,4,4,4,
7570 };
7571 
7572 #define BNX2_NUM_TESTS 6
7573 
7574 static struct {
7575 	char string[ETH_GSTRING_LEN];
7576 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7577 	{ "register_test (offline)" },
7578 	{ "memory_test (offline)" },
7579 	{ "loopback_test (offline)" },
7580 	{ "nvram_test (online)" },
7581 	{ "interrupt_test (online)" },
7582 	{ "link_test (online)" },
7583 };
7584 
7585 static int
7586 bnx2_get_sset_count(struct net_device *dev, int sset)
7587 {
7588 	switch (sset) {
7589 	case ETH_SS_TEST:
7590 		return BNX2_NUM_TESTS;
7591 	case ETH_SS_STATS:
7592 		return BNX2_NUM_STATS;
7593 	default:
7594 		return -EOPNOTSUPP;
7595 	}
7596 }
7597 
7598 static void
7599 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7600 {
7601 	struct bnx2 *bp = netdev_priv(dev);
7602 
7603 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7604 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7605 		int i;
7606 
7607 		bnx2_netif_stop(bp, true);
7608 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7609 		bnx2_free_skbs(bp);
7610 
7611 		if (bnx2_test_registers(bp) != 0) {
7612 			buf[0] = 1;
7613 			etest->flags |= ETH_TEST_FL_FAILED;
7614 		}
7615 		if (bnx2_test_memory(bp) != 0) {
7616 			buf[1] = 1;
7617 			etest->flags |= ETH_TEST_FL_FAILED;
7618 		}
		buf[2] = bnx2_test_loopback(bp);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
7621 
		if (!netif_running(bp->dev)) {
			bnx2_shutdown_chip(bp);
		} else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}
7628 
7629 		/* wait for link up */
7630 		for (i = 0; i < 7; i++) {
7631 			if (bp->link_up)
7632 				break;
7633 			msleep_interruptible(1000);
7634 		}
7635 	}
7636 
7637 	if (bnx2_test_nvram(bp) != 0) {
7638 		buf[3] = 1;
7639 		etest->flags |= ETH_TEST_FL_FAILED;
7640 	}
7641 	if (bnx2_test_intr(bp) != 0) {
7642 		buf[4] = 1;
7643 		etest->flags |= ETH_TEST_FL_FAILED;
7644 	}
7645 
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
7651 }
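
/* bnx2_self_test() above is typically driven from user space with
 * "ethtool -t <ifname> [online|offline]" (illustrative): an online-only
 * run executes just the nvram/interrupt/link tests, while an offline run
 * additionally stops traffic for the register/memory/loopback tests.
 */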
7652 
7653 static void
7654 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7655 {
7656 	switch (stringset) {
7657 	case ETH_SS_STATS:
7658 		memcpy(buf, bnx2_stats_str_arr,
7659 			sizeof(bnx2_stats_str_arr));
7660 		break;
7661 	case ETH_SS_TEST:
7662 		memcpy(buf, bnx2_tests_str_arr,
7663 			sizeof(bnx2_tests_str_arr));
7664 		break;
7665 	}
7666 }
7667 
7668 static void
7669 bnx2_get_ethtool_stats(struct net_device *dev,
7670 		struct ethtool_stats *stats, u64 *buf)
7671 {
7672 	struct bnx2 *bp = netdev_priv(dev);
7673 	int i;
7674 	u32 *hw_stats = (u32 *) bp->stats_blk;
7675 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7676 	u8 *stats_len_arr = NULL;
7677 
7678 	if (!hw_stats) {
7679 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7680 		return;
7681 	}
7682 
7683 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7684 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7685 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7686 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7687 		stats_len_arr = bnx2_5706_stats_len_arr;
7688 	else
7689 		stats_len_arr = bnx2_5708_stats_len_arr;
7690 
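	/* Each counter is reported as the live hardware block plus
	 * temp_stats_blk, where the driver accumulates the values saved
	 * across chip resets; 64-bit counters are kept as hi/lo 32-bit
	 * words.
	 */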
7691 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7692 		unsigned long offset;
7693 
7694 		if (stats_len_arr[i] == 0) {
7695 			/* skip this counter */
7696 			buf[i] = 0;
7697 			continue;
7698 		}
7699 
7700 		offset = bnx2_stats_offset_arr[i];
7701 		if (stats_len_arr[i] == 4) {
7702 			/* 4-byte counter */
7703 			buf[i] = (u64) *(hw_stats + offset) +
7704 				 *(temp_stats + offset);
7705 			continue;
7706 		}
7707 		/* 8-byte counter */
7708 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7709 			 *(hw_stats + offset + 1) +
7710 			 (((u64) *(temp_stats + offset)) << 32) +
7711 			 *(temp_stats + offset + 1);
7712 	}
7713 }
7714 
7715 static int
7716 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7717 {
7718 	struct bnx2 *bp = netdev_priv(dev);
7719 
7720 	switch (state) {
7721 	case ETHTOOL_ID_ACTIVE:
7722 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7723 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7724 		return 1;	/* cycle on/off once per second */
7725 
7726 	case ETHTOOL_ID_ON:
7727 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7728 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7729 			BNX2_EMAC_LED_100MB_OVERRIDE |
7730 			BNX2_EMAC_LED_10MB_OVERRIDE |
7731 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7732 			BNX2_EMAC_LED_TRAFFIC);
7733 		break;
7734 
7735 	case ETHTOOL_ID_OFF:
7736 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7737 		break;
7738 
7739 	case ETHTOOL_ID_INACTIVE:
7740 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7741 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7742 		break;
7743 	}
7744 
7745 	return 0;
7746 }
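
/* bnx2_set_phys_id() above backs "ethtool -p <ifname> [N]" (illustrative):
 * the core calls ETHTOOL_ID_ACTIVE once, blinks by alternating the ON/OFF
 * states at the rate implied by the value returned there, and restores the
 * saved LED mode via ETHTOOL_ID_INACTIVE.
 */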
7747 
7748 static int
7749 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7750 {
7751 	struct bnx2 *bp = netdev_priv(dev);
7752 
7753 	/* TSO with VLAN tag won't work with current firmware */
7754 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7755 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7756 	else
7757 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7758 
7759 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7760 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7761 	    netif_running(dev)) {
7762 		bnx2_netif_stop(bp, false);
7763 		dev->features = features;
7764 		bnx2_set_rx_mode(dev);
7765 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7766 		bnx2_netif_start(bp, false);
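		/* dev->features was already updated above; a positive return
		 * tells __netdev_update_features() not to assign it again.
		 */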
7767 		return 1;
7768 	}
7769 
7770 	return 0;
7771 }
7772 
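/* "ethtool -l/-L" handlers: more than one RX/TX ring is only offered when
 * the chip is MSI-X capable and MSI has not been turned off with the
 * disable_msi module parameter.
 */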
7773 static void bnx2_get_channels(struct net_device *dev,
7774 			      struct ethtool_channels *channels)
7775 {
7776 	struct bnx2 *bp = netdev_priv(dev);
7777 	u32 max_rx_rings = 1;
7778 	u32 max_tx_rings = 1;
7779 
7780 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7781 		max_rx_rings = RX_MAX_RINGS;
7782 		max_tx_rings = TX_MAX_RINGS;
7783 	}
7784 
7785 	channels->max_rx = max_rx_rings;
7786 	channels->max_tx = max_tx_rings;
7787 	channels->max_other = 0;
7788 	channels->max_combined = 0;
7789 	channels->rx_count = bp->num_rx_rings;
7790 	channels->tx_count = bp->num_tx_rings;
7791 	channels->other_count = 0;
7792 	channels->combined_count = 0;
7793 }
7794 
7795 static int bnx2_set_channels(struct net_device *dev,
7796 			      struct ethtool_channels *channels)
7797 {
7798 	struct bnx2 *bp = netdev_priv(dev);
7799 	u32 max_rx_rings = 1;
7800 	u32 max_tx_rings = 1;
7801 	int rc = 0;
7802 
7803 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7804 		max_rx_rings = RX_MAX_RINGS;
7805 		max_tx_rings = TX_MAX_RINGS;
7806 	}
7807 	if (channels->rx_count > max_rx_rings ||
7808 	    channels->tx_count > max_tx_rings)
7809 		return -EINVAL;
7810 
7811 	bp->num_req_rx_rings = channels->rx_count;
7812 	bp->num_req_tx_rings = channels->tx_count;
7813 
7814 	if (netif_running(dev))
7815 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7816 					   bp->tx_ring_size, true);
7817 
7818 	return rc;
7819 }
7820 
7821 static const struct ethtool_ops bnx2_ethtool_ops = {
7822 	.get_drvinfo		= bnx2_get_drvinfo,
7823 	.get_regs_len		= bnx2_get_regs_len,
7824 	.get_regs		= bnx2_get_regs,
7825 	.get_wol		= bnx2_get_wol,
7826 	.set_wol		= bnx2_set_wol,
7827 	.nway_reset		= bnx2_nway_reset,
7828 	.get_link		= bnx2_get_link,
7829 	.get_eeprom_len		= bnx2_get_eeprom_len,
7830 	.get_eeprom		= bnx2_get_eeprom,
7831 	.set_eeprom		= bnx2_set_eeprom,
7832 	.get_coalesce		= bnx2_get_coalesce,
7833 	.set_coalesce		= bnx2_set_coalesce,
7834 	.get_ringparam		= bnx2_get_ringparam,
7835 	.set_ringparam		= bnx2_set_ringparam,
7836 	.get_pauseparam		= bnx2_get_pauseparam,
7837 	.set_pauseparam		= bnx2_set_pauseparam,
7838 	.self_test		= bnx2_self_test,
7839 	.get_strings		= bnx2_get_strings,
7840 	.set_phys_id		= bnx2_set_phys_id,
7841 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7842 	.get_sset_count		= bnx2_get_sset_count,
7843 	.get_channels		= bnx2_get_channels,
7844 	.set_channels		= bnx2_set_channels,
7845 	.get_link_ksettings	= bnx2_get_link_ksettings,
7846 	.set_link_ksettings	= bnx2_set_link_ksettings,
7847 };
7848 
7849 /* Called with rtnl_lock */
7850 static int
7851 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7852 {
7853 	struct mii_ioctl_data *data = if_mii(ifr);
7854 	struct bnx2 *bp = netdev_priv(dev);
7855 	int err;
7856 
	switch (cmd) {
7858 	case SIOCGMIIPHY:
7859 		data->phy_id = bp->phy_addr;
7860 
7861 		/* fallthru */
7862 	case SIOCGMIIREG: {
7863 		u32 mii_regval;
7864 
7865 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7866 			return -EOPNOTSUPP;
7867 
7868 		if (!netif_running(dev))
7869 			return -EAGAIN;
7870 
7871 		spin_lock_bh(&bp->phy_lock);
7872 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7873 		spin_unlock_bh(&bp->phy_lock);
7874 
7875 		data->val_out = mii_regval;
7876 
7877 		return err;
7878 	}
7879 
7880 	case SIOCSMIIREG:
7881 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7882 			return -EOPNOTSUPP;
7883 
7884 		if (!netif_running(dev))
7885 			return -EAGAIN;
7886 
7887 		spin_lock_bh(&bp->phy_lock);
7888 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7889 		spin_unlock_bh(&bp->phy_lock);
7890 
7891 		return err;
7892 
7893 	default:
7894 		/* do nothing */
7895 		break;
7896 	}
7897 	return -EOPNOTSUPP;
7898 }
7899 
7900 /* Called with rtnl_lock */
7901 static int
7902 bnx2_change_mac_addr(struct net_device *dev, void *p)
7903 {
7904 	struct sockaddr *addr = p;
7905 	struct bnx2 *bp = netdev_priv(dev);
7906 
7907 	if (!is_valid_ether_addr(addr->sa_data))
7908 		return -EADDRNOTAVAIL;
7909 
7910 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7911 	if (netif_running(dev))
7912 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7913 
7914 	return 0;
7915 }
7916 
7917 /* Called with rtnl_lock */
7918 static int
7919 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7920 {
7921 	struct bnx2 *bp = netdev_priv(dev);
7922 
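	/* Range checking against dev->min_mtu/dev->max_mtu (set in
	 * bnx2_init_one()) is done by the core; only the ring re-size is
	 * left to the driver.
	 */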
7923 	dev->mtu = new_mtu;
7924 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7925 				     false);
7926 }
7927 
7928 #ifdef CONFIG_NET_POLL_CONTROLLER
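/* Netpoll entry point: with normal interrupt delivery unavailable, invoke
 * each vector's handler directly so netconsole and friends can make
 * progress.
 */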
7929 static void
7930 poll_bnx2(struct net_device *dev)
7931 {
7932 	struct bnx2 *bp = netdev_priv(dev);
7933 	int i;
7934 
7935 	for (i = 0; i < bp->irq_nvecs; i++) {
7936 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7937 
7938 		disable_irq(irq->vector);
7939 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7940 		enable_irq(irq->vector);
7941 	}
7942 }
7943 #endif
7944 
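/* Decide whether a 5709 port is copper or SERDES: first from the
 * dual-media bond ID, then from the PHY strap bits, whose meaning differs
 * between PCI function 0 and function 1.
 */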
7945 static void
7946 bnx2_get_5709_media(struct bnx2 *bp)
7947 {
7948 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7949 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7950 	u32 strap;
7951 
7952 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7953 		return;
7954 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7955 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7956 		return;
7957 	}
7958 
7959 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7960 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7961 	else
7962 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7963 
7964 	if (bp->func == 0) {
7965 		switch (strap) {
7966 		case 0x4:
7967 		case 0x5:
7968 		case 0x6:
7969 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7970 			return;
7971 		}
7972 	} else {
7973 		switch (strap) {
7974 		case 0x1:
7975 		case 0x2:
7976 		case 0x4:
7977 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7978 			return;
7979 		}
7980 	}
7981 }
7982 
7983 static void
7984 bnx2_get_pci_speed(struct bnx2 *bp)
7985 {
7986 	u32 reg;
7987 
7988 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7989 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7990 		u32 clkreg;
7991 
7992 		bp->flags |= BNX2_FLAG_PCIX;
7993 
7994 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7995 
7996 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7997 		switch (clkreg) {
7998 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7999 			bp->bus_speed_mhz = 133;
8000 			break;
8001 
8002 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8003 			bp->bus_speed_mhz = 100;
8004 			break;
8005 
8006 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8007 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8008 			bp->bus_speed_mhz = 66;
8009 			break;
8010 
8011 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8012 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8013 			bp->bus_speed_mhz = 50;
8014 			break;
8015 
8016 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8017 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8018 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8019 			bp->bus_speed_mhz = 33;
8020 			break;
8021 		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}
8029 
	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
8034 
8035 static void
8036 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8037 {
8038 	int rc, i, j;
8039 	u8 *data;
8040 	unsigned int block_end, rosize, len;
8041 
8042 #define BNX2_VPD_NVRAM_OFFSET	0x300
8043 #define BNX2_VPD_LEN		128
8044 #define BNX2_MAX_VER_SLEN	30
8045 
8046 	data = kmalloc(256, GFP_KERNEL);
8047 	if (!data)
8048 		return;
8049 
8050 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8051 			     BNX2_VPD_LEN);
8052 	if (rc)
8053 		goto vpd_done;
8054 
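	/* Reverse the bytes within each 32-bit word of the raw NVRAM copy
	 * (kept in the upper half of the buffer) into the lower half, where
	 * the VPD parser expects them.
	 */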
8055 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8056 		data[i] = data[i + BNX2_VPD_LEN + 3];
8057 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8058 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8059 		data[i + 3] = data[i + BNX2_VPD_LEN];
8060 	}
8061 
8062 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8063 	if (i < 0)
8064 		goto vpd_done;
8065 
8066 	rosize = pci_vpd_lrdt_size(&data[i]);
8067 	i += PCI_VPD_LRDT_TAG_SIZE;
8068 	block_end = i + rosize;
8069 
8070 	if (block_end > BNX2_VPD_LEN)
8071 		goto vpd_done;
8072 
8073 	j = pci_vpd_find_info_keyword(data, i, rosize,
8074 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8075 	if (j < 0)
8076 		goto vpd_done;
8077 
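	/* The vendor version keyword below is only used when the VPD
	 * manufacturer ID reads "1028" (Dell's PCI vendor ID).
	 */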
8078 	len = pci_vpd_info_field_size(&data[j]);
8079 
8080 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8081 	if (j + len > block_end || len != 4 ||
8082 	    memcmp(&data[j], "1028", 4))
8083 		goto vpd_done;
8084 
8085 	j = pci_vpd_find_info_keyword(data, i, rosize,
8086 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8087 	if (j < 0)
8088 		goto vpd_done;
8089 
8090 	len = pci_vpd_info_field_size(&data[j]);
8091 
8092 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8093 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8094 		goto vpd_done;
8095 
8096 	memcpy(bp->fw_version, &data[j], len);
8097 	bp->fw_version[len] = ' ';
8098 
8099 vpd_done:
8100 	kfree(data);
8101 }
8102 
8103 static int
8104 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8105 {
8106 	struct bnx2 *bp;
8107 	int rc, i, j;
8108 	u32 reg;
8109 	u64 dma_mask, persist_dma_mask;
8110 	int err;
8111 
8112 	SET_NETDEV_DEV(dev, &pdev->dev);
8113 	bp = netdev_priv(dev);
8114 
8115 	bp->flags = 0;
8116 	bp->phy_flags = 0;
8117 
8118 	bp->temp_stats_blk =
8119 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8120 
8121 	if (!bp->temp_stats_blk) {
8122 		rc = -ENOMEM;
8123 		goto err_out;
8124 	}
8125 
8126 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8127 	rc = pci_enable_device(pdev);
8128 	if (rc) {
8129 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8130 		goto err_out;
8131 	}
8132 
8133 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8134 		dev_err(&pdev->dev,
8135 			"Cannot find PCI device base address, aborting\n");
8136 		rc = -ENODEV;
8137 		goto err_out_disable;
8138 	}
8139 
8140 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8141 	if (rc) {
8142 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8143 		goto err_out_disable;
8144 	}
8145 
8146 	pci_set_master(pdev);
8147 
8148 	bp->pm_cap = pdev->pm_cap;
8149 	if (bp->pm_cap == 0) {
8150 		dev_err(&pdev->dev,
8151 			"Cannot find power management capability, aborting\n");
8152 		rc = -EIO;
8153 		goto err_out_release;
8154 	}
8155 
8156 	bp->dev = dev;
8157 	bp->pdev = pdev;
8158 
8159 	spin_lock_init(&bp->phy_lock);
8160 	spin_lock_init(&bp->indirect_lock);
8161 #ifdef BCM_CNIC
8162 	mutex_init(&bp->cnic_lock);
8163 #endif
8164 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8165 
8166 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8167 							 TX_MAX_TSS_RINGS + 1));
8168 	if (!bp->regview) {
8169 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8170 		rc = -ENOMEM;
8171 		goto err_out_release;
8172 	}
8173 
8174 	/* Configure byte swap and enable write to the reg_window registers.
8175 	 * Rely on CPU to do target byte swapping on big endian systems
8176 	 * The chip's target access swapping will not swap all accesses
8177 	 */
8178 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8179 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8180 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8181 
8182 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8183 
8184 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8185 		if (!pci_is_pcie(pdev)) {
8186 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8187 			rc = -EIO;
8188 			goto err_out_unmap;
8189 		}
8190 		bp->flags |= BNX2_FLAG_PCIE;
8191 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8192 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8193 
8194 		/* AER (Advanced Error Reporting) hooks */
8195 		err = pci_enable_pcie_error_reporting(pdev);
8196 		if (!err)
8197 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8198 
8199 	} else {
8200 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8201 		if (bp->pcix_cap == 0) {
8202 			dev_err(&pdev->dev,
8203 				"Cannot find PCIX capability, aborting\n");
8204 			rc = -EIO;
8205 			goto err_out_unmap;
8206 		}
8207 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8208 	}
8209 
8210 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8211 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8212 		if (pdev->msix_cap)
8213 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8214 	}
8215 
8216 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8217 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8218 		if (pdev->msi_cap)
8219 			bp->flags |= BNX2_FLAG_MSI_CAP;
8220 	}
8221 
8222 	/* 5708 cannot support DMA addresses > 40-bit.  */
8223 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8224 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8225 	else
8226 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8227 
8228 	/* Configure DMA attributes. */
8229 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8230 		dev->features |= NETIF_F_HIGHDMA;
8231 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8232 		if (rc) {
8233 			dev_err(&pdev->dev,
8234 				"pci_set_consistent_dma_mask failed, aborting\n");
8235 			goto err_out_unmap;
8236 		}
8237 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8238 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8239 		goto err_out_unmap;
8240 	}
8241 
8242 	if (!(bp->flags & BNX2_FLAG_PCIE))
8243 		bnx2_get_pci_speed(bp);
8244 
8245 	/* 5706A0 may falsely detect SERR and PERR. */
8246 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8247 		reg = BNX2_RD(bp, PCI_COMMAND);
8248 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8249 		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}
8257 
8258 	bnx2_init_nvram(bp);
8259 
8260 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8261 
8262 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8263 		bp->func = 1;
8264 
8265 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8266 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8267 		u32 off = bp->func << 2;
8268 
8269 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8270 	} else
8271 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8272 
8273 	/* Get the permanent MAC address.  First we need to make sure the
8274 	 * firmware is actually running.
8275 	 */
8276 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8277 
8278 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8279 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8280 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8281 		rc = -ENODEV;
8282 		goto err_out_unmap;
8283 	}
8284 
8285 	bnx2_read_vpd_fw_ver(bp);
8286 
8287 	j = strlen(bp->fw_version);
8288 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
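	/* Append "bc x.y.z", decoding the three high bytes of
	 * BNX2_DEV_INFO_BC_REV; the inner loop prints one byte in decimal
	 * while suppressing leading zeros.
	 */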
8289 	for (i = 0; i < 3 && j < 24; i++) {
8290 		u8 num, k, skip0;
8291 
8292 		if (i == 0) {
8293 			bp->fw_version[j++] = 'b';
8294 			bp->fw_version[j++] = 'c';
8295 			bp->fw_version[j++] = ' ';
8296 		}
8297 		num = (u8) (reg >> (24 - (i * 8)));
8298 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8299 			if (num >= k || !skip0 || k == 1) {
8300 				bp->fw_version[j++] = (num / k) + '0';
8301 				skip0 = 0;
8302 			}
8303 		}
8304 		if (i != 2)
8305 			bp->fw_version[j++] = '.';
8306 	}
8307 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8308 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8309 		bp->wol = 1;
8310 
8311 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8312 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8313 
8314 		for (i = 0; i < 30; i++) {
8315 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8316 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8317 				break;
8318 			msleep(10);
8319 		}
8320 	}
8321 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8322 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8323 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8324 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8325 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8326 
8327 		if (j < 32)
8328 			bp->fw_version[j++] = ' ';
8329 		for (i = 0; i < 3 && j < 28; i++) {
8330 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8331 			reg = be32_to_cpu(reg);
8332 			memcpy(&bp->fw_version[j], &reg, 4);
8333 			j += 4;
8334 		}
8335 	}
8336 
8337 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8338 	bp->mac_addr[0] = (u8) (reg >> 8);
8339 	bp->mac_addr[1] = (u8) reg;
8340 
8341 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8342 	bp->mac_addr[2] = (u8) (reg >> 24);
8343 	bp->mac_addr[3] = (u8) (reg >> 16);
8344 	bp->mac_addr[4] = (u8) (reg >> 8);
8345 	bp->mac_addr[5] = (u8) reg;
8346 
8347 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8348 	bnx2_set_rx_ring_size(bp, 255);
8349 
8350 	bp->tx_quick_cons_trip_int = 2;
8351 	bp->tx_quick_cons_trip = 20;
8352 	bp->tx_ticks_int = 18;
8353 	bp->tx_ticks = 80;
8354 
8355 	bp->rx_quick_cons_trip_int = 2;
8356 	bp->rx_quick_cons_trip = 12;
8357 	bp->rx_ticks_int = 18;
8358 	bp->rx_ticks = 18;
8359 
8360 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8361 
8362 	bp->current_interval = BNX2_TIMER_INTERVAL;
8363 
8364 	bp->phy_addr = 1;
8365 
8366 	/* allocate stats_blk */
8367 	rc = bnx2_alloc_stats_blk(dev);
8368 	if (rc)
8369 		goto err_out_unmap;
8370 
8371 	/* Disable WOL support if we are running on a SERDES chip. */
8372 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8373 		bnx2_get_5709_media(bp);
8374 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8375 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8376 
8377 	bp->phy_port = PORT_TP;
8378 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8379 		bp->phy_port = PORT_FIBRE;
8380 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8381 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8382 			bp->flags |= BNX2_FLAG_NO_WOL;
8383 			bp->wol = 0;
8384 		}
8385 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8386 			/* Don't do parallel detect on this board because of
8387 			 * some board problems.  The link will not go down
8388 			 * if we do parallel detect.
8389 			 */
8390 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8391 			    pdev->subsystem_device == 0x310c)
8392 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8393 		} else {
8394 			bp->phy_addr = 2;
8395 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8396 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8397 		}
8398 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8399 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8400 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8401 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8402 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8403 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8404 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8405 
8406 	bnx2_init_fw_cap(bp);
8407 
8408 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8409 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8410 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8411 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8412 		bp->flags |= BNX2_FLAG_NO_WOL;
8413 		bp->wol = 0;
8414 	}
8415 
8416 	if (bp->flags & BNX2_FLAG_NO_WOL)
8417 		device_set_wakeup_capable(&bp->pdev->dev, false);
8418 	else
8419 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8420 
8421 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8422 		bp->tx_quick_cons_trip_int =
8423 			bp->tx_quick_cons_trip;
8424 		bp->tx_ticks_int = bp->tx_ticks;
8425 		bp->rx_quick_cons_trip_int =
8426 			bp->rx_quick_cons_trip;
8427 		bp->rx_ticks_int = bp->rx_ticks;
8428 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8429 		bp->com_ticks_int = bp->com_ticks;
8430 		bp->cmd_ticks_int = bp->cmd_ticks;
8431 	}
8432 
8433 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8434 	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8436 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8437 	 * but causes problems on the AMD 8132 which will eventually stop
8438 	 * responding after a while.
8439 	 *
8440 	 * AMD believes this incompatibility is unique to the 5706, and
8441 	 * prefers to locally disable MSI rather than globally disabling it.
8442 	 */
8443 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8444 		struct pci_dev *amd_8132 = NULL;
8445 
8446 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8447 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8448 						  amd_8132))) {
8449 
8450 			if (amd_8132->revision >= 0x10 &&
8451 			    amd_8132->revision <= 0x13) {
8452 				disable_msi = 1;
8453 				pci_dev_put(amd_8132);
8454 				break;
8455 			}
8456 		}
8457 	}
8458 
8459 	bnx2_set_default_link(bp);
8460 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8461 
8462 	timer_setup(&bp->timer, bnx2_timer, 0);
8463 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8464 
8465 #ifdef BCM_CNIC
8466 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8467 		bp->cnic_eth_dev.max_iscsi_conn =
8468 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8469 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8470 	bp->cnic_probe = bnx2_cnic_probe;
8471 #endif
8472 	pci_save_state(pdev);
8473 
8474 	return 0;
8475 
8476 err_out_unmap:
8477 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8478 		pci_disable_pcie_error_reporting(pdev);
8479 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8480 	}
8481 
8482 	pci_iounmap(pdev, bp->regview);
8483 	bp->regview = NULL;
8484 
8485 err_out_release:
8486 	pci_release_regions(pdev);
8487 
8488 err_out_disable:
8489 	pci_disable_device(pdev);
8490 
8491 err_out:
8492 	kfree(bp->temp_stats_blk);
8493 
8494 	return rc;
8495 }
8496 
8497 static char *
8498 bnx2_bus_string(struct bnx2 *bp, char *str)
8499 {
8500 	char *s = str;
8501 
8502 	if (bp->flags & BNX2_FLAG_PCIE) {
8503 		s += sprintf(s, "PCI Express");
8504 	} else {
8505 		s += sprintf(s, "PCI");
8506 		if (bp->flags & BNX2_FLAG_PCIX)
8507 			s += sprintf(s, "-X");
8508 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8509 			s += sprintf(s, " 32-bit");
8510 		else
8511 			s += sprintf(s, " 64-bit");
8512 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8513 	}
8514 	return str;
8515 }
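
/* bnx2_bus_string() above yields strings such as "PCI Express" or
 * "PCI-X 64-bit 133MHz" for the probe banner printed in bnx2_init_one().
 */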
8516 
8517 static void
8518 bnx2_del_napi(struct bnx2 *bp)
8519 {
8520 	int i;
8521 
8522 	for (i = 0; i < bp->irq_nvecs; i++)
8523 		netif_napi_del(&bp->bnx2_napi[i].napi);
8524 }
8525 
8526 static void
8527 bnx2_init_napi(struct bnx2 *bp)
8528 {
8529 	int i;
8530 
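	/* Vector 0 also services slow-path (link, error) events, so it gets
	 * bnx2_poll(); the remaining MSI-X vectors use the leaner
	 * bnx2_poll_msix().
	 */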
8531 	for (i = 0; i < bp->irq_nvecs; i++) {
8532 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8533 		int (*poll)(struct napi_struct *, int);
8534 
8535 		if (i == 0)
8536 			poll = bnx2_poll;
8537 		else
8538 			poll = bnx2_poll_msix;
8539 
		netif_napi_add(bp->dev, &bnapi->napi, poll, 64);
8541 		bnapi->bp = bp;
8542 	}
8543 }
8544 
8545 static const struct net_device_ops bnx2_netdev_ops = {
8546 	.ndo_open		= bnx2_open,
8547 	.ndo_start_xmit		= bnx2_start_xmit,
8548 	.ndo_stop		= bnx2_close,
8549 	.ndo_get_stats64	= bnx2_get_stats64,
8550 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8551 	.ndo_do_ioctl		= bnx2_ioctl,
8552 	.ndo_validate_addr	= eth_validate_addr,
8553 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8554 	.ndo_change_mtu		= bnx2_change_mtu,
8555 	.ndo_set_features	= bnx2_set_features,
8556 	.ndo_tx_timeout		= bnx2_tx_timeout,
8557 #ifdef CONFIG_NET_POLL_CONTROLLER
8558 	.ndo_poll_controller	= poll_bnx2,
8559 #endif
8560 };
8561 
8562 static int
8563 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8564 {
8565 	static int version_printed = 0;
8566 	struct net_device *dev;
8567 	struct bnx2 *bp;
8568 	int rc;
8569 	char str[40];
8570 
8571 	if (version_printed++ == 0)
8572 		pr_info("%s", version);
8573 
	/* dev is zeroed by alloc_etherdev_mq() */
8575 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8576 	if (!dev)
8577 		return -ENOMEM;
8578 
8579 	rc = bnx2_init_board(pdev, dev);
8580 	if (rc < 0)
8581 		goto err_free;
8582 
8583 	dev->netdev_ops = &bnx2_netdev_ops;
8584 	dev->watchdog_timeo = TX_TIMEOUT;
8585 	dev->ethtool_ops = &bnx2_ethtool_ops;
8586 
8587 	bp = netdev_priv(dev);
8588 
8589 	pci_set_drvdata(pdev, dev);
8590 
	/*
	 * In-flight DMA from the first kernel could still be running in a
	 * kdump kernel.  A new IO page table has already been created
	 * before bnx2 resets the chip at open time, so wait for any
	 * in-flight DMA to complete to keep it from hitting the newly
	 * created IO page table.
	 */
8597 	if (is_kdump_kernel())
8598 		bnx2_wait_dma_complete(bp);
8599 
8600 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8601 
8602 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8603 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8604 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8605 
8606 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8607 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8608 
8609 	dev->vlan_features = dev->hw_features;
8610 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8611 	dev->features |= dev->hw_features;
8612 	dev->priv_flags |= IFF_UNICAST_FLT;
8613 	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8614 	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8615 
8616 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8617 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8618 
	rc = register_netdev(dev);
	if (rc) {
8620 		dev_err(&pdev->dev, "Cannot register net device\n");
8621 		goto error;
8622 	}
8623 
8624 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8625 		    "node addr %pM\n", board_info[ent->driver_data].name,
8626 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8627 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8628 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8629 		    pdev->irq, dev->dev_addr);
8630 
8631 	return 0;
8632 
8633 error:
8634 	pci_iounmap(pdev, bp->regview);
8635 	pci_release_regions(pdev);
8636 	pci_disable_device(pdev);
8637 err_free:
8638 	bnx2_free_stats_blk(dev);
8639 	free_netdev(dev);
8640 	return rc;
8641 }
8642 
8643 static void
8644 bnx2_remove_one(struct pci_dev *pdev)
8645 {
8646 	struct net_device *dev = pci_get_drvdata(pdev);
8647 	struct bnx2 *bp = netdev_priv(dev);
8648 
8649 	unregister_netdev(dev);
8650 
8651 	del_timer_sync(&bp->timer);
8652 	cancel_work_sync(&bp->reset_task);
8653 
8654 	pci_iounmap(bp->pdev, bp->regview);
8655 
8656 	bnx2_free_stats_blk(dev);
8657 	kfree(bp->temp_stats_blk);
8658 
8659 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8660 		pci_disable_pcie_error_reporting(pdev);
8661 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8662 	}
8663 
8664 	bnx2_release_firmware(bp);
8665 
8666 	free_netdev(dev);
8667 
8668 	pci_release_regions(pdev);
8669 	pci_disable_device(pdev);
8670 }
8671 
8672 #ifdef CONFIG_PM_SLEEP
8673 static int
8674 bnx2_suspend(struct device *device)
8675 {
8676 	struct net_device *dev = dev_get_drvdata(device);
8677 	struct bnx2 *bp = netdev_priv(dev);
8678 
8679 	if (netif_running(dev)) {
8680 		cancel_work_sync(&bp->reset_task);
8681 		bnx2_netif_stop(bp, true);
8682 		netif_device_detach(dev);
8683 		del_timer_sync(&bp->timer);
8684 		bnx2_shutdown_chip(bp);
8685 		__bnx2_free_irq(bp);
8686 		bnx2_free_skbs(bp);
8687 	}
8688 	bnx2_setup_wol(bp);
8689 	return 0;
8690 }
8691 
8692 static int
8693 bnx2_resume(struct device *device)
8694 {
8695 	struct net_device *dev = dev_get_drvdata(device);
8696 	struct bnx2 *bp = netdev_priv(dev);
8697 
8698 	if (!netif_running(dev))
8699 		return 0;
8700 
8701 	bnx2_set_power_state(bp, PCI_D0);
8702 	netif_device_attach(dev);
8703 	bnx2_request_irq(bp);
8704 	bnx2_init_nic(bp, 1);
8705 	bnx2_netif_start(bp, true);
8706 	return 0;
8707 }
8708 
8709 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8710 #define BNX2_PM_OPS (&bnx2_pm_ops)
8711 
8712 #else
8713 
8714 #define BNX2_PM_OPS NULL
8715 
8716 #endif /* CONFIG_PM_SLEEP */
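
/* PCI error recovery: the PCI core calls bnx2_io_error_detected() first,
 * bnx2_io_slot_reset() after the link has been reset, and finally
 * bnx2_io_resume() once traffic may flow again.
 */
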
8717 /**
8718  * bnx2_io_error_detected - called when PCI error is detected
8719  * @pdev: Pointer to PCI device
8720  * @state: The current pci connection state
8721  *
8722  * This function is called after a PCI bus error affecting
8723  * this device has been detected.
8724  */
8725 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8726 					       pci_channel_state_t state)
8727 {
8728 	struct net_device *dev = pci_get_drvdata(pdev);
8729 	struct bnx2 *bp = netdev_priv(dev);
8730 
8731 	rtnl_lock();
8732 	netif_device_detach(dev);
8733 
8734 	if (state == pci_channel_io_perm_failure) {
8735 		rtnl_unlock();
8736 		return PCI_ERS_RESULT_DISCONNECT;
8737 	}
8738 
8739 	if (netif_running(dev)) {
8740 		bnx2_netif_stop(bp, true);
8741 		del_timer_sync(&bp->timer);
8742 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8743 	}
8744 
8745 	pci_disable_device(pdev);
8746 	rtnl_unlock();
8747 
	/* Request a slot reset. */
8749 	return PCI_ERS_RESULT_NEED_RESET;
8750 }
8751 
8752 /**
8753  * bnx2_io_slot_reset - called after the pci bus has been reset.
8754  * @pdev: Pointer to PCI device
8755  *
8756  * Restart the card from scratch, as if from a cold-boot.
8757  */
8758 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8759 {
8760 	struct net_device *dev = pci_get_drvdata(pdev);
8761 	struct bnx2 *bp = netdev_priv(dev);
8762 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8763 	int err = 0;
8764 
8765 	rtnl_lock();
8766 	if (pci_enable_device(pdev)) {
8767 		dev_err(&pdev->dev,
8768 			"Cannot re-enable PCI device after reset\n");
8769 	} else {
8770 		pci_set_master(pdev);
8771 		pci_restore_state(pdev);
8772 		pci_save_state(pdev);
8773 
8774 		if (netif_running(dev))
8775 			err = bnx2_init_nic(bp, 1);
8776 
8777 		if (!err)
8778 			result = PCI_ERS_RESULT_RECOVERED;
8779 	}
8780 
8781 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8782 		bnx2_napi_enable(bp);
8783 		dev_close(dev);
8784 	}
8785 	rtnl_unlock();
8786 
	return result;
8791 }
8792 
8793 /**
8794  * bnx2_io_resume - called when traffic can start flowing again.
8795  * @pdev: Pointer to PCI device
8796  *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8799  */
8800 static void bnx2_io_resume(struct pci_dev *pdev)
8801 {
8802 	struct net_device *dev = pci_get_drvdata(pdev);
8803 	struct bnx2 *bp = netdev_priv(dev);
8804 
8805 	rtnl_lock();
8806 	if (netif_running(dev))
8807 		bnx2_netif_start(bp, true);
8808 
8809 	netif_device_attach(dev);
8810 	rtnl_unlock();
8811 }
8812 
8813 static void bnx2_shutdown(struct pci_dev *pdev)
8814 {
8815 	struct net_device *dev = pci_get_drvdata(pdev);
8816 	struct bnx2 *bp;
8817 
8818 	if (!dev)
8819 		return;
8820 
8821 	bp = netdev_priv(dev);
8822 	if (!bp)
8823 		return;
8824 
8825 	rtnl_lock();
8826 	if (netif_running(dev))
8827 		dev_close(bp->dev);
8828 
8829 	if (system_state == SYSTEM_POWER_OFF)
8830 		bnx2_set_power_state(bp, PCI_D3hot);
8831 
8832 	rtnl_unlock();
8833 }
8834 
8835 static const struct pci_error_handlers bnx2_err_handler = {
8836 	.error_detected	= bnx2_io_error_detected,
8837 	.slot_reset	= bnx2_io_slot_reset,
8838 	.resume		= bnx2_io_resume,
8839 };
8840 
8841 static struct pci_driver bnx2_pci_driver = {
8842 	.name		= DRV_MODULE_NAME,
8843 	.id_table	= bnx2_pci_tbl,
8844 	.probe		= bnx2_init_one,
8845 	.remove		= bnx2_remove_one,
8846 	.driver.pm	= BNX2_PM_OPS,
8847 	.err_handler	= &bnx2_err_handler,
8848 	.shutdown	= bnx2_shutdown,
8849 };
8850 
8851 module_pci_driver(bnx2_pci_driver);
8852