1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 
53 #if IS_ENABLED(CONFIG_CNIC)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59 
60 #define DRV_MODULE_NAME		"bnx2"
61 #define DRV_MODULE_VERSION	"2.2.6"
62 #define DRV_MODULE_RELDATE	"January 29, 2014"
63 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
68 
69 #define RUN_AT(x) (jiffies + (x))
70 
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73 
74 static char version[] =
75 	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 
87 static int disable_msi = 0;
88 
89 module_param(disable_msi, int, S_IRUGO);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 
92 typedef enum {
93 	BCM5706 = 0,
94 	NC370T,
95 	NC370I,
96 	BCM5706S,
97 	NC370F,
98 	BCM5708,
99 	BCM5708S,
100 	BCM5709,
101 	BCM5709S,
102 	BCM5716,
103 	BCM5716S,
104 } board_t;
105 
106 /* indexed by board_t, above */
107 static struct {
108 	char *name;
109 } board_info[] = {
110 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
112 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
113 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
115 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 	};
122 
123 static const struct pci_device_id bnx2_pci_tbl[] = {
124 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
143 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
145 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 	{ 0, }
147 };
148 
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
153 	/* Slow EEPROM */
154 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 	 "EEPROM - slow"},
158 	/* Expansion entry 0001 */
159 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 	 "Entry 0001"},
163 	/* Saifun SA25F010 (non-buffered flash) */
164 	/* strap, cfg1, & write1 need updates */
165 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 	 "Non-buffered flash (128kB)"},
169 	/* Saifun SA25F020 (non-buffered flash) */
170 	/* strap, cfg1, & write1 need updates */
171 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 	 "Non-buffered flash (256kB)"},
175 	/* Expansion entry 0100 */
176 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 	 "Entry 0100"},
180 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 	/* Saifun SA25F005 (non-buffered flash) */
191 	/* strap, cfg1, & write1 need updates */
192 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 	 "Non-buffered flash (64kB)"},
196 	/* Fast EEPROM */
197 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 	 "EEPROM - fast"},
201 	/* Expansion entry 1001 */
202 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 	 "Entry 1001"},
206 	/* Expansion entry 1010 */
207 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 	 "Entry 1010"},
211 	/* ATMEL AT45DB011B (buffered flash) */
212 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 	 "Buffered flash (128kB)"},
216 	/* Expansion entry 1100 */
217 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 	 "Entry 1100"},
221 	/* Expansion entry 1101 */
222 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1101"},
226 	/* Ateml Expansion entry 1110 */
227 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 	 "Entry 1110 (Atmel)"},
231 	/* ATMEL AT45DB021B (buffered flash) */
232 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 	 "Buffered flash (256kB)"},
236 };
237 
238 static const struct flash_spec flash_5709 = {
239 	.flags		= BNX2_NV_BUFFERED,
240 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
241 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
242 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
243 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
244 	.name		= "5709 Buffered flash (256kB)",
245 };
246 
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251 
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254 	u32 diff;
255 
256 	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
257 	barrier();
258 
259 	/* The ring uses 256 indices for 255 entries, one of them
260 	 * needs to be skipped.
261 	 */
262 	diff = txr->tx_prod - txr->tx_cons;
263 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264 		diff &= 0xffff;
265 		if (diff == BNX2_TX_DESC_CNT)
266 			diff = BNX2_MAX_TX_DESC_CNT;
267 	}
268 	return bp->tx_ring_size - diff;
269 }
270 
271 static u32
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273 {
274 	unsigned long flags;
275 	u32 val;
276 
277 	spin_lock_irqsave(&bp->indirect_lock, flags);
278 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
279 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
280 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
281 	return val;
282 }
283 
284 static void
285 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
286 {
287 	unsigned long flags;
288 
289 	spin_lock_irqsave(&bp->indirect_lock, flags);
290 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
291 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
292 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
293 }
294 
295 static void
296 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
297 {
298 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
299 }
300 
301 static u32
302 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
303 {
304 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
305 }
306 
307 static void
308 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
309 {
310 	unsigned long flags;
311 
312 	offset += cid_addr;
313 	spin_lock_irqsave(&bp->indirect_lock, flags);
314 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
315 		int i;
316 
317 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
318 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
319 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
320 		for (i = 0; i < 5; i++) {
321 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
322 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
323 				break;
324 			udelay(5);
325 		}
326 	} else {
327 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
328 		BNX2_WR(bp, BNX2_CTX_DATA, val);
329 	}
330 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
331 }
332 
333 #ifdef BCM_CNIC
334 static int
335 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
336 {
337 	struct bnx2 *bp = netdev_priv(dev);
338 	struct drv_ctl_io *io = &info->data.io;
339 
340 	switch (info->cmd) {
341 	case DRV_CTL_IO_WR_CMD:
342 		bnx2_reg_wr_ind(bp, io->offset, io->data);
343 		break;
344 	case DRV_CTL_IO_RD_CMD:
345 		io->data = bnx2_reg_rd_ind(bp, io->offset);
346 		break;
347 	case DRV_CTL_CTX_WR_CMD:
348 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
349 		break;
350 	default:
351 		return -EINVAL;
352 	}
353 	return 0;
354 }
355 
356 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
357 {
358 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
359 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
360 	int sb_id;
361 
362 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
363 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
364 		bnapi->cnic_present = 0;
365 		sb_id = bp->irq_nvecs;
366 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
367 	} else {
368 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
369 		bnapi->cnic_tag = bnapi->last_status_idx;
370 		bnapi->cnic_present = 1;
371 		sb_id = 0;
372 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
373 	}
374 
375 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
376 	cp->irq_arr[0].status_blk = (void *)
377 		((unsigned long) bnapi->status_blk.msi +
378 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
379 	cp->irq_arr[0].status_blk_num = sb_id;
380 	cp->num_irq = 1;
381 }
382 
383 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
384 			      void *data)
385 {
386 	struct bnx2 *bp = netdev_priv(dev);
387 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
388 
389 	if (ops == NULL)
390 		return -EINVAL;
391 
392 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
393 		return -EBUSY;
394 
395 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
396 		return -ENODEV;
397 
398 	bp->cnic_data = data;
399 	rcu_assign_pointer(bp->cnic_ops, ops);
400 
401 	cp->num_irq = 0;
402 	cp->drv_state = CNIC_DRV_STATE_REGD;
403 
404 	bnx2_setup_cnic_irq_info(bp);
405 
406 	return 0;
407 }
408 
409 static int bnx2_unregister_cnic(struct net_device *dev)
410 {
411 	struct bnx2 *bp = netdev_priv(dev);
412 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
413 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
414 
415 	mutex_lock(&bp->cnic_lock);
416 	cp->drv_state = 0;
417 	bnapi->cnic_present = 0;
418 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
419 	mutex_unlock(&bp->cnic_lock);
420 	synchronize_rcu();
421 	return 0;
422 }
423 
424 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
425 {
426 	struct bnx2 *bp = netdev_priv(dev);
427 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
428 
429 	if (!cp->max_iscsi_conn)
430 		return NULL;
431 
432 	cp->drv_owner = THIS_MODULE;
433 	cp->chip_id = bp->chip_id;
434 	cp->pdev = bp->pdev;
435 	cp->io_base = bp->regview;
436 	cp->drv_ctl = bnx2_drv_ctl;
437 	cp->drv_register_cnic = bnx2_register_cnic;
438 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
439 
440 	return cp;
441 }
442 
443 static void
444 bnx2_cnic_stop(struct bnx2 *bp)
445 {
446 	struct cnic_ops *c_ops;
447 	struct cnic_ctl_info info;
448 
449 	mutex_lock(&bp->cnic_lock);
450 	c_ops = rcu_dereference_protected(bp->cnic_ops,
451 					  lockdep_is_held(&bp->cnic_lock));
452 	if (c_ops) {
453 		info.cmd = CNIC_CTL_STOP_CMD;
454 		c_ops->cnic_ctl(bp->cnic_data, &info);
455 	}
456 	mutex_unlock(&bp->cnic_lock);
457 }
458 
459 static void
460 bnx2_cnic_start(struct bnx2 *bp)
461 {
462 	struct cnic_ops *c_ops;
463 	struct cnic_ctl_info info;
464 
465 	mutex_lock(&bp->cnic_lock);
466 	c_ops = rcu_dereference_protected(bp->cnic_ops,
467 					  lockdep_is_held(&bp->cnic_lock));
468 	if (c_ops) {
469 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
470 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
471 
472 			bnapi->cnic_tag = bnapi->last_status_idx;
473 		}
474 		info.cmd = CNIC_CTL_START_CMD;
475 		c_ops->cnic_ctl(bp->cnic_data, &info);
476 	}
477 	mutex_unlock(&bp->cnic_lock);
478 }
479 
480 #else
481 
482 static void
483 bnx2_cnic_stop(struct bnx2 *bp)
484 {
485 }
486 
487 static void
488 bnx2_cnic_start(struct bnx2 *bp)
489 {
490 }
491 
492 #endif
493 
494 static int
495 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
496 {
497 	u32 val1;
498 	int i, ret;
499 
500 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
501 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
502 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
503 
504 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
505 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
506 
507 		udelay(40);
508 	}
509 
510 	val1 = (bp->phy_addr << 21) | (reg << 16) |
511 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
512 		BNX2_EMAC_MDIO_COMM_START_BUSY;
513 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
514 
515 	for (i = 0; i < 50; i++) {
516 		udelay(10);
517 
518 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
519 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
520 			udelay(5);
521 
522 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
523 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
524 
525 			break;
526 		}
527 	}
528 
529 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
530 		*val = 0x0;
531 		ret = -EBUSY;
532 	}
533 	else {
534 		*val = val1;
535 		ret = 0;
536 	}
537 
538 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
539 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
540 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
541 
542 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
543 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
544 
545 		udelay(40);
546 	}
547 
548 	return ret;
549 }
550 
551 static int
552 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
553 {
554 	u32 val1;
555 	int i, ret;
556 
557 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
558 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
559 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
560 
561 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
562 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
563 
564 		udelay(40);
565 	}
566 
567 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
568 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
569 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
570 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
571 
572 	for (i = 0; i < 50; i++) {
573 		udelay(10);
574 
575 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
576 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
577 			udelay(5);
578 			break;
579 		}
580 	}
581 
582 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
583         	ret = -EBUSY;
584 	else
585 		ret = 0;
586 
587 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
588 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
589 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
590 
591 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
592 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
593 
594 		udelay(40);
595 	}
596 
597 	return ret;
598 }
599 
600 static void
601 bnx2_disable_int(struct bnx2 *bp)
602 {
603 	int i;
604 	struct bnx2_napi *bnapi;
605 
606 	for (i = 0; i < bp->irq_nvecs; i++) {
607 		bnapi = &bp->bnx2_napi[i];
608 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
609 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
610 	}
611 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
612 }
613 
614 static void
615 bnx2_enable_int(struct bnx2 *bp)
616 {
617 	int i;
618 	struct bnx2_napi *bnapi;
619 
620 	for (i = 0; i < bp->irq_nvecs; i++) {
621 		bnapi = &bp->bnx2_napi[i];
622 
623 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
626 			bnapi->last_status_idx);
627 
628 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
629 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
630 			bnapi->last_status_idx);
631 	}
632 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
633 }
634 
635 static void
636 bnx2_disable_int_sync(struct bnx2 *bp)
637 {
638 	int i;
639 
640 	atomic_inc(&bp->intr_sem);
641 	if (!netif_running(bp->dev))
642 		return;
643 
644 	bnx2_disable_int(bp);
645 	for (i = 0; i < bp->irq_nvecs; i++)
646 		synchronize_irq(bp->irq_tbl[i].vector);
647 }
648 
649 static void
650 bnx2_napi_disable(struct bnx2 *bp)
651 {
652 	int i;
653 
654 	for (i = 0; i < bp->irq_nvecs; i++)
655 		napi_disable(&bp->bnx2_napi[i].napi);
656 }
657 
658 static void
659 bnx2_napi_enable(struct bnx2 *bp)
660 {
661 	int i;
662 
663 	for (i = 0; i < bp->irq_nvecs; i++)
664 		napi_enable(&bp->bnx2_napi[i].napi);
665 }
666 
667 static void
668 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
669 {
670 	if (stop_cnic)
671 		bnx2_cnic_stop(bp);
672 	if (netif_running(bp->dev)) {
673 		bnx2_napi_disable(bp);
674 		netif_tx_disable(bp->dev);
675 	}
676 	bnx2_disable_int_sync(bp);
677 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
678 }
679 
680 static void
681 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
682 {
683 	if (atomic_dec_and_test(&bp->intr_sem)) {
684 		if (netif_running(bp->dev)) {
685 			netif_tx_wake_all_queues(bp->dev);
686 			spin_lock_bh(&bp->phy_lock);
687 			if (bp->link_up)
688 				netif_carrier_on(bp->dev);
689 			spin_unlock_bh(&bp->phy_lock);
690 			bnx2_napi_enable(bp);
691 			bnx2_enable_int(bp);
692 			if (start_cnic)
693 				bnx2_cnic_start(bp);
694 		}
695 	}
696 }
697 
698 static void
699 bnx2_free_tx_mem(struct bnx2 *bp)
700 {
701 	int i;
702 
703 	for (i = 0; i < bp->num_tx_rings; i++) {
704 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
705 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
706 
707 		if (txr->tx_desc_ring) {
708 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
709 					  txr->tx_desc_ring,
710 					  txr->tx_desc_mapping);
711 			txr->tx_desc_ring = NULL;
712 		}
713 		kfree(txr->tx_buf_ring);
714 		txr->tx_buf_ring = NULL;
715 	}
716 }
717 
718 static void
719 bnx2_free_rx_mem(struct bnx2 *bp)
720 {
721 	int i;
722 
723 	for (i = 0; i < bp->num_rx_rings; i++) {
724 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
725 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
726 		int j;
727 
728 		for (j = 0; j < bp->rx_max_ring; j++) {
729 			if (rxr->rx_desc_ring[j])
730 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
731 						  rxr->rx_desc_ring[j],
732 						  rxr->rx_desc_mapping[j]);
733 			rxr->rx_desc_ring[j] = NULL;
734 		}
735 		vfree(rxr->rx_buf_ring);
736 		rxr->rx_buf_ring = NULL;
737 
738 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
739 			if (rxr->rx_pg_desc_ring[j])
740 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
741 						  rxr->rx_pg_desc_ring[j],
742 						  rxr->rx_pg_desc_mapping[j]);
743 			rxr->rx_pg_desc_ring[j] = NULL;
744 		}
745 		vfree(rxr->rx_pg_ring);
746 		rxr->rx_pg_ring = NULL;
747 	}
748 }
749 
750 static int
751 bnx2_alloc_tx_mem(struct bnx2 *bp)
752 {
753 	int i;
754 
755 	for (i = 0; i < bp->num_tx_rings; i++) {
756 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
757 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
758 
759 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
760 		if (txr->tx_buf_ring == NULL)
761 			return -ENOMEM;
762 
763 		txr->tx_desc_ring =
764 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
765 					   &txr->tx_desc_mapping, GFP_KERNEL);
766 		if (txr->tx_desc_ring == NULL)
767 			return -ENOMEM;
768 	}
769 	return 0;
770 }
771 
772 static int
773 bnx2_alloc_rx_mem(struct bnx2 *bp)
774 {
775 	int i;
776 
777 	for (i = 0; i < bp->num_rx_rings; i++) {
778 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
779 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
780 		int j;
781 
782 		rxr->rx_buf_ring =
783 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
784 		if (rxr->rx_buf_ring == NULL)
785 			return -ENOMEM;
786 
787 		for (j = 0; j < bp->rx_max_ring; j++) {
788 			rxr->rx_desc_ring[j] =
789 				dma_alloc_coherent(&bp->pdev->dev,
790 						   RXBD_RING_SIZE,
791 						   &rxr->rx_desc_mapping[j],
792 						   GFP_KERNEL);
793 			if (rxr->rx_desc_ring[j] == NULL)
794 				return -ENOMEM;
795 
796 		}
797 
798 		if (bp->rx_pg_ring_size) {
799 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
800 						  bp->rx_max_pg_ring);
801 			if (rxr->rx_pg_ring == NULL)
802 				return -ENOMEM;
803 
804 		}
805 
806 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
807 			rxr->rx_pg_desc_ring[j] =
808 				dma_alloc_coherent(&bp->pdev->dev,
809 						   RXBD_RING_SIZE,
810 						   &rxr->rx_pg_desc_mapping[j],
811 						   GFP_KERNEL);
812 			if (rxr->rx_pg_desc_ring[j] == NULL)
813 				return -ENOMEM;
814 
815 		}
816 	}
817 	return 0;
818 }
819 
820 static void
821 bnx2_free_stats_blk(struct net_device *dev)
822 {
823 	struct bnx2 *bp = netdev_priv(dev);
824 
825 	if (bp->status_blk) {
826 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
827 				  bp->status_blk,
828 				  bp->status_blk_mapping);
829 		bp->status_blk = NULL;
830 		bp->stats_blk = NULL;
831 	}
832 }
833 
834 static int
835 bnx2_alloc_stats_blk(struct net_device *dev)
836 {
837 	int status_blk_size;
838 	void *status_blk;
839 	struct bnx2 *bp = netdev_priv(dev);
840 
841 	/* Combine status and statistics blocks into one allocation. */
842 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
843 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
844 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
845 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
846 	bp->status_stats_size = status_blk_size +
847 				sizeof(struct statistics_block);
848 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
849 					 &bp->status_blk_mapping, GFP_KERNEL);
850 	if (status_blk == NULL)
851 		return -ENOMEM;
852 
853 	bp->status_blk = status_blk;
854 	bp->stats_blk = status_blk + status_blk_size;
855 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
856 
857 	return 0;
858 }
859 
860 static void
861 bnx2_free_mem(struct bnx2 *bp)
862 {
863 	int i;
864 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
865 
866 	bnx2_free_tx_mem(bp);
867 	bnx2_free_rx_mem(bp);
868 
869 	for (i = 0; i < bp->ctx_pages; i++) {
870 		if (bp->ctx_blk[i]) {
871 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
872 					  bp->ctx_blk[i],
873 					  bp->ctx_blk_mapping[i]);
874 			bp->ctx_blk[i] = NULL;
875 		}
876 	}
877 
878 	if (bnapi->status_blk.msi)
879 		bnapi->status_blk.msi = NULL;
880 }
881 
882 static int
883 bnx2_alloc_mem(struct bnx2 *bp)
884 {
885 	int i, err;
886 	struct bnx2_napi *bnapi;
887 
888 	bnapi = &bp->bnx2_napi[0];
889 	bnapi->status_blk.msi = bp->status_blk;
890 	bnapi->hw_tx_cons_ptr =
891 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
892 	bnapi->hw_rx_cons_ptr =
893 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
894 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
895 		for (i = 1; i < bp->irq_nvecs; i++) {
896 			struct status_block_msix *sblk;
897 
898 			bnapi = &bp->bnx2_napi[i];
899 
900 			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
901 			bnapi->status_blk.msix = sblk;
902 			bnapi->hw_tx_cons_ptr =
903 				&sblk->status_tx_quick_consumer_index;
904 			bnapi->hw_rx_cons_ptr =
905 				&sblk->status_rx_quick_consumer_index;
906 			bnapi->int_num = i << 24;
907 		}
908 	}
909 
910 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
911 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
912 		if (bp->ctx_pages == 0)
913 			bp->ctx_pages = 1;
914 		for (i = 0; i < bp->ctx_pages; i++) {
915 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
916 						BNX2_PAGE_SIZE,
917 						&bp->ctx_blk_mapping[i],
918 						GFP_KERNEL);
919 			if (bp->ctx_blk[i] == NULL)
920 				goto alloc_mem_err;
921 		}
922 	}
923 
924 	err = bnx2_alloc_rx_mem(bp);
925 	if (err)
926 		goto alloc_mem_err;
927 
928 	err = bnx2_alloc_tx_mem(bp);
929 	if (err)
930 		goto alloc_mem_err;
931 
932 	return 0;
933 
934 alloc_mem_err:
935 	bnx2_free_mem(bp);
936 	return -ENOMEM;
937 }
938 
939 static void
940 bnx2_report_fw_link(struct bnx2 *bp)
941 {
942 	u32 fw_link_status = 0;
943 
944 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
945 		return;
946 
947 	if (bp->link_up) {
948 		u32 bmsr;
949 
950 		switch (bp->line_speed) {
951 		case SPEED_10:
952 			if (bp->duplex == DUPLEX_HALF)
953 				fw_link_status = BNX2_LINK_STATUS_10HALF;
954 			else
955 				fw_link_status = BNX2_LINK_STATUS_10FULL;
956 			break;
957 		case SPEED_100:
958 			if (bp->duplex == DUPLEX_HALF)
959 				fw_link_status = BNX2_LINK_STATUS_100HALF;
960 			else
961 				fw_link_status = BNX2_LINK_STATUS_100FULL;
962 			break;
963 		case SPEED_1000:
964 			if (bp->duplex == DUPLEX_HALF)
965 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
966 			else
967 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
968 			break;
969 		case SPEED_2500:
970 			if (bp->duplex == DUPLEX_HALF)
971 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
972 			else
973 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
974 			break;
975 		}
976 
977 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
978 
979 		if (bp->autoneg) {
980 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
981 
982 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
983 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
984 
985 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
986 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
987 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
988 			else
989 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
990 		}
991 	}
992 	else
993 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
994 
995 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
996 }
997 
998 static char *
999 bnx2_xceiver_str(struct bnx2 *bp)
1000 {
1001 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
1002 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1003 		 "Copper");
1004 }
1005 
1006 static void
1007 bnx2_report_link(struct bnx2 *bp)
1008 {
1009 	if (bp->link_up) {
1010 		netif_carrier_on(bp->dev);
1011 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1012 			    bnx2_xceiver_str(bp),
1013 			    bp->line_speed,
1014 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1015 
1016 		if (bp->flow_ctrl) {
1017 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1018 				pr_cont(", receive ");
1019 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1020 					pr_cont("& transmit ");
1021 			}
1022 			else {
1023 				pr_cont(", transmit ");
1024 			}
1025 			pr_cont("flow control ON");
1026 		}
1027 		pr_cont("\n");
1028 	} else {
1029 		netif_carrier_off(bp->dev);
1030 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1031 			   bnx2_xceiver_str(bp));
1032 	}
1033 
1034 	bnx2_report_fw_link(bp);
1035 }
1036 
1037 static void
1038 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1039 {
1040 	u32 local_adv, remote_adv;
1041 
1042 	bp->flow_ctrl = 0;
1043 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1044 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1045 
1046 		if (bp->duplex == DUPLEX_FULL) {
1047 			bp->flow_ctrl = bp->req_flow_ctrl;
1048 		}
1049 		return;
1050 	}
1051 
1052 	if (bp->duplex != DUPLEX_FULL) {
1053 		return;
1054 	}
1055 
1056 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1057 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1058 		u32 val;
1059 
1060 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1061 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1062 			bp->flow_ctrl |= FLOW_CTRL_TX;
1063 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1064 			bp->flow_ctrl |= FLOW_CTRL_RX;
1065 		return;
1066 	}
1067 
1068 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1069 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1070 
1071 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1072 		u32 new_local_adv = 0;
1073 		u32 new_remote_adv = 0;
1074 
1075 		if (local_adv & ADVERTISE_1000XPAUSE)
1076 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1077 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1078 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1079 		if (remote_adv & ADVERTISE_1000XPAUSE)
1080 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1081 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1082 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1083 
1084 		local_adv = new_local_adv;
1085 		remote_adv = new_remote_adv;
1086 	}
1087 
1088 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1089 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1090 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1091 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1092 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1093 			}
1094 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1095 				bp->flow_ctrl = FLOW_CTRL_RX;
1096 			}
1097 		}
1098 		else {
1099 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1100 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1101 			}
1102 		}
1103 	}
1104 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1105 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1106 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1107 
1108 			bp->flow_ctrl = FLOW_CTRL_TX;
1109 		}
1110 	}
1111 }
1112 
1113 static int
1114 bnx2_5709s_linkup(struct bnx2 *bp)
1115 {
1116 	u32 val, speed;
1117 
1118 	bp->link_up = 1;
1119 
1120 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1121 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1122 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1123 
1124 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1125 		bp->line_speed = bp->req_line_speed;
1126 		bp->duplex = bp->req_duplex;
1127 		return 0;
1128 	}
1129 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1130 	switch (speed) {
1131 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1132 			bp->line_speed = SPEED_10;
1133 			break;
1134 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1135 			bp->line_speed = SPEED_100;
1136 			break;
1137 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1138 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1139 			bp->line_speed = SPEED_1000;
1140 			break;
1141 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1142 			bp->line_speed = SPEED_2500;
1143 			break;
1144 	}
1145 	if (val & MII_BNX2_GP_TOP_AN_FD)
1146 		bp->duplex = DUPLEX_FULL;
1147 	else
1148 		bp->duplex = DUPLEX_HALF;
1149 	return 0;
1150 }
1151 
1152 static int
1153 bnx2_5708s_linkup(struct bnx2 *bp)
1154 {
1155 	u32 val;
1156 
1157 	bp->link_up = 1;
1158 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1159 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1160 		case BCM5708S_1000X_STAT1_SPEED_10:
1161 			bp->line_speed = SPEED_10;
1162 			break;
1163 		case BCM5708S_1000X_STAT1_SPEED_100:
1164 			bp->line_speed = SPEED_100;
1165 			break;
1166 		case BCM5708S_1000X_STAT1_SPEED_1G:
1167 			bp->line_speed = SPEED_1000;
1168 			break;
1169 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1170 			bp->line_speed = SPEED_2500;
1171 			break;
1172 	}
1173 	if (val & BCM5708S_1000X_STAT1_FD)
1174 		bp->duplex = DUPLEX_FULL;
1175 	else
1176 		bp->duplex = DUPLEX_HALF;
1177 
1178 	return 0;
1179 }
1180 
1181 static int
1182 bnx2_5706s_linkup(struct bnx2 *bp)
1183 {
1184 	u32 bmcr, local_adv, remote_adv, common;
1185 
1186 	bp->link_up = 1;
1187 	bp->line_speed = SPEED_1000;
1188 
1189 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1190 	if (bmcr & BMCR_FULLDPLX) {
1191 		bp->duplex = DUPLEX_FULL;
1192 	}
1193 	else {
1194 		bp->duplex = DUPLEX_HALF;
1195 	}
1196 
1197 	if (!(bmcr & BMCR_ANENABLE)) {
1198 		return 0;
1199 	}
1200 
1201 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1202 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1203 
1204 	common = local_adv & remote_adv;
1205 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1206 
1207 		if (common & ADVERTISE_1000XFULL) {
1208 			bp->duplex = DUPLEX_FULL;
1209 		}
1210 		else {
1211 			bp->duplex = DUPLEX_HALF;
1212 		}
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 static int
1219 bnx2_copper_linkup(struct bnx2 *bp)
1220 {
1221 	u32 bmcr;
1222 
1223 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1224 
1225 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1226 	if (bmcr & BMCR_ANENABLE) {
1227 		u32 local_adv, remote_adv, common;
1228 
1229 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1230 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1231 
1232 		common = local_adv & (remote_adv >> 2);
1233 		if (common & ADVERTISE_1000FULL) {
1234 			bp->line_speed = SPEED_1000;
1235 			bp->duplex = DUPLEX_FULL;
1236 		}
1237 		else if (common & ADVERTISE_1000HALF) {
1238 			bp->line_speed = SPEED_1000;
1239 			bp->duplex = DUPLEX_HALF;
1240 		}
1241 		else {
1242 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1243 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1244 
1245 			common = local_adv & remote_adv;
1246 			if (common & ADVERTISE_100FULL) {
1247 				bp->line_speed = SPEED_100;
1248 				bp->duplex = DUPLEX_FULL;
1249 			}
1250 			else if (common & ADVERTISE_100HALF) {
1251 				bp->line_speed = SPEED_100;
1252 				bp->duplex = DUPLEX_HALF;
1253 			}
1254 			else if (common & ADVERTISE_10FULL) {
1255 				bp->line_speed = SPEED_10;
1256 				bp->duplex = DUPLEX_FULL;
1257 			}
1258 			else if (common & ADVERTISE_10HALF) {
1259 				bp->line_speed = SPEED_10;
1260 				bp->duplex = DUPLEX_HALF;
1261 			}
1262 			else {
1263 				bp->line_speed = 0;
1264 				bp->link_up = 0;
1265 			}
1266 		}
1267 	}
1268 	else {
1269 		if (bmcr & BMCR_SPEED100) {
1270 			bp->line_speed = SPEED_100;
1271 		}
1272 		else {
1273 			bp->line_speed = SPEED_10;
1274 		}
1275 		if (bmcr & BMCR_FULLDPLX) {
1276 			bp->duplex = DUPLEX_FULL;
1277 		}
1278 		else {
1279 			bp->duplex = DUPLEX_HALF;
1280 		}
1281 	}
1282 
1283 	if (bp->link_up) {
1284 		u32 ext_status;
1285 
1286 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1287 		if (ext_status & EXT_STATUS_MDIX)
1288 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 static void
1295 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1296 {
1297 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1298 
1299 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1300 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1301 	val |= 0x02 << 8;
1302 
1303 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1304 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1305 
1306 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1307 }
1308 
1309 static void
1310 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1311 {
1312 	int i;
1313 	u32 cid;
1314 
1315 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1316 		if (i == 1)
1317 			cid = RX_RSS_CID;
1318 		bnx2_init_rx_context(bp, cid);
1319 	}
1320 }
1321 
1322 static void
1323 bnx2_set_mac_link(struct bnx2 *bp)
1324 {
1325 	u32 val;
1326 
1327 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1328 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1329 		(bp->duplex == DUPLEX_HALF)) {
1330 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1331 	}
1332 
1333 	/* Configure the EMAC mode register. */
1334 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1335 
1336 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1337 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1338 		BNX2_EMAC_MODE_25G_MODE);
1339 
1340 	if (bp->link_up) {
1341 		switch (bp->line_speed) {
1342 			case SPEED_10:
1343 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1344 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1345 					break;
1346 				}
1347 				/* fall through */
1348 			case SPEED_100:
1349 				val |= BNX2_EMAC_MODE_PORT_MII;
1350 				break;
1351 			case SPEED_2500:
1352 				val |= BNX2_EMAC_MODE_25G_MODE;
1353 				/* fall through */
1354 			case SPEED_1000:
1355 				val |= BNX2_EMAC_MODE_PORT_GMII;
1356 				break;
1357 		}
1358 	}
1359 	else {
1360 		val |= BNX2_EMAC_MODE_PORT_GMII;
1361 	}
1362 
1363 	/* Set the MAC to operate in the appropriate duplex mode. */
1364 	if (bp->duplex == DUPLEX_HALF)
1365 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1366 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1367 
1368 	/* Enable/disable rx PAUSE. */
1369 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1370 
1371 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1372 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1373 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1374 
1375 	/* Enable/disable tx PAUSE. */
1376 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1377 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1378 
1379 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1380 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1381 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1382 
1383 	/* Acknowledge the interrupt. */
1384 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1385 
1386 	bnx2_init_all_rx_contexts(bp);
1387 }
1388 
1389 static void
1390 bnx2_enable_bmsr1(struct bnx2 *bp)
1391 {
1392 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1393 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1394 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1395 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1396 }
1397 
1398 static void
1399 bnx2_disable_bmsr1(struct bnx2 *bp)
1400 {
1401 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1402 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1403 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1404 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1405 }
1406 
1407 static int
1408 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1409 {
1410 	u32 up1;
1411 	int ret = 1;
1412 
1413 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1414 		return 0;
1415 
1416 	if (bp->autoneg & AUTONEG_SPEED)
1417 		bp->advertising |= ADVERTISED_2500baseX_Full;
1418 
1419 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1420 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1421 
1422 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1423 	if (!(up1 & BCM5708S_UP1_2G5)) {
1424 		up1 |= BCM5708S_UP1_2G5;
1425 		bnx2_write_phy(bp, bp->mii_up1, up1);
1426 		ret = 0;
1427 	}
1428 
1429 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1430 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1431 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1432 
1433 	return ret;
1434 }
1435 
1436 static int
1437 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1438 {
1439 	u32 up1;
1440 	int ret = 0;
1441 
1442 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1443 		return 0;
1444 
1445 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1446 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1447 
1448 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1449 	if (up1 & BCM5708S_UP1_2G5) {
1450 		up1 &= ~BCM5708S_UP1_2G5;
1451 		bnx2_write_phy(bp, bp->mii_up1, up1);
1452 		ret = 1;
1453 	}
1454 
1455 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1456 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1457 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1458 
1459 	return ret;
1460 }
1461 
1462 static void
1463 bnx2_enable_forced_2g5(struct bnx2 *bp)
1464 {
1465 	u32 uninitialized_var(bmcr);
1466 	int err;
1467 
1468 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1469 		return;
1470 
1471 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1472 		u32 val;
1473 
1474 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1475 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1476 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1477 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1478 			val |= MII_BNX2_SD_MISC1_FORCE |
1479 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1480 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1481 		}
1482 
1483 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1484 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1485 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1486 
1487 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1488 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1489 		if (!err)
1490 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1491 	} else {
1492 		return;
1493 	}
1494 
1495 	if (err)
1496 		return;
1497 
1498 	if (bp->autoneg & AUTONEG_SPEED) {
1499 		bmcr &= ~BMCR_ANENABLE;
1500 		if (bp->req_duplex == DUPLEX_FULL)
1501 			bmcr |= BMCR_FULLDPLX;
1502 	}
1503 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1504 }
1505 
1506 static void
1507 bnx2_disable_forced_2g5(struct bnx2 *bp)
1508 {
1509 	u32 uninitialized_var(bmcr);
1510 	int err;
1511 
1512 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1513 		return;
1514 
1515 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1516 		u32 val;
1517 
1518 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1519 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1520 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1521 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1522 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1523 		}
1524 
1525 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1526 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1527 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1528 
1529 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1530 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1531 		if (!err)
1532 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1533 	} else {
1534 		return;
1535 	}
1536 
1537 	if (err)
1538 		return;
1539 
1540 	if (bp->autoneg & AUTONEG_SPEED)
1541 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1542 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1543 }
1544 
1545 static void
1546 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1547 {
1548 	u32 val;
1549 
1550 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1551 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1552 	if (start)
1553 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1554 	else
1555 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1556 }
1557 
1558 static int
1559 bnx2_set_link(struct bnx2 *bp)
1560 {
1561 	u32 bmsr;
1562 	u8 link_up;
1563 
1564 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1565 		bp->link_up = 1;
1566 		return 0;
1567 	}
1568 
1569 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1570 		return 0;
1571 
1572 	link_up = bp->link_up;
1573 
1574 	bnx2_enable_bmsr1(bp);
1575 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1576 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1577 	bnx2_disable_bmsr1(bp);
1578 
1579 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1580 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1581 		u32 val, an_dbg;
1582 
1583 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1584 			bnx2_5706s_force_link_dn(bp, 0);
1585 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1586 		}
1587 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1588 
1589 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1590 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1591 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1592 
1593 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1594 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1595 			bmsr |= BMSR_LSTATUS;
1596 		else
1597 			bmsr &= ~BMSR_LSTATUS;
1598 	}
1599 
1600 	if (bmsr & BMSR_LSTATUS) {
1601 		bp->link_up = 1;
1602 
1603 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1604 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1605 				bnx2_5706s_linkup(bp);
1606 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1607 				bnx2_5708s_linkup(bp);
1608 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1609 				bnx2_5709s_linkup(bp);
1610 		}
1611 		else {
1612 			bnx2_copper_linkup(bp);
1613 		}
1614 		bnx2_resolve_flow_ctrl(bp);
1615 	}
1616 	else {
1617 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1618 		    (bp->autoneg & AUTONEG_SPEED))
1619 			bnx2_disable_forced_2g5(bp);
1620 
1621 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1622 			u32 bmcr;
1623 
1624 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1625 			bmcr |= BMCR_ANENABLE;
1626 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1627 
1628 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1629 		}
1630 		bp->link_up = 0;
1631 	}
1632 
1633 	if (bp->link_up != link_up) {
1634 		bnx2_report_link(bp);
1635 	}
1636 
1637 	bnx2_set_mac_link(bp);
1638 
1639 	return 0;
1640 }
1641 
1642 static int
1643 bnx2_reset_phy(struct bnx2 *bp)
1644 {
1645 	int i;
1646 	u32 reg;
1647 
1648         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1649 
1650 #define PHY_RESET_MAX_WAIT 100
1651 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1652 		udelay(10);
1653 
1654 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1655 		if (!(reg & BMCR_RESET)) {
1656 			udelay(20);
1657 			break;
1658 		}
1659 	}
1660 	if (i == PHY_RESET_MAX_WAIT) {
1661 		return -EBUSY;
1662 	}
1663 	return 0;
1664 }
1665 
1666 static u32
1667 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1668 {
1669 	u32 adv = 0;
1670 
1671 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1672 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1673 
1674 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675 			adv = ADVERTISE_1000XPAUSE;
1676 		}
1677 		else {
1678 			adv = ADVERTISE_PAUSE_CAP;
1679 		}
1680 	}
1681 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1682 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1683 			adv = ADVERTISE_1000XPSE_ASYM;
1684 		}
1685 		else {
1686 			adv = ADVERTISE_PAUSE_ASYM;
1687 		}
1688 	}
1689 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1690 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1691 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1692 		}
1693 		else {
1694 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1695 		}
1696 	}
1697 	return adv;
1698 }
1699 
1700 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1701 
1702 static int
1703 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1704 __releases(&bp->phy_lock)
1705 __acquires(&bp->phy_lock)
1706 {
1707 	u32 speed_arg = 0, pause_adv;
1708 
1709 	pause_adv = bnx2_phy_get_pause_adv(bp);
1710 
1711 	if (bp->autoneg & AUTONEG_SPEED) {
1712 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1713 		if (bp->advertising & ADVERTISED_10baseT_Half)
1714 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1715 		if (bp->advertising & ADVERTISED_10baseT_Full)
1716 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1717 		if (bp->advertising & ADVERTISED_100baseT_Half)
1718 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1719 		if (bp->advertising & ADVERTISED_100baseT_Full)
1720 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1721 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1722 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1723 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1724 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1725 	} else {
1726 		if (bp->req_line_speed == SPEED_2500)
1727 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1728 		else if (bp->req_line_speed == SPEED_1000)
1729 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1730 		else if (bp->req_line_speed == SPEED_100) {
1731 			if (bp->req_duplex == DUPLEX_FULL)
1732 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1733 			else
1734 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1735 		} else if (bp->req_line_speed == SPEED_10) {
1736 			if (bp->req_duplex == DUPLEX_FULL)
1737 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1738 			else
1739 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1740 		}
1741 	}
1742 
1743 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1744 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1745 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1746 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1747 
1748 	if (port == PORT_TP)
1749 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1750 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1751 
1752 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1753 
1754 	spin_unlock_bh(&bp->phy_lock);
1755 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1756 	spin_lock_bh(&bp->phy_lock);
1757 
1758 	return 0;
1759 }
1760 
1761 static int
1762 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1763 __releases(&bp->phy_lock)
1764 __acquires(&bp->phy_lock)
1765 {
1766 	u32 adv, bmcr;
1767 	u32 new_adv = 0;
1768 
1769 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1770 		return bnx2_setup_remote_phy(bp, port);
1771 
1772 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1773 		u32 new_bmcr;
1774 		int force_link_down = 0;
1775 
1776 		if (bp->req_line_speed == SPEED_2500) {
1777 			if (!bnx2_test_and_enable_2g5(bp))
1778 				force_link_down = 1;
1779 		} else if (bp->req_line_speed == SPEED_1000) {
1780 			if (bnx2_test_and_disable_2g5(bp))
1781 				force_link_down = 1;
1782 		}
1783 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1784 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1785 
1786 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1787 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1788 		new_bmcr |= BMCR_SPEED1000;
1789 
1790 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1791 			if (bp->req_line_speed == SPEED_2500)
1792 				bnx2_enable_forced_2g5(bp);
1793 			else if (bp->req_line_speed == SPEED_1000) {
1794 				bnx2_disable_forced_2g5(bp);
1795 				new_bmcr &= ~0x2000;
1796 			}
1797 
1798 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1799 			if (bp->req_line_speed == SPEED_2500)
1800 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1801 			else
1802 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1803 		}
1804 
1805 		if (bp->req_duplex == DUPLEX_FULL) {
1806 			adv |= ADVERTISE_1000XFULL;
1807 			new_bmcr |= BMCR_FULLDPLX;
1808 		}
1809 		else {
1810 			adv |= ADVERTISE_1000XHALF;
1811 			new_bmcr &= ~BMCR_FULLDPLX;
1812 		}
1813 		if ((new_bmcr != bmcr) || (force_link_down)) {
1814 			/* Force a link down visible on the other side */
1815 			if (bp->link_up) {
1816 				bnx2_write_phy(bp, bp->mii_adv, adv &
1817 					       ~(ADVERTISE_1000XFULL |
1818 						 ADVERTISE_1000XHALF));
1819 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1820 					BMCR_ANRESTART | BMCR_ANENABLE);
1821 
1822 				bp->link_up = 0;
1823 				netif_carrier_off(bp->dev);
1824 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1825 				bnx2_report_link(bp);
1826 			}
1827 			bnx2_write_phy(bp, bp->mii_adv, adv);
1828 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1829 		} else {
1830 			bnx2_resolve_flow_ctrl(bp);
1831 			bnx2_set_mac_link(bp);
1832 		}
1833 		return 0;
1834 	}
1835 
1836 	bnx2_test_and_enable_2g5(bp);
1837 
1838 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1839 		new_adv |= ADVERTISE_1000XFULL;
1840 
1841 	new_adv |= bnx2_phy_get_pause_adv(bp);
1842 
1843 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1844 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1845 
1846 	bp->serdes_an_pending = 0;
1847 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1848 		/* Force a link down visible on the other side */
1849 		if (bp->link_up) {
1850 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1851 			spin_unlock_bh(&bp->phy_lock);
1852 			msleep(20);
1853 			spin_lock_bh(&bp->phy_lock);
1854 		}
1855 
1856 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1857 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1858 			BMCR_ANENABLE);
1859 		/* Speed up link-up time when the link partner
1860 		 * does not autonegotiate which is very common
1861 		 * in blade servers. Some blade servers use
1862 		 * IPMI for kerboard input and it's important
1863 		 * to minimize link disruptions. Autoneg. involves
1864 		 * exchanging base pages plus 3 next pages and
1865 		 * normally completes in about 120 msec.
1866 		 */
1867 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1868 		bp->serdes_an_pending = 1;
1869 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1870 	} else {
1871 		bnx2_resolve_flow_ctrl(bp);
1872 		bnx2_set_mac_link(bp);
1873 	}
1874 
1875 	return 0;
1876 }
1877 
1878 #define ETHTOOL_ALL_FIBRE_SPEED						\
1879 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1880 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1881 		(ADVERTISED_1000baseT_Full)
1882 
1883 #define ETHTOOL_ALL_COPPER_SPEED					\
1884 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1885 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1886 	ADVERTISED_1000baseT_Full)
1887 
1888 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1889 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1890 
1891 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1892 
1893 static void
1894 bnx2_set_default_remote_link(struct bnx2 *bp)
1895 {
1896 	u32 link;
1897 
1898 	if (bp->phy_port == PORT_TP)
1899 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1900 	else
1901 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1902 
1903 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1904 		bp->req_line_speed = 0;
1905 		bp->autoneg |= AUTONEG_SPEED;
1906 		bp->advertising = ADVERTISED_Autoneg;
1907 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1908 			bp->advertising |= ADVERTISED_10baseT_Half;
1909 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1910 			bp->advertising |= ADVERTISED_10baseT_Full;
1911 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1912 			bp->advertising |= ADVERTISED_100baseT_Half;
1913 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1914 			bp->advertising |= ADVERTISED_100baseT_Full;
1915 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1916 			bp->advertising |= ADVERTISED_1000baseT_Full;
1917 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1918 			bp->advertising |= ADVERTISED_2500baseX_Full;
1919 	} else {
1920 		bp->autoneg = 0;
1921 		bp->advertising = 0;
1922 		bp->req_duplex = DUPLEX_FULL;
1923 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1924 			bp->req_line_speed = SPEED_10;
1925 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1926 				bp->req_duplex = DUPLEX_HALF;
1927 		}
1928 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1929 			bp->req_line_speed = SPEED_100;
1930 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1931 				bp->req_duplex = DUPLEX_HALF;
1932 		}
1933 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1934 			bp->req_line_speed = SPEED_1000;
1935 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1936 			bp->req_line_speed = SPEED_2500;
1937 	}
1938 }
1939 
1940 static void
1941 bnx2_set_default_link(struct bnx2 *bp)
1942 {
1943 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1944 		bnx2_set_default_remote_link(bp);
1945 		return;
1946 	}
1947 
1948 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1949 	bp->req_line_speed = 0;
1950 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1951 		u32 reg;
1952 
1953 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1954 
1955 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1956 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1957 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1958 			bp->autoneg = 0;
1959 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1960 			bp->req_duplex = DUPLEX_FULL;
1961 		}
1962 	} else
1963 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1964 }
1965 
1966 static void
1967 bnx2_send_heart_beat(struct bnx2 *bp)
1968 {
1969 	u32 msg;
1970 	u32 addr;
1971 
1972 	spin_lock(&bp->indirect_lock);
1973 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1974 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1975 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1976 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1977 	spin_unlock(&bp->indirect_lock);
1978 }
1979 
1980 static void
1981 bnx2_remote_phy_event(struct bnx2 *bp)
1982 {
1983 	u32 msg;
1984 	u8 link_up = bp->link_up;
1985 	u8 old_port;
1986 
1987 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1988 
1989 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1990 		bnx2_send_heart_beat(bp);
1991 
1992 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1993 
1994 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1995 		bp->link_up = 0;
1996 	else {
1997 		u32 speed;
1998 
1999 		bp->link_up = 1;
2000 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
2001 		bp->duplex = DUPLEX_FULL;
2002 		switch (speed) {
2003 			case BNX2_LINK_STATUS_10HALF:
2004 				bp->duplex = DUPLEX_HALF;
2005 				/* fall through */
2006 			case BNX2_LINK_STATUS_10FULL:
2007 				bp->line_speed = SPEED_10;
2008 				break;
2009 			case BNX2_LINK_STATUS_100HALF:
2010 				bp->duplex = DUPLEX_HALF;
2011 				/* fall through */
2012 			case BNX2_LINK_STATUS_100BASE_T4:
2013 			case BNX2_LINK_STATUS_100FULL:
2014 				bp->line_speed = SPEED_100;
2015 				break;
2016 			case BNX2_LINK_STATUS_1000HALF:
2017 				bp->duplex = DUPLEX_HALF;
2018 				/* fall through */
2019 			case BNX2_LINK_STATUS_1000FULL:
2020 				bp->line_speed = SPEED_1000;
2021 				break;
2022 			case BNX2_LINK_STATUS_2500HALF:
2023 				bp->duplex = DUPLEX_HALF;
2024 				/* fall through */
2025 			case BNX2_LINK_STATUS_2500FULL:
2026 				bp->line_speed = SPEED_2500;
2027 				break;
2028 			default:
2029 				bp->line_speed = 0;
2030 				break;
2031 		}
2032 
2033 		bp->flow_ctrl = 0;
2034 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2035 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2036 			if (bp->duplex == DUPLEX_FULL)
2037 				bp->flow_ctrl = bp->req_flow_ctrl;
2038 		} else {
2039 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2040 				bp->flow_ctrl |= FLOW_CTRL_TX;
2041 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2042 				bp->flow_ctrl |= FLOW_CTRL_RX;
2043 		}
2044 
2045 		old_port = bp->phy_port;
2046 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2047 			bp->phy_port = PORT_FIBRE;
2048 		else
2049 			bp->phy_port = PORT_TP;
2050 
2051 		if (old_port != bp->phy_port)
2052 			bnx2_set_default_link(bp);
2053 
2054 	}
2055 	if (bp->link_up != link_up)
2056 		bnx2_report_link(bp);
2057 
2058 	bnx2_set_mac_link(bp);
2059 }
2060 
2061 static int
2062 bnx2_set_remote_link(struct bnx2 *bp)
2063 {
2064 	u32 evt_code;
2065 
2066 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2067 	switch (evt_code) {
2068 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2069 			bnx2_remote_phy_event(bp);
2070 			break;
2071 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2072 		default:
2073 			bnx2_send_heart_beat(bp);
2074 			break;
2075 	}
2076 	return 0;
2077 }
2078 
2079 static int
2080 bnx2_setup_copper_phy(struct bnx2 *bp)
2081 __releases(&bp->phy_lock)
2082 __acquires(&bp->phy_lock)
2083 {
2084 	u32 bmcr, adv_reg, new_adv = 0;
2085 	u32 new_bmcr;
2086 
2087 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2088 
2089 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2090 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2091 		    ADVERTISE_PAUSE_ASYM);
2092 
2093 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2094 
2095 	if (bp->autoneg & AUTONEG_SPEED) {
2096 		u32 adv1000_reg;
2097 		u32 new_adv1000 = 0;
2098 
2099 		new_adv |= bnx2_phy_get_pause_adv(bp);
2100 
2101 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2102 		adv1000_reg &= PHY_ALL_1000_SPEED;
2103 
2104 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2105 		if ((adv1000_reg != new_adv1000) ||
2106 			(adv_reg != new_adv) ||
2107 			((bmcr & BMCR_ANENABLE) == 0)) {
2108 
2109 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2110 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2111 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2112 				BMCR_ANENABLE);
2113 		}
2114 		else if (bp->link_up) {
2115 			/* Flow ctrl may have changed from auto to forced */
2116 			/* or vice-versa. */
2117 
2118 			bnx2_resolve_flow_ctrl(bp);
2119 			bnx2_set_mac_link(bp);
2120 		}
2121 		return 0;
2122 	}
2123 
2124 	/* advertise nothing when forcing speed */
2125 	if (adv_reg != new_adv)
2126 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2127 
2128 	new_bmcr = 0;
2129 	if (bp->req_line_speed == SPEED_100) {
2130 		new_bmcr |= BMCR_SPEED100;
2131 	}
2132 	if (bp->req_duplex == DUPLEX_FULL) {
2133 		new_bmcr |= BMCR_FULLDPLX;
2134 	}
2135 	if (new_bmcr != bmcr) {
2136 		u32 bmsr;
2137 
2138 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2139 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2140 
2141 		if (bmsr & BMSR_LSTATUS) {
2142 			/* Force link down */
2143 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2144 			spin_unlock_bh(&bp->phy_lock);
2145 			msleep(50);
2146 			spin_lock_bh(&bp->phy_lock);
2147 
2148 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2149 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2150 		}
2151 
2152 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2153 
2154 		/* Normally, the new speed is setup after the link has
2155 		 * gone down and up again. In some cases, link will not go
2156 		 * down so we need to set up the new speed here.
2157 		 */
2158 		if (bmsr & BMSR_LSTATUS) {
2159 			bp->line_speed = bp->req_line_speed;
2160 			bp->duplex = bp->req_duplex;
2161 			bnx2_resolve_flow_ctrl(bp);
2162 			bnx2_set_mac_link(bp);
2163 		}
2164 	} else {
2165 		bnx2_resolve_flow_ctrl(bp);
2166 		bnx2_set_mac_link(bp);
2167 	}
2168 	return 0;
2169 }
2170 
2171 static int
2172 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2173 __releases(&bp->phy_lock)
2174 __acquires(&bp->phy_lock)
2175 {
2176 	if (bp->loopback == MAC_LOOPBACK)
2177 		return 0;
2178 
2179 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2180 		return bnx2_setup_serdes_phy(bp, port);
2181 	}
2182 	else {
2183 		return bnx2_setup_copper_phy(bp);
2184 	}
2185 }
2186 
2187 static int
2188 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2189 {
2190 	u32 val;
2191 
2192 	bp->mii_bmcr = MII_BMCR + 0x10;
2193 	bp->mii_bmsr = MII_BMSR + 0x10;
2194 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2195 	bp->mii_adv = MII_ADVERTISE + 0x10;
2196 	bp->mii_lpa = MII_LPA + 0x10;
2197 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2198 
2199 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2200 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2201 
2202 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2203 	if (reset_phy)
2204 		bnx2_reset_phy(bp);
2205 
2206 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2207 
2208 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2209 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2210 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2211 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2212 
2213 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2214 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2215 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2216 		val |= BCM5708S_UP1_2G5;
2217 	else
2218 		val &= ~BCM5708S_UP1_2G5;
2219 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2220 
2221 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2222 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2223 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2224 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2225 
2226 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2227 
2228 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2229 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2230 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2231 
2232 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2233 
2234 	return 0;
2235 }
2236 
2237 static int
2238 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2239 {
2240 	u32 val;
2241 
2242 	if (reset_phy)
2243 		bnx2_reset_phy(bp);
2244 
2245 	bp->mii_up1 = BCM5708S_UP1;
2246 
2247 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2248 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2249 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2250 
2251 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2252 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2253 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2254 
2255 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2256 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2257 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2258 
2259 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2260 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2261 		val |= BCM5708S_UP1_2G5;
2262 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2263 	}
2264 
2265 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2266 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2267 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2268 		/* increase tx signal amplitude */
2269 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2270 			       BCM5708S_BLK_ADDR_TX_MISC);
2271 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2272 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2273 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2274 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2275 	}
2276 
2277 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2278 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2279 
2280 	if (val) {
2281 		u32 is_backplane;
2282 
2283 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2284 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2285 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2286 				       BCM5708S_BLK_ADDR_TX_MISC);
2287 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2288 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2289 				       BCM5708S_BLK_ADDR_DIG);
2290 		}
2291 	}
2292 	return 0;
2293 }
2294 
2295 static int
2296 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2297 {
2298 	if (reset_phy)
2299 		bnx2_reset_phy(bp);
2300 
2301 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2302 
2303 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2304 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2305 
2306 	if (bp->dev->mtu > ETH_DATA_LEN) {
2307 		u32 val;
2308 
2309 		/* Set extended packet length bit */
2310 		bnx2_write_phy(bp, 0x18, 0x7);
2311 		bnx2_read_phy(bp, 0x18, &val);
2312 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2313 
2314 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2315 		bnx2_read_phy(bp, 0x1c, &val);
2316 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2317 	}
2318 	else {
2319 		u32 val;
2320 
2321 		bnx2_write_phy(bp, 0x18, 0x7);
2322 		bnx2_read_phy(bp, 0x18, &val);
2323 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2324 
2325 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2326 		bnx2_read_phy(bp, 0x1c, &val);
2327 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 static int
2334 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2335 {
2336 	u32 val;
2337 
2338 	if (reset_phy)
2339 		bnx2_reset_phy(bp);
2340 
2341 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2342 		bnx2_write_phy(bp, 0x18, 0x0c00);
2343 		bnx2_write_phy(bp, 0x17, 0x000a);
2344 		bnx2_write_phy(bp, 0x15, 0x310b);
2345 		bnx2_write_phy(bp, 0x17, 0x201f);
2346 		bnx2_write_phy(bp, 0x15, 0x9506);
2347 		bnx2_write_phy(bp, 0x17, 0x401f);
2348 		bnx2_write_phy(bp, 0x15, 0x14e2);
2349 		bnx2_write_phy(bp, 0x18, 0x0400);
2350 	}
2351 
2352 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2353 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2354 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2355 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2356 		val &= ~(1 << 8);
2357 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2358 	}
2359 
2360 	if (bp->dev->mtu > ETH_DATA_LEN) {
2361 		/* Set extended packet length bit */
2362 		bnx2_write_phy(bp, 0x18, 0x7);
2363 		bnx2_read_phy(bp, 0x18, &val);
2364 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2365 
2366 		bnx2_read_phy(bp, 0x10, &val);
2367 		bnx2_write_phy(bp, 0x10, val | 0x1);
2368 	}
2369 	else {
2370 		bnx2_write_phy(bp, 0x18, 0x7);
2371 		bnx2_read_phy(bp, 0x18, &val);
2372 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2373 
2374 		bnx2_read_phy(bp, 0x10, &val);
2375 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2376 	}
2377 
2378 	/* ethernet@wirespeed */
2379 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2380 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2381 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2382 
2383 	/* auto-mdix */
2384 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2385 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2386 
2387 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2388 	return 0;
2389 }
2390 
2391 
2392 static int
2393 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2394 __releases(&bp->phy_lock)
2395 __acquires(&bp->phy_lock)
2396 {
2397 	u32 val;
2398 	int rc = 0;
2399 
2400 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2401 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2402 
2403 	bp->mii_bmcr = MII_BMCR;
2404 	bp->mii_bmsr = MII_BMSR;
2405 	bp->mii_bmsr1 = MII_BMSR;
2406 	bp->mii_adv = MII_ADVERTISE;
2407 	bp->mii_lpa = MII_LPA;
2408 
2409 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2410 
2411 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2412 		goto setup_phy;
2413 
2414 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2415 	bp->phy_id = val << 16;
2416 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2417 	bp->phy_id |= val & 0xffff;
2418 
2419 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2420 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2421 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2422 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2423 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2424 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2425 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2426 	}
2427 	else {
2428 		rc = bnx2_init_copper_phy(bp, reset_phy);
2429 	}
2430 
2431 setup_phy:
2432 	if (!rc)
2433 		rc = bnx2_setup_phy(bp, bp->phy_port);
2434 
2435 	return rc;
2436 }
2437 
2438 static int
2439 bnx2_set_mac_loopback(struct bnx2 *bp)
2440 {
2441 	u32 mac_mode;
2442 
2443 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2444 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2445 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2446 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2447 	bp->link_up = 1;
2448 	return 0;
2449 }
2450 
2451 static int bnx2_test_link(struct bnx2 *);
2452 
2453 static int
2454 bnx2_set_phy_loopback(struct bnx2 *bp)
2455 {
2456 	u32 mac_mode;
2457 	int rc, i;
2458 
2459 	spin_lock_bh(&bp->phy_lock);
2460 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2461 			    BMCR_SPEED1000);
2462 	spin_unlock_bh(&bp->phy_lock);
2463 	if (rc)
2464 		return rc;
2465 
2466 	for (i = 0; i < 10; i++) {
2467 		if (bnx2_test_link(bp) == 0)
2468 			break;
2469 		msleep(100);
2470 	}
2471 
2472 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2473 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2474 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2475 		      BNX2_EMAC_MODE_25G_MODE);
2476 
2477 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2478 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2479 	bp->link_up = 1;
2480 	return 0;
2481 }
2482 
2483 static void
2484 bnx2_dump_mcp_state(struct bnx2 *bp)
2485 {
2486 	struct net_device *dev = bp->dev;
2487 	u32 mcp_p0, mcp_p1;
2488 
2489 	netdev_err(dev, "<--- start MCP states dump --->\n");
2490 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2491 		mcp_p0 = BNX2_MCP_STATE_P0;
2492 		mcp_p1 = BNX2_MCP_STATE_P1;
2493 	} else {
2494 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2495 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2496 	}
2497 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2498 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2499 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2500 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2501 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2502 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2503 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2504 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2505 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2506 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2507 	netdev_err(dev, "DEBUG: shmem states:\n");
2508 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2509 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2510 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2511 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2512 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2513 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2514 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2515 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2516 	pr_cont(" condition[%08x]\n",
2517 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2518 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2519 	DP_SHMEM_LINE(bp, 0x3cc);
2520 	DP_SHMEM_LINE(bp, 0x3dc);
2521 	DP_SHMEM_LINE(bp, 0x3ec);
2522 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2523 	netdev_err(dev, "<--- end MCP states dump --->\n");
2524 }
2525 
2526 static int
2527 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2528 {
2529 	int i;
2530 	u32 val;
2531 
2532 	bp->fw_wr_seq++;
2533 	msg_data |= bp->fw_wr_seq;
2534 	bp->fw_last_msg = msg_data;
2535 
2536 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2537 
2538 	if (!ack)
2539 		return 0;
2540 
2541 	/* wait for an acknowledgement. */
2542 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2543 		msleep(10);
2544 
2545 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2546 
2547 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2548 			break;
2549 	}
2550 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2551 		return 0;
2552 
2553 	/* If we timed out, inform the firmware that this is the case. */
2554 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2555 		msg_data &= ~BNX2_DRV_MSG_CODE;
2556 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2557 
2558 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2559 		if (!silent) {
2560 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2561 			bnx2_dump_mcp_state(bp);
2562 		}
2563 
2564 		return -EBUSY;
2565 	}
2566 
2567 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2568 		return -EIO;
2569 
2570 	return 0;
2571 }
2572 
2573 static int
2574 bnx2_init_5709_context(struct bnx2 *bp)
2575 {
2576 	int i, ret = 0;
2577 	u32 val;
2578 
2579 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2580 	val |= (BNX2_PAGE_BITS - 8) << 16;
2581 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2582 	for (i = 0; i < 10; i++) {
2583 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2584 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2585 			break;
2586 		udelay(2);
2587 	}
2588 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2589 		return -EBUSY;
2590 
2591 	for (i = 0; i < bp->ctx_pages; i++) {
2592 		int j;
2593 
2594 		if (bp->ctx_blk[i])
2595 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2596 		else
2597 			return -ENOMEM;
2598 
2599 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2600 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2601 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2602 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2603 			(u64) bp->ctx_blk_mapping[i] >> 32);
2604 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2605 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2606 		for (j = 0; j < 10; j++) {
2607 
2608 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2609 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2610 				break;
2611 			udelay(5);
2612 		}
2613 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2614 			ret = -EBUSY;
2615 			break;
2616 		}
2617 	}
2618 	return ret;
2619 }
2620 
2621 static void
2622 bnx2_init_context(struct bnx2 *bp)
2623 {
2624 	u32 vcid;
2625 
2626 	vcid = 96;
2627 	while (vcid) {
2628 		u32 vcid_addr, pcid_addr, offset;
2629 		int i;
2630 
2631 		vcid--;
2632 
2633 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2634 			u32 new_vcid;
2635 
2636 			vcid_addr = GET_PCID_ADDR(vcid);
2637 			if (vcid & 0x8) {
2638 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2639 			}
2640 			else {
2641 				new_vcid = vcid;
2642 			}
2643 			pcid_addr = GET_PCID_ADDR(new_vcid);
2644 		}
2645 		else {
2646 	    		vcid_addr = GET_CID_ADDR(vcid);
2647 			pcid_addr = vcid_addr;
2648 		}
2649 
2650 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2651 			vcid_addr += (i << PHY_CTX_SHIFT);
2652 			pcid_addr += (i << PHY_CTX_SHIFT);
2653 
2654 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2655 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2656 
2657 			/* Zero out the context. */
2658 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2659 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2660 		}
2661 	}
2662 }
2663 
2664 static int
2665 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2666 {
2667 	u16 *good_mbuf;
2668 	u32 good_mbuf_cnt;
2669 	u32 val;
2670 
2671 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2672 	if (good_mbuf == NULL)
2673 		return -ENOMEM;
2674 
2675 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2676 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2677 
2678 	good_mbuf_cnt = 0;
2679 
2680 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2681 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2682 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2683 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2684 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2685 
2686 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2687 
2688 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2689 
2690 		/* The addresses with Bit 9 set are bad memory blocks. */
2691 		if (!(val & (1 << 9))) {
2692 			good_mbuf[good_mbuf_cnt] = (u16) val;
2693 			good_mbuf_cnt++;
2694 		}
2695 
2696 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2697 	}
2698 
2699 	/* Free the good ones back to the mbuf pool thus discarding
2700 	 * all the bad ones. */
2701 	while (good_mbuf_cnt) {
2702 		good_mbuf_cnt--;
2703 
2704 		val = good_mbuf[good_mbuf_cnt];
2705 		val = (val << 9) | val | 1;
2706 
2707 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2708 	}
2709 	kfree(good_mbuf);
2710 	return 0;
2711 }
2712 
2713 static void
2714 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2715 {
2716 	u32 val;
2717 
2718 	val = (mac_addr[0] << 8) | mac_addr[1];
2719 
2720 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2721 
2722 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2723 		(mac_addr[4] << 8) | mac_addr[5];
2724 
2725 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2726 }
2727 
2728 static inline int
2729 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2730 {
2731 	dma_addr_t mapping;
2732 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2733 	struct bnx2_rx_bd *rxbd =
2734 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2735 	struct page *page = alloc_page(gfp);
2736 
2737 	if (!page)
2738 		return -ENOMEM;
2739 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2740 			       PCI_DMA_FROMDEVICE);
2741 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2742 		__free_page(page);
2743 		return -EIO;
2744 	}
2745 
2746 	rx_pg->page = page;
2747 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2748 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2749 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2750 	return 0;
2751 }
2752 
2753 static void
2754 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2755 {
2756 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2757 	struct page *page = rx_pg->page;
2758 
2759 	if (!page)
2760 		return;
2761 
2762 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2763 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2764 
2765 	__free_page(page);
2766 	rx_pg->page = NULL;
2767 }
2768 
2769 static inline int
2770 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2771 {
2772 	u8 *data;
2773 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2774 	dma_addr_t mapping;
2775 	struct bnx2_rx_bd *rxbd =
2776 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2777 
2778 	data = kmalloc(bp->rx_buf_size, gfp);
2779 	if (!data)
2780 		return -ENOMEM;
2781 
2782 	mapping = dma_map_single(&bp->pdev->dev,
2783 				 get_l2_fhdr(data),
2784 				 bp->rx_buf_use_size,
2785 				 PCI_DMA_FROMDEVICE);
2786 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2787 		kfree(data);
2788 		return -EIO;
2789 	}
2790 
2791 	rx_buf->data = data;
2792 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2793 
2794 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2795 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2796 
2797 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2798 
2799 	return 0;
2800 }
2801 
2802 static int
2803 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2804 {
2805 	struct status_block *sblk = bnapi->status_blk.msi;
2806 	u32 new_link_state, old_link_state;
2807 	int is_set = 1;
2808 
2809 	new_link_state = sblk->status_attn_bits & event;
2810 	old_link_state = sblk->status_attn_bits_ack & event;
2811 	if (new_link_state != old_link_state) {
2812 		if (new_link_state)
2813 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2814 		else
2815 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2816 	} else
2817 		is_set = 0;
2818 
2819 	return is_set;
2820 }
2821 
2822 static void
2823 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2824 {
2825 	spin_lock(&bp->phy_lock);
2826 
2827 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2828 		bnx2_set_link(bp);
2829 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2830 		bnx2_set_remote_link(bp);
2831 
2832 	spin_unlock(&bp->phy_lock);
2833 
2834 }
2835 
2836 static inline u16
2837 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2838 {
2839 	u16 cons;
2840 
2841 	/* Tell compiler that status block fields can change. */
2842 	barrier();
2843 	cons = *bnapi->hw_tx_cons_ptr;
2844 	barrier();
2845 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2846 		cons++;
2847 	return cons;
2848 }
2849 
2850 static int
2851 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2852 {
2853 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2854 	u16 hw_cons, sw_cons, sw_ring_cons;
2855 	int tx_pkt = 0, index;
2856 	unsigned int tx_bytes = 0;
2857 	struct netdev_queue *txq;
2858 
2859 	index = (bnapi - bp->bnx2_napi);
2860 	txq = netdev_get_tx_queue(bp->dev, index);
2861 
2862 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2863 	sw_cons = txr->tx_cons;
2864 
2865 	while (sw_cons != hw_cons) {
2866 		struct bnx2_sw_tx_bd *tx_buf;
2867 		struct sk_buff *skb;
2868 		int i, last;
2869 
2870 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2871 
2872 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2873 		skb = tx_buf->skb;
2874 
2875 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2876 		prefetch(&skb->end);
2877 
2878 		/* partial BD completions possible with TSO packets */
2879 		if (tx_buf->is_gso) {
2880 			u16 last_idx, last_ring_idx;
2881 
2882 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2883 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2884 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2885 				last_idx++;
2886 			}
2887 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2888 				break;
2889 			}
2890 		}
2891 
2892 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2893 			skb_headlen(skb), PCI_DMA_TODEVICE);
2894 
2895 		tx_buf->skb = NULL;
2896 		last = tx_buf->nr_frags;
2897 
2898 		for (i = 0; i < last; i++) {
2899 			struct bnx2_sw_tx_bd *tx_buf;
2900 
2901 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2902 
2903 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2904 			dma_unmap_page(&bp->pdev->dev,
2905 				dma_unmap_addr(tx_buf, mapping),
2906 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2907 				PCI_DMA_TODEVICE);
2908 		}
2909 
2910 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2911 
2912 		tx_bytes += skb->len;
2913 		dev_kfree_skb_any(skb);
2914 		tx_pkt++;
2915 		if (tx_pkt == budget)
2916 			break;
2917 
2918 		if (hw_cons == sw_cons)
2919 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2920 	}
2921 
2922 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2923 	txr->hw_tx_cons = hw_cons;
2924 	txr->tx_cons = sw_cons;
2925 
2926 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2927 	 * before checking for netif_tx_queue_stopped().  Without the
2928 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2929 	 * will miss it and cause the queue to be stopped forever.
2930 	 */
2931 	smp_mb();
2932 
2933 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2934 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2935 		__netif_tx_lock(txq, smp_processor_id());
2936 		if ((netif_tx_queue_stopped(txq)) &&
2937 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2938 			netif_tx_wake_queue(txq);
2939 		__netif_tx_unlock(txq);
2940 	}
2941 
2942 	return tx_pkt;
2943 }
2944 
2945 static void
2946 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2947 			struct sk_buff *skb, int count)
2948 {
2949 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2950 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2951 	int i;
2952 	u16 hw_prod, prod;
2953 	u16 cons = rxr->rx_pg_cons;
2954 
2955 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2956 
2957 	/* The caller was unable to allocate a new page to replace the
2958 	 * last one in the frags array, so we need to recycle that page
2959 	 * and then free the skb.
2960 	 */
2961 	if (skb) {
2962 		struct page *page;
2963 		struct skb_shared_info *shinfo;
2964 
2965 		shinfo = skb_shinfo(skb);
2966 		shinfo->nr_frags--;
2967 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2968 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2969 
2970 		cons_rx_pg->page = page;
2971 		dev_kfree_skb(skb);
2972 	}
2973 
2974 	hw_prod = rxr->rx_pg_prod;
2975 
2976 	for (i = 0; i < count; i++) {
2977 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2978 
2979 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2980 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2981 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2982 						[BNX2_RX_IDX(cons)];
2983 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2984 						[BNX2_RX_IDX(prod)];
2985 
2986 		if (prod != cons) {
2987 			prod_rx_pg->page = cons_rx_pg->page;
2988 			cons_rx_pg->page = NULL;
2989 			dma_unmap_addr_set(prod_rx_pg, mapping,
2990 				dma_unmap_addr(cons_rx_pg, mapping));
2991 
2992 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2993 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2994 
2995 		}
2996 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2997 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2998 	}
2999 	rxr->rx_pg_prod = hw_prod;
3000 	rxr->rx_pg_cons = cons;
3001 }
3002 
3003 static inline void
3004 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3005 		   u8 *data, u16 cons, u16 prod)
3006 {
3007 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3008 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3009 
3010 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3011 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3012 
3013 	dma_sync_single_for_device(&bp->pdev->dev,
3014 		dma_unmap_addr(cons_rx_buf, mapping),
3015 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3016 
3017 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3018 
3019 	prod_rx_buf->data = data;
3020 
3021 	if (cons == prod)
3022 		return;
3023 
3024 	dma_unmap_addr_set(prod_rx_buf, mapping,
3025 			dma_unmap_addr(cons_rx_buf, mapping));
3026 
3027 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3028 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3029 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3030 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3031 }
3032 
3033 static struct sk_buff *
3034 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3035 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3036 	    u32 ring_idx)
3037 {
3038 	int err;
3039 	u16 prod = ring_idx & 0xffff;
3040 	struct sk_buff *skb;
3041 
3042 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3043 	if (unlikely(err)) {
3044 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3045 error:
3046 		if (hdr_len) {
3047 			unsigned int raw_len = len + 4;
3048 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3049 
3050 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3051 		}
3052 		return NULL;
3053 	}
3054 
3055 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3056 			 PCI_DMA_FROMDEVICE);
3057 	skb = build_skb(data, 0);
3058 	if (!skb) {
3059 		kfree(data);
3060 		goto error;
3061 	}
3062 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3063 	if (hdr_len == 0) {
3064 		skb_put(skb, len);
3065 		return skb;
3066 	} else {
3067 		unsigned int i, frag_len, frag_size, pages;
3068 		struct bnx2_sw_pg *rx_pg;
3069 		u16 pg_cons = rxr->rx_pg_cons;
3070 		u16 pg_prod = rxr->rx_pg_prod;
3071 
3072 		frag_size = len + 4 - hdr_len;
3073 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3074 		skb_put(skb, hdr_len);
3075 
3076 		for (i = 0; i < pages; i++) {
3077 			dma_addr_t mapping_old;
3078 
3079 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3080 			if (unlikely(frag_len <= 4)) {
3081 				unsigned int tail = 4 - frag_len;
3082 
3083 				rxr->rx_pg_cons = pg_cons;
3084 				rxr->rx_pg_prod = pg_prod;
3085 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3086 							pages - i);
3087 				skb->len -= tail;
3088 				if (i == 0) {
3089 					skb->tail -= tail;
3090 				} else {
3091 					skb_frag_t *frag =
3092 						&skb_shinfo(skb)->frags[i - 1];
3093 					skb_frag_size_sub(frag, tail);
3094 					skb->data_len -= tail;
3095 				}
3096 				return skb;
3097 			}
3098 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3099 
3100 			/* Don't unmap yet.  If we're unable to allocate a new
3101 			 * page, we need to recycle the page and the DMA addr.
3102 			 */
3103 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3104 			if (i == pages - 1)
3105 				frag_len -= 4;
3106 
3107 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3108 			rx_pg->page = NULL;
3109 
3110 			err = bnx2_alloc_rx_page(bp, rxr,
3111 						 BNX2_RX_PG_RING_IDX(pg_prod),
3112 						 GFP_ATOMIC);
3113 			if (unlikely(err)) {
3114 				rxr->rx_pg_cons = pg_cons;
3115 				rxr->rx_pg_prod = pg_prod;
3116 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3117 							pages - i);
3118 				return NULL;
3119 			}
3120 
3121 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3122 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3123 
3124 			frag_size -= frag_len;
3125 			skb->data_len += frag_len;
3126 			skb->truesize += PAGE_SIZE;
3127 			skb->len += frag_len;
3128 
3129 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3130 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3131 		}
3132 		rxr->rx_pg_prod = pg_prod;
3133 		rxr->rx_pg_cons = pg_cons;
3134 	}
3135 	return skb;
3136 }
3137 
3138 static inline u16
3139 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3140 {
3141 	u16 cons;
3142 
3143 	/* Tell compiler that status block fields can change. */
3144 	barrier();
3145 	cons = *bnapi->hw_rx_cons_ptr;
3146 	barrier();
3147 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3148 		cons++;
3149 	return cons;
3150 }
3151 
3152 static int
3153 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3154 {
3155 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3156 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3157 	struct l2_fhdr *rx_hdr;
3158 	int rx_pkt = 0, pg_ring_used = 0;
3159 
3160 	if (budget <= 0)
3161 		return rx_pkt;
3162 
3163 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3164 	sw_cons = rxr->rx_cons;
3165 	sw_prod = rxr->rx_prod;
3166 
3167 	/* Memory barrier necessary as speculative reads of the rx
3168 	 * buffer can be ahead of the index in the status block
3169 	 */
3170 	rmb();
3171 	while (sw_cons != hw_cons) {
3172 		unsigned int len, hdr_len;
3173 		u32 status;
3174 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3175 		struct sk_buff *skb;
3176 		dma_addr_t dma_addr;
3177 		u8 *data;
3178 		u16 next_ring_idx;
3179 
3180 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3181 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3182 
3183 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3184 		data = rx_buf->data;
3185 		rx_buf->data = NULL;
3186 
3187 		rx_hdr = get_l2_fhdr(data);
3188 		prefetch(rx_hdr);
3189 
3190 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3191 
3192 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3193 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3194 			PCI_DMA_FROMDEVICE);
3195 
3196 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3197 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3198 		prefetch(get_l2_fhdr(next_rx_buf->data));
3199 
3200 		len = rx_hdr->l2_fhdr_pkt_len;
3201 		status = rx_hdr->l2_fhdr_status;
3202 
3203 		hdr_len = 0;
3204 		if (status & L2_FHDR_STATUS_SPLIT) {
3205 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3206 			pg_ring_used = 1;
3207 		} else if (len > bp->rx_jumbo_thresh) {
3208 			hdr_len = bp->rx_jumbo_thresh;
3209 			pg_ring_used = 1;
3210 		}
3211 
3212 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3213 				       L2_FHDR_ERRORS_PHY_DECODE |
3214 				       L2_FHDR_ERRORS_ALIGNMENT |
3215 				       L2_FHDR_ERRORS_TOO_SHORT |
3216 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3217 
3218 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3219 					  sw_ring_prod);
3220 			if (pg_ring_used) {
3221 				int pages;
3222 
3223 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3224 
3225 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3226 			}
3227 			goto next_rx;
3228 		}
3229 
3230 		len -= 4;
3231 
3232 		if (len <= bp->rx_copy_thresh) {
3233 			skb = netdev_alloc_skb(bp->dev, len + 6);
3234 			if (skb == NULL) {
3235 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3236 						  sw_ring_prod);
3237 				goto next_rx;
3238 			}
3239 
3240 			/* aligned copy */
3241 			memcpy(skb->data,
3242 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3243 			       len + 6);
3244 			skb_reserve(skb, 6);
3245 			skb_put(skb, len);
3246 
3247 			bnx2_reuse_rx_data(bp, rxr, data,
3248 				sw_ring_cons, sw_ring_prod);
3249 
3250 		} else {
3251 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3252 					  (sw_ring_cons << 16) | sw_ring_prod);
3253 			if (!skb)
3254 				goto next_rx;
3255 		}
3256 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3257 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3258 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3259 
3260 		skb->protocol = eth_type_trans(skb, bp->dev);
3261 
3262 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3263 		    skb->protocol != htons(0x8100) &&
3264 		    skb->protocol != htons(ETH_P_8021AD)) {
3265 
3266 			dev_kfree_skb(skb);
3267 			goto next_rx;
3268 
3269 		}
3270 
3271 		skb_checksum_none_assert(skb);
3272 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3273 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3274 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3275 
3276 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3277 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3278 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3279 		}
3280 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3281 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3282 		     L2_FHDR_STATUS_USE_RXHASH))
3283 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3284 				     PKT_HASH_TYPE_L3);
3285 
3286 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3287 		napi_gro_receive(&bnapi->napi, skb);
3288 		rx_pkt++;
3289 
3290 next_rx:
3291 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3292 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3293 
3294 		if ((rx_pkt == budget))
3295 			break;
3296 
3297 		/* Refresh hw_cons to see if there is new work */
3298 		if (sw_cons == hw_cons) {
3299 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3300 			rmb();
3301 		}
3302 	}
3303 	rxr->rx_cons = sw_cons;
3304 	rxr->rx_prod = sw_prod;
3305 
3306 	if (pg_ring_used)
3307 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3308 
3309 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3310 
3311 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3312 
3313 	mmiowb();
3314 
3315 	return rx_pkt;
3316 
3317 }
3318 
3319 /* MSI ISR - The only difference between this and the INTx ISR
3320  * is that the MSI interrupt is always serviced.
3321  */
3322 static irqreturn_t
3323 bnx2_msi(int irq, void *dev_instance)
3324 {
3325 	struct bnx2_napi *bnapi = dev_instance;
3326 	struct bnx2 *bp = bnapi->bp;
3327 
3328 	prefetch(bnapi->status_blk.msi);
3329 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3330 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3331 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3332 
3333 	/* Return here if interrupt is disabled. */
3334 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3335 		return IRQ_HANDLED;
3336 
3337 	napi_schedule(&bnapi->napi);
3338 
3339 	return IRQ_HANDLED;
3340 }
3341 
3342 static irqreturn_t
3343 bnx2_msi_1shot(int irq, void *dev_instance)
3344 {
3345 	struct bnx2_napi *bnapi = dev_instance;
3346 	struct bnx2 *bp = bnapi->bp;
3347 
3348 	prefetch(bnapi->status_blk.msi);
3349 
3350 	/* Return here if interrupt is disabled. */
3351 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3352 		return IRQ_HANDLED;
3353 
3354 	napi_schedule(&bnapi->napi);
3355 
3356 	return IRQ_HANDLED;
3357 }
3358 
3359 static irqreturn_t
3360 bnx2_interrupt(int irq, void *dev_instance)
3361 {
3362 	struct bnx2_napi *bnapi = dev_instance;
3363 	struct bnx2 *bp = bnapi->bp;
3364 	struct status_block *sblk = bnapi->status_blk.msi;
3365 
3366 	/* When using INTx, it is possible for the interrupt to arrive
3367 	 * at the CPU before the status block posted prior to the
3368 	 * interrupt. Reading a register will flush the status block.
3369 	 * When using MSI, the MSI message will always complete after
3370 	 * the status block write.
3371 	 */
3372 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3373 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3374 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3375 		return IRQ_NONE;
3376 
3377 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3378 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3379 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3380 
3381 	/* Read back to deassert IRQ immediately to avoid too many
3382 	 * spurious interrupts.
3383 	 */
3384 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3385 
3386 	/* Return here if interrupt is shared and is disabled. */
3387 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3388 		return IRQ_HANDLED;
3389 
3390 	if (napi_schedule_prep(&bnapi->napi)) {
3391 		bnapi->last_status_idx = sblk->status_idx;
3392 		__napi_schedule(&bnapi->napi);
3393 	}
3394 
3395 	return IRQ_HANDLED;
3396 }
3397 
3398 static inline int
3399 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3400 {
3401 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3402 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3403 
3404 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3405 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3406 		return 1;
3407 	return 0;
3408 }
3409 
3410 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3411 				 STATUS_ATTN_BITS_TIMER_ABORT)
3412 
3413 static inline int
3414 bnx2_has_work(struct bnx2_napi *bnapi)
3415 {
3416 	struct status_block *sblk = bnapi->status_blk.msi;
3417 
3418 	if (bnx2_has_fast_work(bnapi))
3419 		return 1;
3420 
3421 #ifdef BCM_CNIC
3422 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3423 		return 1;
3424 #endif
3425 
3426 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3427 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3428 		return 1;
3429 
3430 	return 0;
3431 }
3432 
3433 static void
3434 bnx2_chk_missed_msi(struct bnx2 *bp)
3435 {
3436 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3437 	u32 msi_ctrl;
3438 
3439 	if (bnx2_has_work(bnapi)) {
3440 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3441 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3442 			return;
3443 
3444 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3445 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3446 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3447 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3448 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3449 		}
3450 	}
3451 
3452 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3453 }
3454 
3455 #ifdef BCM_CNIC
3456 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3457 {
3458 	struct cnic_ops *c_ops;
3459 
3460 	if (!bnapi->cnic_present)
3461 		return;
3462 
3463 	rcu_read_lock();
3464 	c_ops = rcu_dereference(bp->cnic_ops);
3465 	if (c_ops)
3466 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3467 						      bnapi->status_blk.msi);
3468 	rcu_read_unlock();
3469 }
3470 #endif
3471 
3472 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3473 {
3474 	struct status_block *sblk = bnapi->status_blk.msi;
3475 	u32 status_attn_bits = sblk->status_attn_bits;
3476 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3477 
3478 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3479 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3480 
3481 		bnx2_phy_int(bp, bnapi);
3482 
3483 		/* This is needed to take care of transient status
3484 		 * during link changes.
3485 		 */
3486 		BNX2_WR(bp, BNX2_HC_COMMAND,
3487 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3488 		BNX2_RD(bp, BNX2_HC_COMMAND);
3489 	}
3490 }
3491 
3492 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3493 			  int work_done, int budget)
3494 {
3495 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3496 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3497 
3498 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3499 		bnx2_tx_int(bp, bnapi, 0);
3500 
3501 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3502 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3503 
3504 	return work_done;
3505 }
3506 
3507 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3508 {
3509 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3510 	struct bnx2 *bp = bnapi->bp;
3511 	int work_done = 0;
3512 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3513 
3514 	while (1) {
3515 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3516 		if (unlikely(work_done >= budget))
3517 			break;
3518 
3519 		bnapi->last_status_idx = sblk->status_idx;
3520 		/* status idx must be read before checking for more work. */
3521 		rmb();
3522 		if (likely(!bnx2_has_fast_work(bnapi))) {
3523 
3524 			napi_complete(napi);
3525 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3526 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3527 				bnapi->last_status_idx);
3528 			break;
3529 		}
3530 	}
3531 	return work_done;
3532 }
3533 
3534 static int bnx2_poll(struct napi_struct *napi, int budget)
3535 {
3536 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3537 	struct bnx2 *bp = bnapi->bp;
3538 	int work_done = 0;
3539 	struct status_block *sblk = bnapi->status_blk.msi;
3540 
3541 	while (1) {
3542 		bnx2_poll_link(bp, bnapi);
3543 
3544 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3545 
3546 #ifdef BCM_CNIC
3547 		bnx2_poll_cnic(bp, bnapi);
3548 #endif
3549 
3550 		/* bnapi->last_status_idx is used below to tell the hw how
3551 		 * much work has been processed, so we must read it before
3552 		 * checking for more work.
3553 		 */
3554 		bnapi->last_status_idx = sblk->status_idx;
3555 
3556 		if (unlikely(work_done >= budget))
3557 			break;
3558 
3559 		rmb();
3560 		if (likely(!bnx2_has_work(bnapi))) {
3561 			napi_complete(napi);
3562 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3563 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3564 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3565 					bnapi->last_status_idx);
3566 				break;
3567 			}
3568 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3569 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3570 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3571 				bnapi->last_status_idx);
3572 
3573 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3574 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3575 				bnapi->last_status_idx);
3576 			break;
3577 		}
3578 	}
3579 
3580 	return work_done;
3581 }
3582 
3583 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3584  * from set_multicast.
3585  */
3586 static void
3587 bnx2_set_rx_mode(struct net_device *dev)
3588 {
3589 	struct bnx2 *bp = netdev_priv(dev);
3590 	u32 rx_mode, sort_mode;
3591 	struct netdev_hw_addr *ha;
3592 	int i;
3593 
3594 	if (!netif_running(dev))
3595 		return;
3596 
3597 	spin_lock_bh(&bp->phy_lock);
3598 
3599 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3600 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3601 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3602 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3603 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3604 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3605 	if (dev->flags & IFF_PROMISC) {
3606 		/* Promiscuous mode. */
3607 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3608 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3609 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3610 	}
3611 	else if (dev->flags & IFF_ALLMULTI) {
3612 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3613 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3614 				0xffffffff);
3615         	}
3616 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3617 	}
3618 	else {
3619 		/* Accept one or more multicast(s). */
3620 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3621 		u32 regidx;
3622 		u32 bit;
3623 		u32 crc;
3624 
3625 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3626 
3627 		netdev_for_each_mc_addr(ha, dev) {
3628 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3629 			bit = crc & 0xff;
3630 			regidx = (bit & 0xe0) >> 5;
3631 			bit &= 0x1f;
3632 			mc_filter[regidx] |= (1 << bit);
3633 		}
3634 
3635 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3636 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3637 				mc_filter[i]);
3638 		}
3639 
3640 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3641 	}
3642 
3643 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3644 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3645 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3646 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3647 	} else if (!(dev->flags & IFF_PROMISC)) {
3648 		/* Add all entries into to the match filter list */
3649 		i = 0;
3650 		netdev_for_each_uc_addr(ha, dev) {
3651 			bnx2_set_mac_addr(bp, ha->addr,
3652 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3653 			sort_mode |= (1 <<
3654 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3655 			i++;
3656 		}
3657 
3658 	}
3659 
3660 	if (rx_mode != bp->rx_mode) {
3661 		bp->rx_mode = rx_mode;
3662 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3663 	}
3664 
3665 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3666 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3667 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3668 
3669 	spin_unlock_bh(&bp->phy_lock);
3670 }
3671 
3672 static int
3673 check_fw_section(const struct firmware *fw,
3674 		 const struct bnx2_fw_file_section *section,
3675 		 u32 alignment, bool non_empty)
3676 {
3677 	u32 offset = be32_to_cpu(section->offset);
3678 	u32 len = be32_to_cpu(section->len);
3679 
3680 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3681 		return -EINVAL;
3682 	if ((non_empty && len == 0) || len > fw->size - offset ||
3683 	    len & (alignment - 1))
3684 		return -EINVAL;
3685 	return 0;
3686 }
3687 
3688 static int
3689 check_mips_fw_entry(const struct firmware *fw,
3690 		    const struct bnx2_mips_fw_file_entry *entry)
3691 {
3692 	if (check_fw_section(fw, &entry->text, 4, true) ||
3693 	    check_fw_section(fw, &entry->data, 4, false) ||
3694 	    check_fw_section(fw, &entry->rodata, 4, false))
3695 		return -EINVAL;
3696 	return 0;
3697 }
3698 
3699 static void bnx2_release_firmware(struct bnx2 *bp)
3700 {
3701 	if (bp->rv2p_firmware) {
3702 		release_firmware(bp->mips_firmware);
3703 		release_firmware(bp->rv2p_firmware);
3704 		bp->rv2p_firmware = NULL;
3705 	}
3706 }
3707 
3708 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3709 {
3710 	const char *mips_fw_file, *rv2p_fw_file;
3711 	const struct bnx2_mips_fw_file *mips_fw;
3712 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3713 	int rc;
3714 
3715 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3716 		mips_fw_file = FW_MIPS_FILE_09;
3717 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3718 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3719 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3720 		else
3721 			rv2p_fw_file = FW_RV2P_FILE_09;
3722 	} else {
3723 		mips_fw_file = FW_MIPS_FILE_06;
3724 		rv2p_fw_file = FW_RV2P_FILE_06;
3725 	}
3726 
3727 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3728 	if (rc) {
3729 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3730 		goto out;
3731 	}
3732 
3733 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3734 	if (rc) {
3735 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3736 		goto err_release_mips_firmware;
3737 	}
3738 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3739 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3740 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3741 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3742 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3743 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3744 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3745 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3746 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3747 		rc = -EINVAL;
3748 		goto err_release_firmware;
3749 	}
3750 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3751 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3752 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3753 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3754 		rc = -EINVAL;
3755 		goto err_release_firmware;
3756 	}
3757 out:
3758 	return rc;
3759 
3760 err_release_firmware:
3761 	release_firmware(bp->rv2p_firmware);
3762 	bp->rv2p_firmware = NULL;
3763 err_release_mips_firmware:
3764 	release_firmware(bp->mips_firmware);
3765 	goto out;
3766 }
3767 
3768 static int bnx2_request_firmware(struct bnx2 *bp)
3769 {
3770 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3771 }
3772 
3773 static u32
3774 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3775 {
3776 	switch (idx) {
3777 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3778 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3779 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3780 		break;
3781 	}
3782 	return rv2p_code;
3783 }
3784 
3785 static int
3786 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3787 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3788 {
3789 	u32 rv2p_code_len, file_offset;
3790 	__be32 *rv2p_code;
3791 	int i;
3792 	u32 val, cmd, addr;
3793 
3794 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3795 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3796 
3797 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3798 
3799 	if (rv2p_proc == RV2P_PROC1) {
3800 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3801 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3802 	} else {
3803 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3804 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3805 	}
3806 
3807 	for (i = 0; i < rv2p_code_len; i += 8) {
3808 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3809 		rv2p_code++;
3810 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3811 		rv2p_code++;
3812 
3813 		val = (i / 8) | cmd;
3814 		BNX2_WR(bp, addr, val);
3815 	}
3816 
3817 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3818 	for (i = 0; i < 8; i++) {
3819 		u32 loc, code;
3820 
3821 		loc = be32_to_cpu(fw_entry->fixup[i]);
3822 		if (loc && ((loc * 4) < rv2p_code_len)) {
3823 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3824 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3825 			code = be32_to_cpu(*(rv2p_code + loc));
3826 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3827 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3828 
3829 			val = (loc / 2) | cmd;
3830 			BNX2_WR(bp, addr, val);
3831 		}
3832 	}
3833 
3834 	/* Reset the processor, un-stall is done later. */
3835 	if (rv2p_proc == RV2P_PROC1) {
3836 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3837 	}
3838 	else {
3839 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3840 	}
3841 
3842 	return 0;
3843 }
3844 
3845 static int
3846 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3847 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3848 {
3849 	u32 addr, len, file_offset;
3850 	__be32 *data;
3851 	u32 offset;
3852 	u32 val;
3853 
3854 	/* Halt the CPU. */
3855 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3856 	val |= cpu_reg->mode_value_halt;
3857 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3858 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3859 
3860 	/* Load the Text area. */
3861 	addr = be32_to_cpu(fw_entry->text.addr);
3862 	len = be32_to_cpu(fw_entry->text.len);
3863 	file_offset = be32_to_cpu(fw_entry->text.offset);
3864 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3865 
3866 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867 	if (len) {
3868 		int j;
3869 
3870 		for (j = 0; j < (len / 4); j++, offset += 4)
3871 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3872 	}
3873 
3874 	/* Load the Data area. */
3875 	addr = be32_to_cpu(fw_entry->data.addr);
3876 	len = be32_to_cpu(fw_entry->data.len);
3877 	file_offset = be32_to_cpu(fw_entry->data.offset);
3878 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3879 
3880 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3881 	if (len) {
3882 		int j;
3883 
3884 		for (j = 0; j < (len / 4); j++, offset += 4)
3885 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3886 	}
3887 
3888 	/* Load the Read-Only area. */
3889 	addr = be32_to_cpu(fw_entry->rodata.addr);
3890 	len = be32_to_cpu(fw_entry->rodata.len);
3891 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3892 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3893 
3894 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3895 	if (len) {
3896 		int j;
3897 
3898 		for (j = 0; j < (len / 4); j++, offset += 4)
3899 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3900 	}
3901 
3902 	/* Clear the pre-fetch instruction. */
3903 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3904 
3905 	val = be32_to_cpu(fw_entry->start_addr);
3906 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3907 
3908 	/* Start the CPU. */
3909 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3910 	val &= ~cpu_reg->mode_value_halt;
3911 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3912 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3913 
3914 	return 0;
3915 }
3916 
3917 static int
3918 bnx2_init_cpus(struct bnx2 *bp)
3919 {
3920 	const struct bnx2_mips_fw_file *mips_fw =
3921 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3922 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3923 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3924 	int rc;
3925 
3926 	/* Initialize the RV2P processor. */
3927 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3928 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3929 
3930 	/* Initialize the RX Processor. */
3931 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3932 	if (rc)
3933 		goto init_cpu_err;
3934 
3935 	/* Initialize the TX Processor. */
3936 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3937 	if (rc)
3938 		goto init_cpu_err;
3939 
3940 	/* Initialize the TX Patch-up Processor. */
3941 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3942 	if (rc)
3943 		goto init_cpu_err;
3944 
3945 	/* Initialize the Completion Processor. */
3946 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3947 	if (rc)
3948 		goto init_cpu_err;
3949 
3950 	/* Initialize the Command Processor. */
3951 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3952 
3953 init_cpu_err:
3954 	return rc;
3955 }
3956 
3957 static void
3958 bnx2_setup_wol(struct bnx2 *bp)
3959 {
3960 	int i;
3961 	u32 val, wol_msg;
3962 
3963 	if (bp->wol) {
3964 		u32 advertising;
3965 		u8 autoneg;
3966 
3967 		autoneg = bp->autoneg;
3968 		advertising = bp->advertising;
3969 
3970 		if (bp->phy_port == PORT_TP) {
3971 			bp->autoneg = AUTONEG_SPEED;
3972 			bp->advertising = ADVERTISED_10baseT_Half |
3973 				ADVERTISED_10baseT_Full |
3974 				ADVERTISED_100baseT_Half |
3975 				ADVERTISED_100baseT_Full |
3976 				ADVERTISED_Autoneg;
3977 		}
3978 
3979 		spin_lock_bh(&bp->phy_lock);
3980 		bnx2_setup_phy(bp, bp->phy_port);
3981 		spin_unlock_bh(&bp->phy_lock);
3982 
3983 		bp->autoneg = autoneg;
3984 		bp->advertising = advertising;
3985 
3986 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3987 
3988 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3989 
3990 		/* Enable port mode. */
3991 		val &= ~BNX2_EMAC_MODE_PORT;
3992 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3993 		       BNX2_EMAC_MODE_ACPI_RCVD |
3994 		       BNX2_EMAC_MODE_MPKT;
3995 		if (bp->phy_port == PORT_TP) {
3996 			val |= BNX2_EMAC_MODE_PORT_MII;
3997 		} else {
3998 			val |= BNX2_EMAC_MODE_PORT_GMII;
3999 			if (bp->line_speed == SPEED_2500)
4000 				val |= BNX2_EMAC_MODE_25G_MODE;
4001 		}
4002 
4003 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4004 
4005 		/* receive all multicast */
4006 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4007 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4008 				0xffffffff);
4009 		}
4010 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4011 
4012 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4013 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4014 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4015 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4016 
4017 		/* Need to enable EMAC and RPM for WOL. */
4018 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4019 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4020 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4021 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4022 
4023 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4024 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4025 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4026 
4027 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4028 	} else {
4029 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4030 	}
4031 
4032 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4033 		u32 val;
4034 
4035 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4036 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4037 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4038 			return;
4039 		}
4040 		/* Tell firmware not to power down the PHY yet, otherwise
4041 		 * the chip will take a long time to respond to MMIO reads.
4042 		 */
4043 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4044 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4045 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4046 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4047 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4048 	}
4049 
4050 }
4051 
4052 static int
4053 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4054 {
4055 	switch (state) {
4056 	case PCI_D0: {
4057 		u32 val;
4058 
4059 		pci_enable_wake(bp->pdev, PCI_D0, false);
4060 		pci_set_power_state(bp->pdev, PCI_D0);
4061 
4062 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4063 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4064 		val &= ~BNX2_EMAC_MODE_MPKT;
4065 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4066 
4067 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4068 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4069 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4070 		break;
4071 	}
4072 	case PCI_D3hot: {
4073 		bnx2_setup_wol(bp);
4074 		pci_wake_from_d3(bp->pdev, bp->wol);
4075 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4076 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4077 
4078 			if (bp->wol)
4079 				pci_set_power_state(bp->pdev, PCI_D3hot);
4080 			break;
4081 
4082 		}
4083 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4084 			u32 val;
4085 
4086 			/* Tell firmware not to power down the PHY yet,
4087 			 * otherwise the other port may not respond to
4088 			 * MMIO reads.
4089 			 */
4090 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4091 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4092 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4093 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4094 		}
4095 		pci_set_power_state(bp->pdev, PCI_D3hot);
4096 
4097 		/* No more memory access after this point until
4098 		 * device is brought back to D0.
4099 		 */
4100 		break;
4101 	}
4102 	default:
4103 		return -EINVAL;
4104 	}
4105 	return 0;
4106 }
4107 
4108 static int
4109 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4110 {
4111 	u32 val;
4112 	int j;
4113 
4114 	/* Request access to the flash interface. */
4115 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4116 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4117 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4118 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4119 			break;
4120 
4121 		udelay(5);
4122 	}
4123 
4124 	if (j >= NVRAM_TIMEOUT_COUNT)
4125 		return -EBUSY;
4126 
4127 	return 0;
4128 }
4129 
4130 static int
4131 bnx2_release_nvram_lock(struct bnx2 *bp)
4132 {
4133 	int j;
4134 	u32 val;
4135 
4136 	/* Relinquish nvram interface. */
4137 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4138 
4139 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4140 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4141 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4142 			break;
4143 
4144 		udelay(5);
4145 	}
4146 
4147 	if (j >= NVRAM_TIMEOUT_COUNT)
4148 		return -EBUSY;
4149 
4150 	return 0;
4151 }
4152 
4153 
4154 static int
4155 bnx2_enable_nvram_write(struct bnx2 *bp)
4156 {
4157 	u32 val;
4158 
4159 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4160 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4161 
4162 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4163 		int j;
4164 
4165 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4166 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4167 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4168 
4169 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4170 			udelay(5);
4171 
4172 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4173 			if (val & BNX2_NVM_COMMAND_DONE)
4174 				break;
4175 		}
4176 
4177 		if (j >= NVRAM_TIMEOUT_COUNT)
4178 			return -EBUSY;
4179 	}
4180 	return 0;
4181 }
4182 
4183 static void
4184 bnx2_disable_nvram_write(struct bnx2 *bp)
4185 {
4186 	u32 val;
4187 
4188 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4189 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4190 }
4191 
4192 
4193 static void
4194 bnx2_enable_nvram_access(struct bnx2 *bp)
4195 {
4196 	u32 val;
4197 
4198 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4199 	/* Enable both bits, even on read. */
4200 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4201 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4202 }
4203 
4204 static void
4205 bnx2_disable_nvram_access(struct bnx2 *bp)
4206 {
4207 	u32 val;
4208 
4209 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4210 	/* Disable both bits, even after read. */
4211 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4212 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4213 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4214 }
4215 
4216 static int
4217 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4218 {
4219 	u32 cmd;
4220 	int j;
4221 
4222 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4223 		/* Buffered flash, no erase needed */
4224 		return 0;
4225 
4226 	/* Build an erase command */
4227 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4228 	      BNX2_NVM_COMMAND_DOIT;
4229 
4230 	/* Need to clear DONE bit separately. */
4231 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4232 
4233 	/* Address of the NVRAM to read from. */
4234 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4235 
4236 	/* Issue an erase command. */
4237 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4238 
4239 	/* Wait for completion. */
4240 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4241 		u32 val;
4242 
4243 		udelay(5);
4244 
4245 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4246 		if (val & BNX2_NVM_COMMAND_DONE)
4247 			break;
4248 	}
4249 
4250 	if (j >= NVRAM_TIMEOUT_COUNT)
4251 		return -EBUSY;
4252 
4253 	return 0;
4254 }
4255 
4256 static int
4257 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4258 {
4259 	u32 cmd;
4260 	int j;
4261 
4262 	/* Build the command word. */
4263 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4264 
4265 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4266 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4267 		offset = ((offset / bp->flash_info->page_size) <<
4268 			   bp->flash_info->page_bits) +
4269 			  (offset % bp->flash_info->page_size);
4270 	}
4271 
4272 	/* Need to clear DONE bit separately. */
4273 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4274 
4275 	/* Address of the NVRAM to read from. */
4276 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4277 
4278 	/* Issue a read command. */
4279 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4280 
4281 	/* Wait for completion. */
4282 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4283 		u32 val;
4284 
4285 		udelay(5);
4286 
4287 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4288 		if (val & BNX2_NVM_COMMAND_DONE) {
4289 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4290 			memcpy(ret_val, &v, 4);
4291 			break;
4292 		}
4293 	}
4294 	if (j >= NVRAM_TIMEOUT_COUNT)
4295 		return -EBUSY;
4296 
4297 	return 0;
4298 }
4299 
4300 
4301 static int
4302 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4303 {
4304 	u32 cmd;
4305 	__be32 val32;
4306 	int j;
4307 
4308 	/* Build the command word. */
4309 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4310 
4311 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4312 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4313 		offset = ((offset / bp->flash_info->page_size) <<
4314 			  bp->flash_info->page_bits) +
4315 			 (offset % bp->flash_info->page_size);
4316 	}
4317 
4318 	/* Need to clear DONE bit separately. */
4319 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4320 
4321 	memcpy(&val32, val, 4);
4322 
4323 	/* Write the data. */
4324 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4325 
4326 	/* Address of the NVRAM to write to. */
4327 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4328 
4329 	/* Issue the write command. */
4330 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4331 
4332 	/* Wait for completion. */
4333 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4334 		udelay(5);
4335 
4336 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4337 			break;
4338 	}
4339 	if (j >= NVRAM_TIMEOUT_COUNT)
4340 		return -EBUSY;
4341 
4342 	return 0;
4343 }
4344 
4345 static int
4346 bnx2_init_nvram(struct bnx2 *bp)
4347 {
4348 	u32 val;
4349 	int j, entry_count, rc = 0;
4350 	const struct flash_spec *flash;
4351 
4352 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4353 		bp->flash_info = &flash_5709;
4354 		goto get_flash_size;
4355 	}
4356 
4357 	/* Determine the selected interface. */
4358 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4359 
4360 	entry_count = ARRAY_SIZE(flash_table);
4361 
4362 	if (val & 0x40000000) {
4363 
4364 		/* Flash interface has been reconfigured */
4365 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4366 		     j++, flash++) {
4367 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4368 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4369 				bp->flash_info = flash;
4370 				break;
4371 			}
4372 		}
4373 	}
4374 	else {
4375 		u32 mask;
4376 		/* Not yet been reconfigured */
4377 
4378 		if (val & (1 << 23))
4379 			mask = FLASH_BACKUP_STRAP_MASK;
4380 		else
4381 			mask = FLASH_STRAP_MASK;
4382 
4383 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4384 			j++, flash++) {
4385 
4386 			if ((val & mask) == (flash->strapping & mask)) {
4387 				bp->flash_info = flash;
4388 
4389 				/* Request access to the flash interface. */
4390 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4391 					return rc;
4392 
4393 				/* Enable access to flash interface */
4394 				bnx2_enable_nvram_access(bp);
4395 
4396 				/* Reconfigure the flash interface */
4397 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4398 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4399 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4400 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4401 
4402 				/* Disable access to flash interface */
4403 				bnx2_disable_nvram_access(bp);
4404 				bnx2_release_nvram_lock(bp);
4405 
4406 				break;
4407 			}
4408 		}
4409 	} /* if (val & 0x40000000) */
4410 
4411 	if (j == entry_count) {
4412 		bp->flash_info = NULL;
4413 		pr_alert("Unknown flash/EEPROM type\n");
4414 		return -ENODEV;
4415 	}
4416 
4417 get_flash_size:
4418 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4419 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4420 	if (val)
4421 		bp->flash_size = val;
4422 	else
4423 		bp->flash_size = bp->flash_info->total_size;
4424 
4425 	return rc;
4426 }
4427 
4428 static int
4429 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4430 		int buf_size)
4431 {
4432 	int rc = 0;
4433 	u32 cmd_flags, offset32, len32, extra;
4434 
4435 	if (buf_size == 0)
4436 		return 0;
4437 
4438 	/* Request access to the flash interface. */
4439 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4440 		return rc;
4441 
4442 	/* Enable access to flash interface */
4443 	bnx2_enable_nvram_access(bp);
4444 
4445 	len32 = buf_size;
4446 	offset32 = offset;
4447 	extra = 0;
4448 
4449 	cmd_flags = 0;
4450 
4451 	if (offset32 & 3) {
4452 		u8 buf[4];
4453 		u32 pre_len;
4454 
4455 		offset32 &= ~3;
4456 		pre_len = 4 - (offset & 3);
4457 
4458 		if (pre_len >= len32) {
4459 			pre_len = len32;
4460 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4461 				    BNX2_NVM_COMMAND_LAST;
4462 		}
4463 		else {
4464 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4465 		}
4466 
4467 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4468 
4469 		if (rc)
4470 			return rc;
4471 
4472 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4473 
4474 		offset32 += 4;
4475 		ret_buf += pre_len;
4476 		len32 -= pre_len;
4477 	}
4478 	if (len32 & 3) {
4479 		extra = 4 - (len32 & 3);
4480 		len32 = (len32 + 4) & ~3;
4481 	}
4482 
4483 	if (len32 == 4) {
4484 		u8 buf[4];
4485 
4486 		if (cmd_flags)
4487 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4488 		else
4489 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4490 				    BNX2_NVM_COMMAND_LAST;
4491 
4492 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4493 
4494 		memcpy(ret_buf, buf, 4 - extra);
4495 	}
4496 	else if (len32 > 0) {
4497 		u8 buf[4];
4498 
4499 		/* Read the first word. */
4500 		if (cmd_flags)
4501 			cmd_flags = 0;
4502 		else
4503 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4504 
4505 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4506 
4507 		/* Advance to the next dword. */
4508 		offset32 += 4;
4509 		ret_buf += 4;
4510 		len32 -= 4;
4511 
4512 		while (len32 > 4 && rc == 0) {
4513 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4514 
4515 			/* Advance to the next dword. */
4516 			offset32 += 4;
4517 			ret_buf += 4;
4518 			len32 -= 4;
4519 		}
4520 
4521 		if (rc)
4522 			return rc;
4523 
4524 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4525 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4526 
4527 		memcpy(ret_buf, buf, 4 - extra);
4528 	}
4529 
4530 	/* Disable access to flash interface */
4531 	bnx2_disable_nvram_access(bp);
4532 
4533 	bnx2_release_nvram_lock(bp);
4534 
4535 	return rc;
4536 }
4537 
4538 static int
4539 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4540 		int buf_size)
4541 {
4542 	u32 written, offset32, len32;
4543 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4544 	int rc = 0;
4545 	int align_start, align_end;
4546 
4547 	buf = data_buf;
4548 	offset32 = offset;
4549 	len32 = buf_size;
4550 	align_start = align_end = 0;
4551 
4552 	if ((align_start = (offset32 & 3))) {
4553 		offset32 &= ~3;
4554 		len32 += align_start;
4555 		if (len32 < 4)
4556 			len32 = 4;
4557 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4558 			return rc;
4559 	}
4560 
4561 	if (len32 & 3) {
4562 		align_end = 4 - (len32 & 3);
4563 		len32 += align_end;
4564 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4565 			return rc;
4566 	}
4567 
4568 	if (align_start || align_end) {
4569 		align_buf = kmalloc(len32, GFP_KERNEL);
4570 		if (align_buf == NULL)
4571 			return -ENOMEM;
4572 		if (align_start) {
4573 			memcpy(align_buf, start, 4);
4574 		}
4575 		if (align_end) {
4576 			memcpy(align_buf + len32 - 4, end, 4);
4577 		}
4578 		memcpy(align_buf + align_start, data_buf, buf_size);
4579 		buf = align_buf;
4580 	}
4581 
4582 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4583 		flash_buffer = kmalloc(264, GFP_KERNEL);
4584 		if (flash_buffer == NULL) {
4585 			rc = -ENOMEM;
4586 			goto nvram_write_end;
4587 		}
4588 	}
4589 
4590 	written = 0;
4591 	while ((written < len32) && (rc == 0)) {
4592 		u32 page_start, page_end, data_start, data_end;
4593 		u32 addr, cmd_flags;
4594 		int i;
4595 
4596 	        /* Find the page_start addr */
4597 		page_start = offset32 + written;
4598 		page_start -= (page_start % bp->flash_info->page_size);
4599 		/* Find the page_end addr */
4600 		page_end = page_start + bp->flash_info->page_size;
4601 		/* Find the data_start addr */
4602 		data_start = (written == 0) ? offset32 : page_start;
4603 		/* Find the data_end addr */
4604 		data_end = (page_end > offset32 + len32) ?
4605 			(offset32 + len32) : page_end;
4606 
4607 		/* Request access to the flash interface. */
4608 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4609 			goto nvram_write_end;
4610 
4611 		/* Enable access to flash interface */
4612 		bnx2_enable_nvram_access(bp);
4613 
4614 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4615 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4616 			int j;
4617 
4618 			/* Read the whole page into the buffer
4619 			 * (non-buffer flash only) */
4620 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4621 				if (j == (bp->flash_info->page_size - 4)) {
4622 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4623 				}
4624 				rc = bnx2_nvram_read_dword(bp,
4625 					page_start + j,
4626 					&flash_buffer[j],
4627 					cmd_flags);
4628 
4629 				if (rc)
4630 					goto nvram_write_end;
4631 
4632 				cmd_flags = 0;
4633 			}
4634 		}
4635 
4636 		/* Enable writes to flash interface (unlock write-protect) */
4637 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4638 			goto nvram_write_end;
4639 
4640 		/* Loop to write back the buffer data from page_start to
4641 		 * data_start */
4642 		i = 0;
4643 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4644 			/* Erase the page */
4645 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4646 				goto nvram_write_end;
4647 
4648 			/* Re-enable the write again for the actual write */
4649 			bnx2_enable_nvram_write(bp);
4650 
4651 			for (addr = page_start; addr < data_start;
4652 				addr += 4, i += 4) {
4653 
4654 				rc = bnx2_nvram_write_dword(bp, addr,
4655 					&flash_buffer[i], cmd_flags);
4656 
4657 				if (rc != 0)
4658 					goto nvram_write_end;
4659 
4660 				cmd_flags = 0;
4661 			}
4662 		}
4663 
4664 		/* Loop to write the new data from data_start to data_end */
4665 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4666 			if ((addr == page_end - 4) ||
4667 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4668 				 (addr == data_end - 4))) {
4669 
4670 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4671 			}
4672 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4673 				cmd_flags);
4674 
4675 			if (rc != 0)
4676 				goto nvram_write_end;
4677 
4678 			cmd_flags = 0;
4679 			buf += 4;
4680 		}
4681 
4682 		/* Loop to write back the buffer data from data_end
4683 		 * to page_end */
4684 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4685 			for (addr = data_end; addr < page_end;
4686 				addr += 4, i += 4) {
4687 
4688 				if (addr == page_end-4) {
4689 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4690                 		}
4691 				rc = bnx2_nvram_write_dword(bp, addr,
4692 					&flash_buffer[i], cmd_flags);
4693 
4694 				if (rc != 0)
4695 					goto nvram_write_end;
4696 
4697 				cmd_flags = 0;
4698 			}
4699 		}
4700 
4701 		/* Disable writes to flash interface (lock write-protect) */
4702 		bnx2_disable_nvram_write(bp);
4703 
4704 		/* Disable access to flash interface */
4705 		bnx2_disable_nvram_access(bp);
4706 		bnx2_release_nvram_lock(bp);
4707 
4708 		/* Increment written */
4709 		written += data_end - data_start;
4710 	}
4711 
4712 nvram_write_end:
4713 	kfree(flash_buffer);
4714 	kfree(align_buf);
4715 	return rc;
4716 }
4717 
4718 static void
4719 bnx2_init_fw_cap(struct bnx2 *bp)
4720 {
4721 	u32 val, sig = 0;
4722 
4723 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4724 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4725 
4726 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4727 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4728 
4729 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4730 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4731 		return;
4732 
4733 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4734 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4735 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4736 	}
4737 
4738 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4739 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4740 		u32 link;
4741 
4742 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4743 
4744 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4745 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4746 			bp->phy_port = PORT_FIBRE;
4747 		else
4748 			bp->phy_port = PORT_TP;
4749 
4750 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4751 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4752 	}
4753 
4754 	if (netif_running(bp->dev) && sig)
4755 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4756 }
4757 
4758 static void
4759 bnx2_setup_msix_tbl(struct bnx2 *bp)
4760 {
4761 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4762 
4763 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4764 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4765 }
4766 
4767 static int
4768 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4769 {
4770 	u32 val;
4771 	int i, rc = 0;
4772 	u8 old_port;
4773 
4774 	/* Wait for the current PCI transaction to complete before
4775 	 * issuing a reset. */
4776 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4777 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4778 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4779 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4780 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4781 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4782 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4783 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4784 		udelay(5);
4785 	} else {  /* 5709 */
4786 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4787 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4788 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4789 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4790 
4791 		for (i = 0; i < 100; i++) {
4792 			msleep(1);
4793 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4794 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4795 				break;
4796 		}
4797 	}
4798 
4799 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4800 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4801 
4802 	/* Deposit a driver reset signature so the firmware knows that
4803 	 * this is a soft reset. */
4804 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4805 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4806 
4807 	/* Do a dummy read to force the chip to complete all current transaction
4808 	 * before we issue a reset. */
4809 	val = BNX2_RD(bp, BNX2_MISC_ID);
4810 
4811 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4812 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4813 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4814 		udelay(5);
4815 
4816 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4817 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4818 
4819 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4820 
4821 	} else {
4822 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4823 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4824 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4825 
4826 		/* Chip reset. */
4827 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4828 
4829 		/* Reading back any register after chip reset will hang the
4830 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4831 		 * of margin for write posting.
4832 		 */
4833 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4834 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4835 			msleep(20);
4836 
4837 		/* Reset takes approximate 30 usec */
4838 		for (i = 0; i < 10; i++) {
4839 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4840 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4841 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4842 				break;
4843 			udelay(10);
4844 		}
4845 
4846 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4847 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4848 			pr_err("Chip reset did not complete\n");
4849 			return -EBUSY;
4850 		}
4851 	}
4852 
4853 	/* Make sure byte swapping is properly configured. */
4854 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4855 	if (val != 0x01020304) {
4856 		pr_err("Chip not in correct endian mode\n");
4857 		return -ENODEV;
4858 	}
4859 
4860 	/* Wait for the firmware to finish its initialization. */
4861 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4862 	if (rc)
4863 		return rc;
4864 
4865 	spin_lock_bh(&bp->phy_lock);
4866 	old_port = bp->phy_port;
4867 	bnx2_init_fw_cap(bp);
4868 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4869 	    old_port != bp->phy_port)
4870 		bnx2_set_default_remote_link(bp);
4871 	spin_unlock_bh(&bp->phy_lock);
4872 
4873 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4874 		/* Adjust the voltage regular to two steps lower.  The default
4875 		 * of this register is 0x0000000e. */
4876 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4877 
4878 		/* Remove bad rbuf memory from the free pool. */
4879 		rc = bnx2_alloc_bad_rbuf(bp);
4880 	}
4881 
4882 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4883 		bnx2_setup_msix_tbl(bp);
4884 		/* Prevent MSIX table reads and write from timing out */
4885 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4886 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4887 	}
4888 
4889 	return rc;
4890 }
4891 
4892 static int
4893 bnx2_init_chip(struct bnx2 *bp)
4894 {
4895 	u32 val, mtu;
4896 	int rc, i;
4897 
4898 	/* Make sure the interrupt is not active. */
4899 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4900 
4901 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4902 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4903 #ifdef __BIG_ENDIAN
4904 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4905 #endif
4906 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4907 	      DMA_READ_CHANS << 12 |
4908 	      DMA_WRITE_CHANS << 16;
4909 
4910 	val |= (0x2 << 20) | (1 << 11);
4911 
4912 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4913 		val |= (1 << 23);
4914 
4915 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4916 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4917 	    !(bp->flags & BNX2_FLAG_PCIX))
4918 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4919 
4920 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4921 
4922 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4923 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4924 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4925 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4926 	}
4927 
4928 	if (bp->flags & BNX2_FLAG_PCIX) {
4929 		u16 val16;
4930 
4931 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4932 				     &val16);
4933 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4934 				      val16 & ~PCI_X_CMD_ERO);
4935 	}
4936 
4937 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4938 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4939 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4940 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4941 
4942 	/* Initialize context mapping and zero out the quick contexts.  The
4943 	 * context block must have already been enabled. */
4944 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4945 		rc = bnx2_init_5709_context(bp);
4946 		if (rc)
4947 			return rc;
4948 	} else
4949 		bnx2_init_context(bp);
4950 
4951 	if ((rc = bnx2_init_cpus(bp)) != 0)
4952 		return rc;
4953 
4954 	bnx2_init_nvram(bp);
4955 
4956 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4957 
4958 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4959 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4960 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4961 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4962 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4963 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4964 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4965 	}
4966 
4967 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4968 
4969 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4970 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4971 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4972 
4973 	val = (BNX2_PAGE_BITS - 8) << 24;
4974 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4975 
4976 	/* Configure page size. */
4977 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4978 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4979 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4980 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4981 
4982 	val = bp->mac_addr[0] +
4983 	      (bp->mac_addr[1] << 8) +
4984 	      (bp->mac_addr[2] << 16) +
4985 	      bp->mac_addr[3] +
4986 	      (bp->mac_addr[4] << 8) +
4987 	      (bp->mac_addr[5] << 16);
4988 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4989 
4990 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4991 	mtu = bp->dev->mtu;
4992 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4993 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4994 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4995 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4996 
4997 	if (mtu < ETH_DATA_LEN)
4998 		mtu = ETH_DATA_LEN;
4999 
5000 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5001 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5002 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5003 
5004 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5005 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5006 		bp->bnx2_napi[i].last_status_idx = 0;
5007 
5008 	bp->idle_chk_status_idx = 0xffff;
5009 
5010 	/* Set up how to generate a link change interrupt. */
5011 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5012 
5013 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5014 		(u64) bp->status_blk_mapping & 0xffffffff);
5015 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5016 
5017 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5018 		(u64) bp->stats_blk_mapping & 0xffffffff);
5019 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5020 		(u64) bp->stats_blk_mapping >> 32);
5021 
5022 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5023 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5024 
5025 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5026 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5027 
5028 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5029 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5030 
5031 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5032 
5033 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5034 
5035 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5036 		(bp->com_ticks_int << 16) | bp->com_ticks);
5037 
5038 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5039 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5040 
5041 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5042 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5043 	else
5044 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5045 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5046 
5047 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5048 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5049 	else {
5050 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5051 		      BNX2_HC_CONFIG_COLLECT_STATS;
5052 	}
5053 
5054 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5055 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5056 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5057 
5058 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5059 	}
5060 
5061 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5062 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5063 
5064 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5065 
5066 	if (bp->rx_ticks < 25)
5067 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5068 	else
5069 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5070 
5071 	for (i = 1; i < bp->irq_nvecs; i++) {
5072 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5073 			   BNX2_HC_SB_CONFIG_1;
5074 
5075 		BNX2_WR(bp, base,
5076 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5077 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5078 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5079 
5080 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5081 			(bp->tx_quick_cons_trip_int << 16) |
5082 			 bp->tx_quick_cons_trip);
5083 
5084 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5085 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5086 
5087 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5088 			(bp->rx_quick_cons_trip_int << 16) |
5089 			bp->rx_quick_cons_trip);
5090 
5091 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5092 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5093 	}
5094 
5095 	/* Clear internal stats counters. */
5096 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5097 
5098 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5099 
5100 	/* Initialize the receive filter. */
5101 	bnx2_set_rx_mode(bp->dev);
5102 
5103 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5104 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5105 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5106 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5107 	}
5108 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5109 			  1, 0);
5110 
5111 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5112 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5113 
5114 	udelay(20);
5115 
5116 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5117 
5118 	return rc;
5119 }
5120 
5121 static void
5122 bnx2_clear_ring_states(struct bnx2 *bp)
5123 {
5124 	struct bnx2_napi *bnapi;
5125 	struct bnx2_tx_ring_info *txr;
5126 	struct bnx2_rx_ring_info *rxr;
5127 	int i;
5128 
5129 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5130 		bnapi = &bp->bnx2_napi[i];
5131 		txr = &bnapi->tx_ring;
5132 		rxr = &bnapi->rx_ring;
5133 
5134 		txr->tx_cons = 0;
5135 		txr->hw_tx_cons = 0;
5136 		rxr->rx_prod_bseq = 0;
5137 		rxr->rx_prod = 0;
5138 		rxr->rx_cons = 0;
5139 		rxr->rx_pg_prod = 0;
5140 		rxr->rx_pg_cons = 0;
5141 	}
5142 }
5143 
5144 static void
5145 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5146 {
5147 	u32 val, offset0, offset1, offset2, offset3;
5148 	u32 cid_addr = GET_CID_ADDR(cid);
5149 
5150 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5151 		offset0 = BNX2_L2CTX_TYPE_XI;
5152 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5153 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5154 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5155 	} else {
5156 		offset0 = BNX2_L2CTX_TYPE;
5157 		offset1 = BNX2_L2CTX_CMD_TYPE;
5158 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5159 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5160 	}
5161 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5162 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5163 
5164 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5165 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5166 
5167 	val = (u64) txr->tx_desc_mapping >> 32;
5168 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5169 
5170 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5171 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5172 }
5173 
5174 static void
5175 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5176 {
5177 	struct bnx2_tx_bd *txbd;
5178 	u32 cid = TX_CID;
5179 	struct bnx2_napi *bnapi;
5180 	struct bnx2_tx_ring_info *txr;
5181 
5182 	bnapi = &bp->bnx2_napi[ring_num];
5183 	txr = &bnapi->tx_ring;
5184 
5185 	if (ring_num == 0)
5186 		cid = TX_CID;
5187 	else
5188 		cid = TX_TSS_CID + ring_num - 1;
5189 
5190 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5191 
5192 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5193 
5194 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5195 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5196 
5197 	txr->tx_prod = 0;
5198 	txr->tx_prod_bseq = 0;
5199 
5200 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5201 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5202 
5203 	bnx2_init_tx_context(bp, cid, txr);
5204 }
5205 
5206 static void
5207 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5208 		     u32 buf_size, int num_rings)
5209 {
5210 	int i;
5211 	struct bnx2_rx_bd *rxbd;
5212 
5213 	for (i = 0; i < num_rings; i++) {
5214 		int j;
5215 
5216 		rxbd = &rx_ring[i][0];
5217 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5218 			rxbd->rx_bd_len = buf_size;
5219 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5220 		}
5221 		if (i == (num_rings - 1))
5222 			j = 0;
5223 		else
5224 			j = i + 1;
5225 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5226 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5227 	}
5228 }
5229 
5230 static void
5231 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5232 {
5233 	int i;
5234 	u16 prod, ring_prod;
5235 	u32 cid, rx_cid_addr, val;
5236 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5237 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5238 
5239 	if (ring_num == 0)
5240 		cid = RX_CID;
5241 	else
5242 		cid = RX_RSS_CID + ring_num - 1;
5243 
5244 	rx_cid_addr = GET_CID_ADDR(cid);
5245 
5246 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5247 			     bp->rx_buf_use_size, bp->rx_max_ring);
5248 
5249 	bnx2_init_rx_context(bp, cid);
5250 
5251 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5252 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5253 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5254 	}
5255 
5256 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5257 	if (bp->rx_pg_ring_size) {
5258 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5259 				     rxr->rx_pg_desc_mapping,
5260 				     PAGE_SIZE, bp->rx_max_pg_ring);
5261 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5262 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5263 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5264 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5265 
5266 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5267 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5268 
5269 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5270 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5271 
5272 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5273 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5274 	}
5275 
5276 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5277 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5278 
5279 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5280 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5281 
5282 	ring_prod = prod = rxr->rx_pg_prod;
5283 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5284 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5285 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5286 				    ring_num, i, bp->rx_pg_ring_size);
5287 			break;
5288 		}
5289 		prod = BNX2_NEXT_RX_BD(prod);
5290 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5291 	}
5292 	rxr->rx_pg_prod = prod;
5293 
5294 	ring_prod = prod = rxr->rx_prod;
5295 	for (i = 0; i < bp->rx_ring_size; i++) {
5296 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5297 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5298 				    ring_num, i, bp->rx_ring_size);
5299 			break;
5300 		}
5301 		prod = BNX2_NEXT_RX_BD(prod);
5302 		ring_prod = BNX2_RX_RING_IDX(prod);
5303 	}
5304 	rxr->rx_prod = prod;
5305 
5306 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5307 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5308 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5309 
5310 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5311 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5312 
5313 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5314 }
5315 
5316 static void
5317 bnx2_init_all_rings(struct bnx2 *bp)
5318 {
5319 	int i;
5320 	u32 val;
5321 
5322 	bnx2_clear_ring_states(bp);
5323 
5324 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5325 	for (i = 0; i < bp->num_tx_rings; i++)
5326 		bnx2_init_tx_ring(bp, i);
5327 
5328 	if (bp->num_tx_rings > 1)
5329 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5330 			(TX_TSS_CID << 7));
5331 
5332 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5333 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5334 
5335 	for (i = 0; i < bp->num_rx_rings; i++)
5336 		bnx2_init_rx_ring(bp, i);
5337 
5338 	if (bp->num_rx_rings > 1) {
5339 		u32 tbl_32 = 0;
5340 
5341 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5342 			int shift = (i % 8) << 2;
5343 
5344 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5345 			if ((i % 8) == 7) {
5346 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5347 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5348 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5349 					BNX2_RLUP_RSS_COMMAND_WRITE |
5350 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5351 				tbl_32 = 0;
5352 			}
5353 		}
5354 
5355 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5356 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5357 
5358 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5359 
5360 	}
5361 }
5362 
5363 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5364 {
5365 	u32 max, num_rings = 1;
5366 
5367 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5368 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5369 		num_rings++;
5370 	}
5371 	/* round to next power of 2 */
5372 	max = max_size;
5373 	while ((max & num_rings) == 0)
5374 		max >>= 1;
5375 
5376 	if (num_rings != max)
5377 		max <<= 1;
5378 
5379 	return max;
5380 }
5381 
5382 static void
5383 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5384 {
5385 	u32 rx_size, rx_space, jumbo_size;
5386 
5387 	/* 8 for CRC and VLAN */
5388 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5389 
5390 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5391 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5392 
5393 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5394 	bp->rx_pg_ring_size = 0;
5395 	bp->rx_max_pg_ring = 0;
5396 	bp->rx_max_pg_ring_idx = 0;
5397 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5398 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5399 
5400 		jumbo_size = size * pages;
5401 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5402 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5403 
5404 		bp->rx_pg_ring_size = jumbo_size;
5405 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5406 							BNX2_MAX_RX_PG_RINGS);
5407 		bp->rx_max_pg_ring_idx =
5408 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5409 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5410 		bp->rx_copy_thresh = 0;
5411 	}
5412 
5413 	bp->rx_buf_use_size = rx_size;
5414 	/* hw alignment + build_skb() overhead*/
5415 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5416 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5417 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5418 	bp->rx_ring_size = size;
5419 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5420 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5421 }
5422 
5423 static void
5424 bnx2_free_tx_skbs(struct bnx2 *bp)
5425 {
5426 	int i;
5427 
5428 	for (i = 0; i < bp->num_tx_rings; i++) {
5429 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5430 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5431 		int j;
5432 
5433 		if (txr->tx_buf_ring == NULL)
5434 			continue;
5435 
5436 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5437 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5438 			struct sk_buff *skb = tx_buf->skb;
5439 			int k, last;
5440 
5441 			if (skb == NULL) {
5442 				j = BNX2_NEXT_TX_BD(j);
5443 				continue;
5444 			}
5445 
5446 			dma_unmap_single(&bp->pdev->dev,
5447 					 dma_unmap_addr(tx_buf, mapping),
5448 					 skb_headlen(skb),
5449 					 PCI_DMA_TODEVICE);
5450 
5451 			tx_buf->skb = NULL;
5452 
5453 			last = tx_buf->nr_frags;
5454 			j = BNX2_NEXT_TX_BD(j);
5455 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5456 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5457 				dma_unmap_page(&bp->pdev->dev,
5458 					dma_unmap_addr(tx_buf, mapping),
5459 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5460 					PCI_DMA_TODEVICE);
5461 			}
5462 			dev_kfree_skb(skb);
5463 		}
5464 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5465 	}
5466 }
5467 
5468 static void
5469 bnx2_free_rx_skbs(struct bnx2 *bp)
5470 {
5471 	int i;
5472 
5473 	for (i = 0; i < bp->num_rx_rings; i++) {
5474 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5475 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5476 		int j;
5477 
5478 		if (rxr->rx_buf_ring == NULL)
5479 			return;
5480 
5481 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5482 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5483 			u8 *data = rx_buf->data;
5484 
5485 			if (data == NULL)
5486 				continue;
5487 
5488 			dma_unmap_single(&bp->pdev->dev,
5489 					 dma_unmap_addr(rx_buf, mapping),
5490 					 bp->rx_buf_use_size,
5491 					 PCI_DMA_FROMDEVICE);
5492 
5493 			rx_buf->data = NULL;
5494 
5495 			kfree(data);
5496 		}
5497 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5498 			bnx2_free_rx_page(bp, rxr, j);
5499 	}
5500 }
5501 
5502 static void
5503 bnx2_free_skbs(struct bnx2 *bp)
5504 {
5505 	bnx2_free_tx_skbs(bp);
5506 	bnx2_free_rx_skbs(bp);
5507 }
5508 
5509 static int
5510 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5511 {
5512 	int rc;
5513 
5514 	rc = bnx2_reset_chip(bp, reset_code);
5515 	bnx2_free_skbs(bp);
5516 	if (rc)
5517 		return rc;
5518 
5519 	if ((rc = bnx2_init_chip(bp)) != 0)
5520 		return rc;
5521 
5522 	bnx2_init_all_rings(bp);
5523 	return 0;
5524 }
5525 
5526 static int
5527 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5528 {
5529 	int rc;
5530 
5531 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5532 		return rc;
5533 
5534 	spin_lock_bh(&bp->phy_lock);
5535 	bnx2_init_phy(bp, reset_phy);
5536 	bnx2_set_link(bp);
5537 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5538 		bnx2_remote_phy_event(bp);
5539 	spin_unlock_bh(&bp->phy_lock);
5540 	return 0;
5541 }
5542 
5543 static int
5544 bnx2_shutdown_chip(struct bnx2 *bp)
5545 {
5546 	u32 reset_code;
5547 
5548 	if (bp->flags & BNX2_FLAG_NO_WOL)
5549 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5550 	else if (bp->wol)
5551 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5552 	else
5553 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5554 
5555 	return bnx2_reset_chip(bp, reset_code);
5556 }
5557 
5558 static int
5559 bnx2_test_registers(struct bnx2 *bp)
5560 {
5561 	int ret;
5562 	int i, is_5709;
5563 	static const struct {
5564 		u16   offset;
5565 		u16   flags;
5566 #define BNX2_FL_NOT_5709	1
5567 		u32   rw_mask;
5568 		u32   ro_mask;
5569 	} reg_tbl[] = {
5570 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5571 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5572 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5573 
5574 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5575 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5576 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5577 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5578 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5579 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5580 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5581 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5582 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5583 
5584 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5585 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5587 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5588 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5589 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5590 
5591 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5592 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5593 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5594 
5595 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5596 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5597 
5598 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5599 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5600 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5601 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5602 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5603 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5604 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5605 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5606 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5607 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5608 
5609 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5610 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5611 
5612 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5613 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5614 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5615 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5616 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5617 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5618 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5619 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5620 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5621 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5622 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5623 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5624 
5625 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5626 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5627 
5628 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5629 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5630 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5631 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5632 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5633 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5634 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5635 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5636 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5637 
5638 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5639 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5640 
5641 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5642 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5643 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5644 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5645 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5646 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5647 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5648 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5649 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5650 
5651 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5652 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5653 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5654 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5655 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5656 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5657 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5658 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5659 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5660 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5661 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5662 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5663 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5664 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5665 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5666 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5667 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5668 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5669 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5670 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5671 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5672 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5673 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5674 
5675 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5676 	};
5677 
5678 	ret = 0;
5679 	is_5709 = 0;
5680 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5681 		is_5709 = 1;
5682 
5683 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5684 		u32 offset, rw_mask, ro_mask, save_val, val;
5685 		u16 flags = reg_tbl[i].flags;
5686 
5687 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5688 			continue;
5689 
5690 		offset = (u32) reg_tbl[i].offset;
5691 		rw_mask = reg_tbl[i].rw_mask;
5692 		ro_mask = reg_tbl[i].ro_mask;
5693 
5694 		save_val = readl(bp->regview + offset);
5695 
5696 		writel(0, bp->regview + offset);
5697 
5698 		val = readl(bp->regview + offset);
5699 		if ((val & rw_mask) != 0) {
5700 			goto reg_test_err;
5701 		}
5702 
5703 		if ((val & ro_mask) != (save_val & ro_mask)) {
5704 			goto reg_test_err;
5705 		}
5706 
5707 		writel(0xffffffff, bp->regview + offset);
5708 
5709 		val = readl(bp->regview + offset);
5710 		if ((val & rw_mask) != rw_mask) {
5711 			goto reg_test_err;
5712 		}
5713 
5714 		if ((val & ro_mask) != (save_val & ro_mask)) {
5715 			goto reg_test_err;
5716 		}
5717 
5718 		writel(save_val, bp->regview + offset);
5719 		continue;
5720 
5721 reg_test_err:
5722 		writel(save_val, bp->regview + offset);
5723 		ret = -ENODEV;
5724 		break;
5725 	}
5726 	return ret;
5727 }
5728 
5729 static int
5730 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5731 {
5732 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5733 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5734 	int i;
5735 
5736 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5737 		u32 offset;
5738 
5739 		for (offset = 0; offset < size; offset += 4) {
5740 
5741 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5742 
5743 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5744 				test_pattern[i]) {
5745 				return -ENODEV;
5746 			}
5747 		}
5748 	}
5749 	return 0;
5750 }
5751 
5752 static int
5753 bnx2_test_memory(struct bnx2 *bp)
5754 {
5755 	int ret = 0;
5756 	int i;
5757 	static struct mem_entry {
5758 		u32   offset;
5759 		u32   len;
5760 	} mem_tbl_5706[] = {
5761 		{ 0x60000,  0x4000 },
5762 		{ 0xa0000,  0x3000 },
5763 		{ 0xe0000,  0x4000 },
5764 		{ 0x120000, 0x4000 },
5765 		{ 0x1a0000, 0x4000 },
5766 		{ 0x160000, 0x4000 },
5767 		{ 0xffffffff, 0    },
5768 	},
5769 	mem_tbl_5709[] = {
5770 		{ 0x60000,  0x4000 },
5771 		{ 0xa0000,  0x3000 },
5772 		{ 0xe0000,  0x4000 },
5773 		{ 0x120000, 0x4000 },
5774 		{ 0x1a0000, 0x4000 },
5775 		{ 0xffffffff, 0    },
5776 	};
5777 	struct mem_entry *mem_tbl;
5778 
5779 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5780 		mem_tbl = mem_tbl_5709;
5781 	else
5782 		mem_tbl = mem_tbl_5706;
5783 
5784 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5785 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5786 			mem_tbl[i].len)) != 0) {
5787 			return ret;
5788 		}
5789 	}
5790 
5791 	return ret;
5792 }
5793 
5794 #define BNX2_MAC_LOOPBACK	0
5795 #define BNX2_PHY_LOOPBACK	1
5796 
5797 static int
5798 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5799 {
5800 	unsigned int pkt_size, num_pkts, i;
5801 	struct sk_buff *skb;
5802 	u8 *data;
5803 	unsigned char *packet;
5804 	u16 rx_start_idx, rx_idx;
5805 	dma_addr_t map;
5806 	struct bnx2_tx_bd *txbd;
5807 	struct bnx2_sw_bd *rx_buf;
5808 	struct l2_fhdr *rx_hdr;
5809 	int ret = -ENODEV;
5810 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5811 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5812 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5813 
5814 	tx_napi = bnapi;
5815 
5816 	txr = &tx_napi->tx_ring;
5817 	rxr = &bnapi->rx_ring;
5818 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5819 		bp->loopback = MAC_LOOPBACK;
5820 		bnx2_set_mac_loopback(bp);
5821 	}
5822 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5823 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5824 			return 0;
5825 
5826 		bp->loopback = PHY_LOOPBACK;
5827 		bnx2_set_phy_loopback(bp);
5828 	}
5829 	else
5830 		return -EINVAL;
5831 
5832 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5833 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5834 	if (!skb)
5835 		return -ENOMEM;
5836 	packet = skb_put(skb, pkt_size);
5837 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5838 	memset(packet + ETH_ALEN, 0x0, 8);
5839 	for (i = 14; i < pkt_size; i++)
5840 		packet[i] = (unsigned char) (i & 0xff);
5841 
5842 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5843 			     PCI_DMA_TODEVICE);
5844 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5845 		dev_kfree_skb(skb);
5846 		return -EIO;
5847 	}
5848 
5849 	BNX2_WR(bp, BNX2_HC_COMMAND,
5850 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5851 
5852 	BNX2_RD(bp, BNX2_HC_COMMAND);
5853 
5854 	udelay(5);
5855 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5856 
5857 	num_pkts = 0;
5858 
5859 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5860 
5861 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5862 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5863 	txbd->tx_bd_mss_nbytes = pkt_size;
5864 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5865 
5866 	num_pkts++;
5867 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5868 	txr->tx_prod_bseq += pkt_size;
5869 
5870 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5871 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5872 
5873 	udelay(100);
5874 
5875 	BNX2_WR(bp, BNX2_HC_COMMAND,
5876 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5877 
5878 	BNX2_RD(bp, BNX2_HC_COMMAND);
5879 
5880 	udelay(5);
5881 
5882 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5883 	dev_kfree_skb(skb);
5884 
5885 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5886 		goto loopback_test_done;
5887 
5888 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5889 	if (rx_idx != rx_start_idx + num_pkts) {
5890 		goto loopback_test_done;
5891 	}
5892 
5893 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5894 	data = rx_buf->data;
5895 
5896 	rx_hdr = get_l2_fhdr(data);
5897 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5898 
5899 	dma_sync_single_for_cpu(&bp->pdev->dev,
5900 		dma_unmap_addr(rx_buf, mapping),
5901 		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5902 
5903 	if (rx_hdr->l2_fhdr_status &
5904 		(L2_FHDR_ERRORS_BAD_CRC |
5905 		L2_FHDR_ERRORS_PHY_DECODE |
5906 		L2_FHDR_ERRORS_ALIGNMENT |
5907 		L2_FHDR_ERRORS_TOO_SHORT |
5908 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5909 
5910 		goto loopback_test_done;
5911 	}
5912 
5913 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5914 		goto loopback_test_done;
5915 	}
5916 
5917 	for (i = 14; i < pkt_size; i++) {
5918 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5919 			goto loopback_test_done;
5920 		}
5921 	}
5922 
5923 	ret = 0;
5924 
5925 loopback_test_done:
5926 	bp->loopback = 0;
5927 	return ret;
5928 }
5929 
5930 #define BNX2_MAC_LOOPBACK_FAILED	1
5931 #define BNX2_PHY_LOOPBACK_FAILED	2
5932 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5933 					 BNX2_PHY_LOOPBACK_FAILED)
5934 
5935 static int
5936 bnx2_test_loopback(struct bnx2 *bp)
5937 {
5938 	int rc = 0;
5939 
5940 	if (!netif_running(bp->dev))
5941 		return BNX2_LOOPBACK_FAILED;
5942 
5943 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5944 	spin_lock_bh(&bp->phy_lock);
5945 	bnx2_init_phy(bp, 1);
5946 	spin_unlock_bh(&bp->phy_lock);
5947 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5948 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5949 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5950 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5951 	return rc;
5952 }
5953 
5954 #define NVRAM_SIZE 0x200
5955 #define CRC32_RESIDUAL 0xdebb20e3
5956 
5957 static int
5958 bnx2_test_nvram(struct bnx2 *bp)
5959 {
5960 	__be32 buf[NVRAM_SIZE / 4];
5961 	u8 *data = (u8 *) buf;
5962 	int rc = 0;
5963 	u32 magic, csum;
5964 
5965 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5966 		goto test_nvram_done;
5967 
5968         magic = be32_to_cpu(buf[0]);
5969 	if (magic != 0x669955aa) {
5970 		rc = -ENODEV;
5971 		goto test_nvram_done;
5972 	}
5973 
5974 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5975 		goto test_nvram_done;
5976 
5977 	csum = ether_crc_le(0x100, data);
5978 	if (csum != CRC32_RESIDUAL) {
5979 		rc = -ENODEV;
5980 		goto test_nvram_done;
5981 	}
5982 
5983 	csum = ether_crc_le(0x100, data + 0x100);
5984 	if (csum != CRC32_RESIDUAL) {
5985 		rc = -ENODEV;
5986 	}
5987 
5988 test_nvram_done:
5989 	return rc;
5990 }
5991 
5992 static int
5993 bnx2_test_link(struct bnx2 *bp)
5994 {
5995 	u32 bmsr;
5996 
5997 	if (!netif_running(bp->dev))
5998 		return -ENODEV;
5999 
6000 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6001 		if (bp->link_up)
6002 			return 0;
6003 		return -ENODEV;
6004 	}
6005 	spin_lock_bh(&bp->phy_lock);
6006 	bnx2_enable_bmsr1(bp);
6007 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6008 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6009 	bnx2_disable_bmsr1(bp);
6010 	spin_unlock_bh(&bp->phy_lock);
6011 
6012 	if (bmsr & BMSR_LSTATUS) {
6013 		return 0;
6014 	}
6015 	return -ENODEV;
6016 }
6017 
6018 static int
6019 bnx2_test_intr(struct bnx2 *bp)
6020 {
6021 	int i;
6022 	u16 status_idx;
6023 
6024 	if (!netif_running(bp->dev))
6025 		return -ENODEV;
6026 
6027 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6028 
6029 	/* This register is not touched during run-time. */
6030 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6031 	BNX2_RD(bp, BNX2_HC_COMMAND);
6032 
6033 	for (i = 0; i < 10; i++) {
6034 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6035 			status_idx) {
6036 
6037 			break;
6038 		}
6039 
6040 		msleep_interruptible(10);
6041 	}
6042 	if (i < 10)
6043 		return 0;
6044 
6045 	return -ENODEV;
6046 }
6047 
6048 /* Determining link for parallel detection. */
6049 static int
6050 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6051 {
6052 	u32 mode_ctl, an_dbg, exp;
6053 
6054 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6055 		return 0;
6056 
6057 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6058 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6059 
6060 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6061 		return 0;
6062 
6063 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6064 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6065 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6066 
6067 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6068 		return 0;
6069 
6070 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6071 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6072 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6073 
6074 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6075 		return 0;
6076 
6077 	return 1;
6078 }
6079 
6080 static void
6081 bnx2_5706_serdes_timer(struct bnx2 *bp)
6082 {
6083 	int check_link = 1;
6084 
6085 	spin_lock(&bp->phy_lock);
6086 	if (bp->serdes_an_pending) {
6087 		bp->serdes_an_pending--;
6088 		check_link = 0;
6089 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6090 		u32 bmcr;
6091 
6092 		bp->current_interval = BNX2_TIMER_INTERVAL;
6093 
6094 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6095 
6096 		if (bmcr & BMCR_ANENABLE) {
6097 			if (bnx2_5706_serdes_has_link(bp)) {
6098 				bmcr &= ~BMCR_ANENABLE;
6099 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6100 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6101 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6102 			}
6103 		}
6104 	}
6105 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6106 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6107 		u32 phy2;
6108 
6109 		bnx2_write_phy(bp, 0x17, 0x0f01);
6110 		bnx2_read_phy(bp, 0x15, &phy2);
6111 		if (phy2 & 0x20) {
6112 			u32 bmcr;
6113 
6114 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6115 			bmcr |= BMCR_ANENABLE;
6116 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6117 
6118 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6119 		}
6120 	} else
6121 		bp->current_interval = BNX2_TIMER_INTERVAL;
6122 
6123 	if (check_link) {
6124 		u32 val;
6125 
6126 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6127 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6128 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6129 
6130 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6131 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6132 				bnx2_5706s_force_link_dn(bp, 1);
6133 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6134 			} else
6135 				bnx2_set_link(bp);
6136 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6137 			bnx2_set_link(bp);
6138 	}
6139 	spin_unlock(&bp->phy_lock);
6140 }
6141 
6142 static void
6143 bnx2_5708_serdes_timer(struct bnx2 *bp)
6144 {
6145 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6146 		return;
6147 
6148 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6149 		bp->serdes_an_pending = 0;
6150 		return;
6151 	}
6152 
6153 	spin_lock(&bp->phy_lock);
6154 	if (bp->serdes_an_pending)
6155 		bp->serdes_an_pending--;
6156 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6157 		u32 bmcr;
6158 
6159 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6160 		if (bmcr & BMCR_ANENABLE) {
6161 			bnx2_enable_forced_2g5(bp);
6162 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6163 		} else {
6164 			bnx2_disable_forced_2g5(bp);
6165 			bp->serdes_an_pending = 2;
6166 			bp->current_interval = BNX2_TIMER_INTERVAL;
6167 		}
6168 
6169 	} else
6170 		bp->current_interval = BNX2_TIMER_INTERVAL;
6171 
6172 	spin_unlock(&bp->phy_lock);
6173 }
6174 
6175 static void
6176 bnx2_timer(unsigned long data)
6177 {
6178 	struct bnx2 *bp = (struct bnx2 *) data;
6179 
6180 	if (!netif_running(bp->dev))
6181 		return;
6182 
6183 	if (atomic_read(&bp->intr_sem) != 0)
6184 		goto bnx2_restart_timer;
6185 
6186 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6187 	     BNX2_FLAG_USING_MSI)
6188 		bnx2_chk_missed_msi(bp);
6189 
6190 	bnx2_send_heart_beat(bp);
6191 
6192 	bp->stats_blk->stat_FwRxDrop =
6193 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6194 
6195 	/* workaround occasional corrupted counters */
6196 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6197 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6198 			BNX2_HC_COMMAND_STATS_NOW);
6199 
6200 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6201 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6202 			bnx2_5706_serdes_timer(bp);
6203 		else
6204 			bnx2_5708_serdes_timer(bp);
6205 	}
6206 
6207 bnx2_restart_timer:
6208 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6209 }
6210 
6211 static int
6212 bnx2_request_irq(struct bnx2 *bp)
6213 {
6214 	unsigned long flags;
6215 	struct bnx2_irq *irq;
6216 	int rc = 0, i;
6217 
6218 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6219 		flags = 0;
6220 	else
6221 		flags = IRQF_SHARED;
6222 
6223 	for (i = 0; i < bp->irq_nvecs; i++) {
6224 		irq = &bp->irq_tbl[i];
6225 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6226 				 &bp->bnx2_napi[i]);
6227 		if (rc)
6228 			break;
6229 		irq->requested = 1;
6230 	}
6231 	return rc;
6232 }
6233 
6234 static void
6235 __bnx2_free_irq(struct bnx2 *bp)
6236 {
6237 	struct bnx2_irq *irq;
6238 	int i;
6239 
6240 	for (i = 0; i < bp->irq_nvecs; i++) {
6241 		irq = &bp->irq_tbl[i];
6242 		if (irq->requested)
6243 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6244 		irq->requested = 0;
6245 	}
6246 }
6247 
6248 static void
6249 bnx2_free_irq(struct bnx2 *bp)
6250 {
6251 
6252 	__bnx2_free_irq(bp);
6253 	if (bp->flags & BNX2_FLAG_USING_MSI)
6254 		pci_disable_msi(bp->pdev);
6255 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6256 		pci_disable_msix(bp->pdev);
6257 
6258 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6259 }
6260 
6261 static void
6262 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6263 {
6264 	int i, total_vecs;
6265 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6266 	struct net_device *dev = bp->dev;
6267 	const int len = sizeof(bp->irq_tbl[0].name);
6268 
6269 	bnx2_setup_msix_tbl(bp);
6270 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6271 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6272 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6273 
6274 	/*  Need to flush the previous three writes to ensure MSI-X
6275 	 *  is setup properly */
6276 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6277 
6278 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6279 		msix_ent[i].entry = i;
6280 		msix_ent[i].vector = 0;
6281 	}
6282 
6283 	total_vecs = msix_vecs;
6284 #ifdef BCM_CNIC
6285 	total_vecs++;
6286 #endif
6287 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6288 					   BNX2_MIN_MSIX_VEC, total_vecs);
6289 	if (total_vecs < 0)
6290 		return;
6291 
6292 	msix_vecs = total_vecs;
6293 #ifdef BCM_CNIC
6294 	msix_vecs--;
6295 #endif
6296 	bp->irq_nvecs = msix_vecs;
6297 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6298 	for (i = 0; i < total_vecs; i++) {
6299 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6300 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6301 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6302 	}
6303 }
6304 
6305 static int
6306 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6307 {
6308 	int cpus = netif_get_num_default_rss_queues();
6309 	int msix_vecs;
6310 
6311 	if (!bp->num_req_rx_rings)
6312 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6313 	else if (!bp->num_req_tx_rings)
6314 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6315 	else
6316 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6317 
6318 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6319 
6320 	bp->irq_tbl[0].handler = bnx2_interrupt;
6321 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6322 	bp->irq_nvecs = 1;
6323 	bp->irq_tbl[0].vector = bp->pdev->irq;
6324 
6325 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6326 		bnx2_enable_msix(bp, msix_vecs);
6327 
6328 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6329 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6330 		if (pci_enable_msi(bp->pdev) == 0) {
6331 			bp->flags |= BNX2_FLAG_USING_MSI;
6332 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6333 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6334 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6335 			} else
6336 				bp->irq_tbl[0].handler = bnx2_msi;
6337 
6338 			bp->irq_tbl[0].vector = bp->pdev->irq;
6339 		}
6340 	}
6341 
6342 	if (!bp->num_req_tx_rings)
6343 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6344 	else
6345 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6346 
6347 	if (!bp->num_req_rx_rings)
6348 		bp->num_rx_rings = bp->irq_nvecs;
6349 	else
6350 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6351 
6352 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6353 
6354 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6355 }
6356 
6357 /* Called with rtnl_lock */
6358 static int
6359 bnx2_open(struct net_device *dev)
6360 {
6361 	struct bnx2 *bp = netdev_priv(dev);
6362 	int rc;
6363 
6364 	netif_carrier_off(dev);
6365 
6366 	bnx2_disable_int(bp);
6367 
6368 	rc = bnx2_setup_int_mode(bp, disable_msi);
6369 	if (rc)
6370 		goto open_err;
6371 	bnx2_init_napi(bp);
6372 	bnx2_napi_enable(bp);
6373 	rc = bnx2_alloc_mem(bp);
6374 	if (rc)
6375 		goto open_err;
6376 
6377 	rc = bnx2_request_irq(bp);
6378 	if (rc)
6379 		goto open_err;
6380 
6381 	rc = bnx2_init_nic(bp, 1);
6382 	if (rc)
6383 		goto open_err;
6384 
6385 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6386 
6387 	atomic_set(&bp->intr_sem, 0);
6388 
6389 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6390 
6391 	bnx2_enable_int(bp);
6392 
6393 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6394 		/* Test MSI to make sure it is working
6395 		 * If MSI test fails, go back to INTx mode
6396 		 */
6397 		if (bnx2_test_intr(bp) != 0) {
6398 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6399 
6400 			bnx2_disable_int(bp);
6401 			bnx2_free_irq(bp);
6402 
6403 			bnx2_setup_int_mode(bp, 1);
6404 
6405 			rc = bnx2_init_nic(bp, 0);
6406 
6407 			if (!rc)
6408 				rc = bnx2_request_irq(bp);
6409 
6410 			if (rc) {
6411 				del_timer_sync(&bp->timer);
6412 				goto open_err;
6413 			}
6414 			bnx2_enable_int(bp);
6415 		}
6416 	}
6417 	if (bp->flags & BNX2_FLAG_USING_MSI)
6418 		netdev_info(dev, "using MSI\n");
6419 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6420 		netdev_info(dev, "using MSIX\n");
6421 
6422 	netif_tx_start_all_queues(dev);
6423 out:
6424 	return rc;
6425 
6426 open_err:
6427 	bnx2_napi_disable(bp);
6428 	bnx2_free_skbs(bp);
6429 	bnx2_free_irq(bp);
6430 	bnx2_free_mem(bp);
6431 	bnx2_del_napi(bp);
6432 	goto out;
6433 }
6434 
6435 static void
6436 bnx2_reset_task(struct work_struct *work)
6437 {
6438 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6439 	int rc;
6440 	u16 pcicmd;
6441 
6442 	rtnl_lock();
6443 	if (!netif_running(bp->dev)) {
6444 		rtnl_unlock();
6445 		return;
6446 	}
6447 
6448 	bnx2_netif_stop(bp, true);
6449 
6450 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6451 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6452 		/* in case PCI block has reset */
6453 		pci_restore_state(bp->pdev);
6454 		pci_save_state(bp->pdev);
6455 	}
6456 	rc = bnx2_init_nic(bp, 1);
6457 	if (rc) {
6458 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6459 		bnx2_napi_enable(bp);
6460 		dev_close(bp->dev);
6461 		rtnl_unlock();
6462 		return;
6463 	}
6464 
6465 	atomic_set(&bp->intr_sem, 1);
6466 	bnx2_netif_start(bp, true);
6467 	rtnl_unlock();
6468 }
6469 
6470 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6471 
6472 static void
6473 bnx2_dump_ftq(struct bnx2 *bp)
6474 {
6475 	int i;
6476 	u32 reg, bdidx, cid, valid;
6477 	struct net_device *dev = bp->dev;
6478 	static const struct ftq_reg {
6479 		char *name;
6480 		u32 off;
6481 	} ftq_arr[] = {
6482 		BNX2_FTQ_ENTRY(RV2P_P),
6483 		BNX2_FTQ_ENTRY(RV2P_T),
6484 		BNX2_FTQ_ENTRY(RV2P_M),
6485 		BNX2_FTQ_ENTRY(TBDR_),
6486 		BNX2_FTQ_ENTRY(TDMA_),
6487 		BNX2_FTQ_ENTRY(TXP_),
6488 		BNX2_FTQ_ENTRY(TXP_),
6489 		BNX2_FTQ_ENTRY(TPAT_),
6490 		BNX2_FTQ_ENTRY(RXP_C),
6491 		BNX2_FTQ_ENTRY(RXP_),
6492 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6493 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6494 		BNX2_FTQ_ENTRY(COM_COMQ_),
6495 		BNX2_FTQ_ENTRY(CP_CPQ_),
6496 	};
6497 
6498 	netdev_err(dev, "<--- start FTQ dump --->\n");
6499 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6500 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6501 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6502 
6503 	netdev_err(dev, "CPU states:\n");
6504 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6505 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6506 			   reg, bnx2_reg_rd_ind(bp, reg),
6507 			   bnx2_reg_rd_ind(bp, reg + 4),
6508 			   bnx2_reg_rd_ind(bp, reg + 8),
6509 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6510 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6511 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6512 
6513 	netdev_err(dev, "<--- end FTQ dump --->\n");
6514 	netdev_err(dev, "<--- start TBDC dump --->\n");
6515 	netdev_err(dev, "TBDC free cnt: %ld\n",
6516 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6517 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6518 	for (i = 0; i < 0x20; i++) {
6519 		int j = 0;
6520 
6521 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6522 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6523 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6524 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6525 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6526 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6527 			j++;
6528 
6529 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6530 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6531 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6532 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6533 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6534 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6535 	}
6536 	netdev_err(dev, "<--- end TBDC dump --->\n");
6537 }
6538 
6539 static void
6540 bnx2_dump_state(struct bnx2 *bp)
6541 {
6542 	struct net_device *dev = bp->dev;
6543 	u32 val1, val2;
6544 
6545 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6546 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6547 		   atomic_read(&bp->intr_sem), val1);
6548 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6549 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6550 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6551 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6552 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6553 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6554 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6555 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6556 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6557 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6558 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6559 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6560 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6561 }
6562 
6563 static void
6564 bnx2_tx_timeout(struct net_device *dev)
6565 {
6566 	struct bnx2 *bp = netdev_priv(dev);
6567 
6568 	bnx2_dump_ftq(bp);
6569 	bnx2_dump_state(bp);
6570 	bnx2_dump_mcp_state(bp);
6571 
6572 	/* This allows the netif to be shutdown gracefully before resetting */
6573 	schedule_work(&bp->reset_task);
6574 }
6575 
6576 /* Called with netif_tx_lock.
6577  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6578  * netif_wake_queue().
6579  */
6580 static netdev_tx_t
6581 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6582 {
6583 	struct bnx2 *bp = netdev_priv(dev);
6584 	dma_addr_t mapping;
6585 	struct bnx2_tx_bd *txbd;
6586 	struct bnx2_sw_tx_bd *tx_buf;
6587 	u32 len, vlan_tag_flags, last_frag, mss;
6588 	u16 prod, ring_prod;
6589 	int i;
6590 	struct bnx2_napi *bnapi;
6591 	struct bnx2_tx_ring_info *txr;
6592 	struct netdev_queue *txq;
6593 
6594 	/*  Determine which tx ring we will be placed on */
6595 	i = skb_get_queue_mapping(skb);
6596 	bnapi = &bp->bnx2_napi[i];
6597 	txr = &bnapi->tx_ring;
6598 	txq = netdev_get_tx_queue(dev, i);
6599 
6600 	if (unlikely(bnx2_tx_avail(bp, txr) <
6601 	    (skb_shinfo(skb)->nr_frags + 1))) {
6602 		netif_tx_stop_queue(txq);
6603 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6604 
6605 		return NETDEV_TX_BUSY;
6606 	}
6607 	len = skb_headlen(skb);
6608 	prod = txr->tx_prod;
6609 	ring_prod = BNX2_TX_RING_IDX(prod);
6610 
6611 	vlan_tag_flags = 0;
6612 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6613 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6614 	}
6615 
6616 	if (skb_vlan_tag_present(skb)) {
6617 		vlan_tag_flags |=
6618 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6619 	}
6620 
6621 	if ((mss = skb_shinfo(skb)->gso_size)) {
6622 		u32 tcp_opt_len;
6623 		struct iphdr *iph;
6624 
6625 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6626 
6627 		tcp_opt_len = tcp_optlen(skb);
6628 
6629 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6630 			u32 tcp_off = skb_transport_offset(skb) -
6631 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6632 
6633 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6634 					  TX_BD_FLAGS_SW_FLAGS;
6635 			if (likely(tcp_off == 0))
6636 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6637 			else {
6638 				tcp_off >>= 3;
6639 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6640 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6641 						  ((tcp_off & 0x10) <<
6642 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6643 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6644 			}
6645 		} else {
6646 			iph = ip_hdr(skb);
6647 			if (tcp_opt_len || (iph->ihl > 5)) {
6648 				vlan_tag_flags |= ((iph->ihl - 5) +
6649 						   (tcp_opt_len >> 2)) << 8;
6650 			}
6651 		}
6652 	} else
6653 		mss = 0;
6654 
6655 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6656 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6657 		dev_kfree_skb_any(skb);
6658 		return NETDEV_TX_OK;
6659 	}
6660 
6661 	tx_buf = &txr->tx_buf_ring[ring_prod];
6662 	tx_buf->skb = skb;
6663 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6664 
6665 	txbd = &txr->tx_desc_ring[ring_prod];
6666 
6667 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6668 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6669 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6670 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6671 
6672 	last_frag = skb_shinfo(skb)->nr_frags;
6673 	tx_buf->nr_frags = last_frag;
6674 	tx_buf->is_gso = skb_is_gso(skb);
6675 
6676 	for (i = 0; i < last_frag; i++) {
6677 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6678 
6679 		prod = BNX2_NEXT_TX_BD(prod);
6680 		ring_prod = BNX2_TX_RING_IDX(prod);
6681 		txbd = &txr->tx_desc_ring[ring_prod];
6682 
6683 		len = skb_frag_size(frag);
6684 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6685 					   DMA_TO_DEVICE);
6686 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6687 			goto dma_error;
6688 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6689 				   mapping);
6690 
6691 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6692 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6693 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6694 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6695 
6696 	}
6697 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6698 
6699 	/* Sync BD data before updating TX mailbox */
6700 	wmb();
6701 
6702 	netdev_tx_sent_queue(txq, skb->len);
6703 
6704 	prod = BNX2_NEXT_TX_BD(prod);
6705 	txr->tx_prod_bseq += skb->len;
6706 
6707 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6708 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6709 
6710 	mmiowb();
6711 
6712 	txr->tx_prod = prod;
6713 
6714 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6715 		netif_tx_stop_queue(txq);
6716 
6717 		/* netif_tx_stop_queue() must be done before checking
6718 		 * tx index in bnx2_tx_avail() below, because in
6719 		 * bnx2_tx_int(), we update tx index before checking for
6720 		 * netif_tx_queue_stopped().
6721 		 */
6722 		smp_mb();
6723 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6724 			netif_tx_wake_queue(txq);
6725 	}
6726 
6727 	return NETDEV_TX_OK;
6728 dma_error:
6729 	/* save value of frag that failed */
6730 	last_frag = i;
6731 
6732 	/* start back at beginning and unmap skb */
6733 	prod = txr->tx_prod;
6734 	ring_prod = BNX2_TX_RING_IDX(prod);
6735 	tx_buf = &txr->tx_buf_ring[ring_prod];
6736 	tx_buf->skb = NULL;
6737 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6738 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6739 
6740 	/* unmap remaining mapped pages */
6741 	for (i = 0; i < last_frag; i++) {
6742 		prod = BNX2_NEXT_TX_BD(prod);
6743 		ring_prod = BNX2_TX_RING_IDX(prod);
6744 		tx_buf = &txr->tx_buf_ring[ring_prod];
6745 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6746 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6747 			       PCI_DMA_TODEVICE);
6748 	}
6749 
6750 	dev_kfree_skb_any(skb);
6751 	return NETDEV_TX_OK;
6752 }
6753 
6754 /* Called with rtnl_lock */
6755 static int
6756 bnx2_close(struct net_device *dev)
6757 {
6758 	struct bnx2 *bp = netdev_priv(dev);
6759 
6760 	bnx2_disable_int_sync(bp);
6761 	bnx2_napi_disable(bp);
6762 	netif_tx_disable(dev);
6763 	del_timer_sync(&bp->timer);
6764 	bnx2_shutdown_chip(bp);
6765 	bnx2_free_irq(bp);
6766 	bnx2_free_skbs(bp);
6767 	bnx2_free_mem(bp);
6768 	bnx2_del_napi(bp);
6769 	bp->link_up = 0;
6770 	netif_carrier_off(bp->dev);
6771 	return 0;
6772 }
6773 
6774 static void
6775 bnx2_save_stats(struct bnx2 *bp)
6776 {
6777 	u32 *hw_stats = (u32 *) bp->stats_blk;
6778 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6779 	int i;
6780 
6781 	/* The 1st 10 counters are 64-bit counters */
6782 	for (i = 0; i < 20; i += 2) {
6783 		u32 hi;
6784 		u64 lo;
6785 
6786 		hi = temp_stats[i] + hw_stats[i];
6787 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6788 		if (lo > 0xffffffff)
6789 			hi++;
6790 		temp_stats[i] = hi;
6791 		temp_stats[i + 1] = lo & 0xffffffff;
6792 	}
6793 
6794 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6795 		temp_stats[i] += hw_stats[i];
6796 }
6797 
6798 #define GET_64BIT_NET_STATS64(ctr)		\
6799 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6800 
6801 #define GET_64BIT_NET_STATS(ctr)				\
6802 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6803 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6804 
6805 #define GET_32BIT_NET_STATS(ctr)				\
6806 	(unsigned long) (bp->stats_blk->ctr +			\
6807 			 bp->temp_stats_blk->ctr)
6808 
6809 static struct rtnl_link_stats64 *
6810 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6811 {
6812 	struct bnx2 *bp = netdev_priv(dev);
6813 
6814 	if (bp->stats_blk == NULL)
6815 		return net_stats;
6816 
6817 	net_stats->rx_packets =
6818 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6819 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6820 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6821 
6822 	net_stats->tx_packets =
6823 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6824 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6825 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6826 
6827 	net_stats->rx_bytes =
6828 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6829 
6830 	net_stats->tx_bytes =
6831 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6832 
6833 	net_stats->multicast =
6834 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6835 
6836 	net_stats->collisions =
6837 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6838 
6839 	net_stats->rx_length_errors =
6840 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6841 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6842 
6843 	net_stats->rx_over_errors =
6844 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6845 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6846 
6847 	net_stats->rx_frame_errors =
6848 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6849 
6850 	net_stats->rx_crc_errors =
6851 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6852 
6853 	net_stats->rx_errors = net_stats->rx_length_errors +
6854 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6855 		net_stats->rx_crc_errors;
6856 
6857 	net_stats->tx_aborted_errors =
6858 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6859 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6860 
6861 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6862 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6863 		net_stats->tx_carrier_errors = 0;
6864 	else {
6865 		net_stats->tx_carrier_errors =
6866 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6867 	}
6868 
6869 	net_stats->tx_errors =
6870 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6871 		net_stats->tx_aborted_errors +
6872 		net_stats->tx_carrier_errors;
6873 
6874 	net_stats->rx_missed_errors =
6875 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6876 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6877 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6878 
6879 	return net_stats;
6880 }
6881 
6882 /* All ethtool functions called with rtnl_lock */
6883 
6884 static int
6885 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6886 {
6887 	struct bnx2 *bp = netdev_priv(dev);
6888 	int support_serdes = 0, support_copper = 0;
6889 
6890 	cmd->supported = SUPPORTED_Autoneg;
6891 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6892 		support_serdes = 1;
6893 		support_copper = 1;
6894 	} else if (bp->phy_port == PORT_FIBRE)
6895 		support_serdes = 1;
6896 	else
6897 		support_copper = 1;
6898 
6899 	if (support_serdes) {
6900 		cmd->supported |= SUPPORTED_1000baseT_Full |
6901 			SUPPORTED_FIBRE;
6902 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6903 			cmd->supported |= SUPPORTED_2500baseX_Full;
6904 
6905 	}
6906 	if (support_copper) {
6907 		cmd->supported |= SUPPORTED_10baseT_Half |
6908 			SUPPORTED_10baseT_Full |
6909 			SUPPORTED_100baseT_Half |
6910 			SUPPORTED_100baseT_Full |
6911 			SUPPORTED_1000baseT_Full |
6912 			SUPPORTED_TP;
6913 
6914 	}
6915 
6916 	spin_lock_bh(&bp->phy_lock);
6917 	cmd->port = bp->phy_port;
6918 	cmd->advertising = bp->advertising;
6919 
6920 	if (bp->autoneg & AUTONEG_SPEED) {
6921 		cmd->autoneg = AUTONEG_ENABLE;
6922 	} else {
6923 		cmd->autoneg = AUTONEG_DISABLE;
6924 	}
6925 
6926 	if (netif_carrier_ok(dev)) {
6927 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6928 		cmd->duplex = bp->duplex;
6929 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6930 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6931 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
6932 			else
6933 				cmd->eth_tp_mdix = ETH_TP_MDI;
6934 		}
6935 	}
6936 	else {
6937 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6938 		cmd->duplex = DUPLEX_UNKNOWN;
6939 	}
6940 	spin_unlock_bh(&bp->phy_lock);
6941 
6942 	cmd->transceiver = XCVR_INTERNAL;
6943 	cmd->phy_address = bp->phy_addr;
6944 
6945 	return 0;
6946 }
6947 
6948 static int
6949 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6950 {
6951 	struct bnx2 *bp = netdev_priv(dev);
6952 	u8 autoneg = bp->autoneg;
6953 	u8 req_duplex = bp->req_duplex;
6954 	u16 req_line_speed = bp->req_line_speed;
6955 	u32 advertising = bp->advertising;
6956 	int err = -EINVAL;
6957 
6958 	spin_lock_bh(&bp->phy_lock);
6959 
6960 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6961 		goto err_out_unlock;
6962 
6963 	if (cmd->port != bp->phy_port &&
6964 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6965 		goto err_out_unlock;
6966 
6967 	/* If device is down, we can store the settings only if the user
6968 	 * is setting the currently active port.
6969 	 */
6970 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6971 		goto err_out_unlock;
6972 
6973 	if (cmd->autoneg == AUTONEG_ENABLE) {
6974 		autoneg |= AUTONEG_SPEED;
6975 
6976 		advertising = cmd->advertising;
6977 		if (cmd->port == PORT_TP) {
6978 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6979 			if (!advertising)
6980 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6981 		} else {
6982 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6983 			if (!advertising)
6984 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6985 		}
6986 		advertising |= ADVERTISED_Autoneg;
6987 	}
6988 	else {
6989 		u32 speed = ethtool_cmd_speed(cmd);
6990 		if (cmd->port == PORT_FIBRE) {
6991 			if ((speed != SPEED_1000 &&
6992 			     speed != SPEED_2500) ||
6993 			    (cmd->duplex != DUPLEX_FULL))
6994 				goto err_out_unlock;
6995 
6996 			if (speed == SPEED_2500 &&
6997 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6998 				goto err_out_unlock;
6999 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7000 			goto err_out_unlock;
7001 
7002 		autoneg &= ~AUTONEG_SPEED;
7003 		req_line_speed = speed;
7004 		req_duplex = cmd->duplex;
7005 		advertising = 0;
7006 	}
7007 
7008 	bp->autoneg = autoneg;
7009 	bp->advertising = advertising;
7010 	bp->req_line_speed = req_line_speed;
7011 	bp->req_duplex = req_duplex;
7012 
7013 	err = 0;
7014 	/* If device is down, the new settings will be picked up when it is
7015 	 * brought up.
7016 	 */
7017 	if (netif_running(dev))
7018 		err = bnx2_setup_phy(bp, cmd->port);
7019 
7020 err_out_unlock:
7021 	spin_unlock_bh(&bp->phy_lock);
7022 
7023 	return err;
7024 }
7025 
7026 static void
7027 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7028 {
7029 	struct bnx2 *bp = netdev_priv(dev);
7030 
7031 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7032 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7033 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7034 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7035 }
7036 
7037 #define BNX2_REGDUMP_LEN		(32 * 1024)
7038 
7039 static int
7040 bnx2_get_regs_len(struct net_device *dev)
7041 {
7042 	return BNX2_REGDUMP_LEN;
7043 }
7044 
7045 static void
7046 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7047 {
7048 	u32 *p = _p, i, offset;
7049 	u8 *orig_p = _p;
7050 	struct bnx2 *bp = netdev_priv(dev);
7051 	static const u32 reg_boundaries[] = {
7052 		0x0000, 0x0098, 0x0400, 0x045c,
7053 		0x0800, 0x0880, 0x0c00, 0x0c10,
7054 		0x0c30, 0x0d08, 0x1000, 0x101c,
7055 		0x1040, 0x1048, 0x1080, 0x10a4,
7056 		0x1400, 0x1490, 0x1498, 0x14f0,
7057 		0x1500, 0x155c, 0x1580, 0x15dc,
7058 		0x1600, 0x1658, 0x1680, 0x16d8,
7059 		0x1800, 0x1820, 0x1840, 0x1854,
7060 		0x1880, 0x1894, 0x1900, 0x1984,
7061 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7062 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7063 		0x2000, 0x2030, 0x23c0, 0x2400,
7064 		0x2800, 0x2820, 0x2830, 0x2850,
7065 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7066 		0x3c00, 0x3c94, 0x4000, 0x4010,
7067 		0x4080, 0x4090, 0x43c0, 0x4458,
7068 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7069 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7070 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7071 		0x5fc0, 0x6000, 0x6400, 0x6428,
7072 		0x6800, 0x6848, 0x684c, 0x6860,
7073 		0x6888, 0x6910, 0x8000
7074 	};
7075 
7076 	regs->version = 0;
7077 
7078 	memset(p, 0, BNX2_REGDUMP_LEN);
7079 
7080 	if (!netif_running(bp->dev))
7081 		return;
7082 
7083 	i = 0;
7084 	offset = reg_boundaries[0];
7085 	p += offset;
7086 	while (offset < BNX2_REGDUMP_LEN) {
7087 		*p++ = BNX2_RD(bp, offset);
7088 		offset += 4;
7089 		if (offset == reg_boundaries[i + 1]) {
7090 			offset = reg_boundaries[i + 2];
7091 			p = (u32 *) (orig_p + offset);
7092 			i += 2;
7093 		}
7094 	}
7095 }
7096 
7097 static void
7098 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7099 {
7100 	struct bnx2 *bp = netdev_priv(dev);
7101 
7102 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7103 		wol->supported = 0;
7104 		wol->wolopts = 0;
7105 	}
7106 	else {
7107 		wol->supported = WAKE_MAGIC;
7108 		if (bp->wol)
7109 			wol->wolopts = WAKE_MAGIC;
7110 		else
7111 			wol->wolopts = 0;
7112 	}
7113 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7114 }
7115 
7116 static int
7117 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7118 {
7119 	struct bnx2 *bp = netdev_priv(dev);
7120 
7121 	if (wol->wolopts & ~WAKE_MAGIC)
7122 		return -EINVAL;
7123 
7124 	if (wol->wolopts & WAKE_MAGIC) {
7125 		if (bp->flags & BNX2_FLAG_NO_WOL)
7126 			return -EINVAL;
7127 
7128 		bp->wol = 1;
7129 	}
7130 	else {
7131 		bp->wol = 0;
7132 	}
7133 
7134 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7135 
7136 	return 0;
7137 }
7138 
7139 static int
7140 bnx2_nway_reset(struct net_device *dev)
7141 {
7142 	struct bnx2 *bp = netdev_priv(dev);
7143 	u32 bmcr;
7144 
7145 	if (!netif_running(dev))
7146 		return -EAGAIN;
7147 
7148 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7149 		return -EINVAL;
7150 	}
7151 
7152 	spin_lock_bh(&bp->phy_lock);
7153 
7154 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7155 		int rc;
7156 
7157 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7158 		spin_unlock_bh(&bp->phy_lock);
7159 		return rc;
7160 	}
7161 
7162 	/* Force a link down visible on the other side */
7163 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7164 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7165 		spin_unlock_bh(&bp->phy_lock);
7166 
7167 		msleep(20);
7168 
7169 		spin_lock_bh(&bp->phy_lock);
7170 
7171 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7172 		bp->serdes_an_pending = 1;
7173 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7174 	}
7175 
7176 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7177 	bmcr &= ~BMCR_LOOPBACK;
7178 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7179 
7180 	spin_unlock_bh(&bp->phy_lock);
7181 
7182 	return 0;
7183 }
7184 
7185 static u32
7186 bnx2_get_link(struct net_device *dev)
7187 {
7188 	struct bnx2 *bp = netdev_priv(dev);
7189 
7190 	return bp->link_up;
7191 }
7192 
7193 static int
7194 bnx2_get_eeprom_len(struct net_device *dev)
7195 {
7196 	struct bnx2 *bp = netdev_priv(dev);
7197 
7198 	if (bp->flash_info == NULL)
7199 		return 0;
7200 
7201 	return (int) bp->flash_size;
7202 }
7203 
7204 static int
7205 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7206 		u8 *eebuf)
7207 {
7208 	struct bnx2 *bp = netdev_priv(dev);
7209 	int rc;
7210 
7211 	/* parameters already validated in ethtool_get_eeprom */
7212 
7213 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7214 
7215 	return rc;
7216 }
7217 
7218 static int
7219 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7220 		u8 *eebuf)
7221 {
7222 	struct bnx2 *bp = netdev_priv(dev);
7223 	int rc;
7224 
7225 	/* parameters already validated in ethtool_set_eeprom */
7226 
7227 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7228 
7229 	return rc;
7230 }
7231 
7232 static int
7233 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7234 {
7235 	struct bnx2 *bp = netdev_priv(dev);
7236 
7237 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7238 
7239 	coal->rx_coalesce_usecs = bp->rx_ticks;
7240 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7241 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7242 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7243 
7244 	coal->tx_coalesce_usecs = bp->tx_ticks;
7245 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7246 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7247 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7248 
7249 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7250 
7251 	return 0;
7252 }
7253 
7254 static int
7255 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7256 {
7257 	struct bnx2 *bp = netdev_priv(dev);
7258 
7259 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7260 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7261 
7262 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7263 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7264 
7265 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7266 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7267 
7268 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7269 	if (bp->rx_quick_cons_trip_int > 0xff)
7270 		bp->rx_quick_cons_trip_int = 0xff;
7271 
7272 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7273 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7274 
7275 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7276 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7277 
7278 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7279 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7280 
7281 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7282 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7283 		0xff;
7284 
7285 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7286 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7287 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7288 			bp->stats_ticks = USEC_PER_SEC;
7289 	}
7290 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7291 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7292 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7293 
7294 	if (netif_running(bp->dev)) {
7295 		bnx2_netif_stop(bp, true);
7296 		bnx2_init_nic(bp, 0);
7297 		bnx2_netif_start(bp, true);
7298 	}
7299 
7300 	return 0;
7301 }
7302 
7303 static void
7304 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7305 {
7306 	struct bnx2 *bp = netdev_priv(dev);
7307 
7308 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7309 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7310 
7311 	ering->rx_pending = bp->rx_ring_size;
7312 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7313 
7314 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7315 	ering->tx_pending = bp->tx_ring_size;
7316 }
7317 
7318 static int
7319 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7320 {
7321 	if (netif_running(bp->dev)) {
7322 		/* Reset will erase chipset stats; save them */
7323 		bnx2_save_stats(bp);
7324 
7325 		bnx2_netif_stop(bp, true);
7326 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7327 		if (reset_irq) {
7328 			bnx2_free_irq(bp);
7329 			bnx2_del_napi(bp);
7330 		} else {
7331 			__bnx2_free_irq(bp);
7332 		}
7333 		bnx2_free_skbs(bp);
7334 		bnx2_free_mem(bp);
7335 	}
7336 
7337 	bnx2_set_rx_ring_size(bp, rx);
7338 	bp->tx_ring_size = tx;
7339 
7340 	if (netif_running(bp->dev)) {
7341 		int rc = 0;
7342 
7343 		if (reset_irq) {
7344 			rc = bnx2_setup_int_mode(bp, disable_msi);
7345 			bnx2_init_napi(bp);
7346 		}
7347 
7348 		if (!rc)
7349 			rc = bnx2_alloc_mem(bp);
7350 
7351 		if (!rc)
7352 			rc = bnx2_request_irq(bp);
7353 
7354 		if (!rc)
7355 			rc = bnx2_init_nic(bp, 0);
7356 
7357 		if (rc) {
7358 			bnx2_napi_enable(bp);
7359 			dev_close(bp->dev);
7360 			return rc;
7361 		}
7362 #ifdef BCM_CNIC
7363 		mutex_lock(&bp->cnic_lock);
7364 		/* Let cnic know about the new status block. */
7365 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7366 			bnx2_setup_cnic_irq_info(bp);
7367 		mutex_unlock(&bp->cnic_lock);
7368 #endif
7369 		bnx2_netif_start(bp, true);
7370 	}
7371 	return 0;
7372 }
7373 
7374 static int
7375 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7376 {
7377 	struct bnx2 *bp = netdev_priv(dev);
7378 	int rc;
7379 
7380 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7381 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7382 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7383 
7384 		return -EINVAL;
7385 	}
7386 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7387 				   false);
7388 	return rc;
7389 }
7390 
7391 static void
7392 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7393 {
7394 	struct bnx2 *bp = netdev_priv(dev);
7395 
7396 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7397 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7398 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7399 }
7400 
7401 static int
7402 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7403 {
7404 	struct bnx2 *bp = netdev_priv(dev);
7405 
7406 	bp->req_flow_ctrl = 0;
7407 	if (epause->rx_pause)
7408 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7409 	if (epause->tx_pause)
7410 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7411 
7412 	if (epause->autoneg) {
7413 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7414 	}
7415 	else {
7416 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7417 	}
7418 
7419 	if (netif_running(dev)) {
7420 		spin_lock_bh(&bp->phy_lock);
7421 		bnx2_setup_phy(bp, bp->phy_port);
7422 		spin_unlock_bh(&bp->phy_lock);
7423 	}
7424 
7425 	return 0;
7426 }
7427 
7428 static struct {
7429 	char string[ETH_GSTRING_LEN];
7430 } bnx2_stats_str_arr[] = {
7431 	{ "rx_bytes" },
7432 	{ "rx_error_bytes" },
7433 	{ "tx_bytes" },
7434 	{ "tx_error_bytes" },
7435 	{ "rx_ucast_packets" },
7436 	{ "rx_mcast_packets" },
7437 	{ "rx_bcast_packets" },
7438 	{ "tx_ucast_packets" },
7439 	{ "tx_mcast_packets" },
7440 	{ "tx_bcast_packets" },
7441 	{ "tx_mac_errors" },
7442 	{ "tx_carrier_errors" },
7443 	{ "rx_crc_errors" },
7444 	{ "rx_align_errors" },
7445 	{ "tx_single_collisions" },
7446 	{ "tx_multi_collisions" },
7447 	{ "tx_deferred" },
7448 	{ "tx_excess_collisions" },
7449 	{ "tx_late_collisions" },
7450 	{ "tx_total_collisions" },
7451 	{ "rx_fragments" },
7452 	{ "rx_jabbers" },
7453 	{ "rx_undersize_packets" },
7454 	{ "rx_oversize_packets" },
7455 	{ "rx_64_byte_packets" },
7456 	{ "rx_65_to_127_byte_packets" },
7457 	{ "rx_128_to_255_byte_packets" },
7458 	{ "rx_256_to_511_byte_packets" },
7459 	{ "rx_512_to_1023_byte_packets" },
7460 	{ "rx_1024_to_1522_byte_packets" },
7461 	{ "rx_1523_to_9022_byte_packets" },
7462 	{ "tx_64_byte_packets" },
7463 	{ "tx_65_to_127_byte_packets" },
7464 	{ "tx_128_to_255_byte_packets" },
7465 	{ "tx_256_to_511_byte_packets" },
7466 	{ "tx_512_to_1023_byte_packets" },
7467 	{ "tx_1024_to_1522_byte_packets" },
7468 	{ "tx_1523_to_9022_byte_packets" },
7469 	{ "rx_xon_frames" },
7470 	{ "rx_xoff_frames" },
7471 	{ "tx_xon_frames" },
7472 	{ "tx_xoff_frames" },
7473 	{ "rx_mac_ctrl_frames" },
7474 	{ "rx_filtered_packets" },
7475 	{ "rx_ftq_discards" },
7476 	{ "rx_discards" },
7477 	{ "rx_fw_discards" },
7478 };
7479 
7480 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7481 
7482 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7483 
7484 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7485     STATS_OFFSET32(stat_IfHCInOctets_hi),
7486     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7487     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7488     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7489     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7490     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7491     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7492     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7493     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7494     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7495     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7496     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7497     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7498     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7499     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7500     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7501     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7502     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7503     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7504     STATS_OFFSET32(stat_EtherStatsCollisions),
7505     STATS_OFFSET32(stat_EtherStatsFragments),
7506     STATS_OFFSET32(stat_EtherStatsJabbers),
7507     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7508     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7509     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7510     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7511     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7512     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7513     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7514     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7515     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7516     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7517     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7518     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7519     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7520     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7521     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7522     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7523     STATS_OFFSET32(stat_XonPauseFramesReceived),
7524     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7525     STATS_OFFSET32(stat_OutXonSent),
7526     STATS_OFFSET32(stat_OutXoffSent),
7527     STATS_OFFSET32(stat_MacControlFramesReceived),
7528     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7529     STATS_OFFSET32(stat_IfInFTQDiscards),
7530     STATS_OFFSET32(stat_IfInMBUFDiscards),
7531     STATS_OFFSET32(stat_FwRxDrop),
7532 };
7533 
7534 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7535  * skipped because of errata.
7536  */
7537 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7538 	8,0,8,8,8,8,8,8,8,8,
7539 	4,0,4,4,4,4,4,4,4,4,
7540 	4,4,4,4,4,4,4,4,4,4,
7541 	4,4,4,4,4,4,4,4,4,4,
7542 	4,4,4,4,4,4,4,
7543 };
7544 
7545 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7546 	8,0,8,8,8,8,8,8,8,8,
7547 	4,4,4,4,4,4,4,4,4,4,
7548 	4,4,4,4,4,4,4,4,4,4,
7549 	4,4,4,4,4,4,4,4,4,4,
7550 	4,4,4,4,4,4,4,
7551 };
7552 
7553 #define BNX2_NUM_TESTS 6
7554 
7555 static struct {
7556 	char string[ETH_GSTRING_LEN];
7557 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7558 	{ "register_test (offline)" },
7559 	{ "memory_test (offline)" },
7560 	{ "loopback_test (offline)" },
7561 	{ "nvram_test (online)" },
7562 	{ "interrupt_test (online)" },
7563 	{ "link_test (online)" },
7564 };
7565 
7566 static int
7567 bnx2_get_sset_count(struct net_device *dev, int sset)
7568 {
7569 	switch (sset) {
7570 	case ETH_SS_TEST:
7571 		return BNX2_NUM_TESTS;
7572 	case ETH_SS_STATS:
7573 		return BNX2_NUM_STATS;
7574 	default:
7575 		return -EOPNOTSUPP;
7576 	}
7577 }
7578 
7579 static void
7580 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7581 {
7582 	struct bnx2 *bp = netdev_priv(dev);
7583 
7584 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7585 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7586 		int i;
7587 
7588 		bnx2_netif_stop(bp, true);
7589 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7590 		bnx2_free_skbs(bp);
7591 
7592 		if (bnx2_test_registers(bp) != 0) {
7593 			buf[0] = 1;
7594 			etest->flags |= ETH_TEST_FL_FAILED;
7595 		}
7596 		if (bnx2_test_memory(bp) != 0) {
7597 			buf[1] = 1;
7598 			etest->flags |= ETH_TEST_FL_FAILED;
7599 		}
7600 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7601 			etest->flags |= ETH_TEST_FL_FAILED;
7602 
7603 		if (!netif_running(bp->dev))
7604 			bnx2_shutdown_chip(bp);
7605 		else {
7606 			bnx2_init_nic(bp, 1);
7607 			bnx2_netif_start(bp, true);
7608 		}
7609 
7610 		/* wait for link up */
7611 		for (i = 0; i < 7; i++) {
7612 			if (bp->link_up)
7613 				break;
7614 			msleep_interruptible(1000);
7615 		}
7616 	}
7617 
7618 	if (bnx2_test_nvram(bp) != 0) {
7619 		buf[3] = 1;
7620 		etest->flags |= ETH_TEST_FL_FAILED;
7621 	}
7622 	if (bnx2_test_intr(bp) != 0) {
7623 		buf[4] = 1;
7624 		etest->flags |= ETH_TEST_FL_FAILED;
7625 	}
7626 
7627 	if (bnx2_test_link(bp) != 0) {
7628 		buf[5] = 1;
7629 		etest->flags |= ETH_TEST_FL_FAILED;
7630 
7631 	}
7632 }
7633 
7634 static void
7635 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7636 {
7637 	switch (stringset) {
7638 	case ETH_SS_STATS:
7639 		memcpy(buf, bnx2_stats_str_arr,
7640 			sizeof(bnx2_stats_str_arr));
7641 		break;
7642 	case ETH_SS_TEST:
7643 		memcpy(buf, bnx2_tests_str_arr,
7644 			sizeof(bnx2_tests_str_arr));
7645 		break;
7646 	}
7647 }
7648 
7649 static void
7650 bnx2_get_ethtool_stats(struct net_device *dev,
7651 		struct ethtool_stats *stats, u64 *buf)
7652 {
7653 	struct bnx2 *bp = netdev_priv(dev);
7654 	int i;
7655 	u32 *hw_stats = (u32 *) bp->stats_blk;
7656 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7657 	u8 *stats_len_arr = NULL;
7658 
7659 	if (hw_stats == NULL) {
7660 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7661 		return;
7662 	}
7663 
7664 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7665 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7666 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7667 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7668 		stats_len_arr = bnx2_5706_stats_len_arr;
7669 	else
7670 		stats_len_arr = bnx2_5708_stats_len_arr;
7671 
7672 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7673 		unsigned long offset;
7674 
7675 		if (stats_len_arr[i] == 0) {
7676 			/* skip this counter */
7677 			buf[i] = 0;
7678 			continue;
7679 		}
7680 
7681 		offset = bnx2_stats_offset_arr[i];
7682 		if (stats_len_arr[i] == 4) {
7683 			/* 4-byte counter */
7684 			buf[i] = (u64) *(hw_stats + offset) +
7685 				 *(temp_stats + offset);
7686 			continue;
7687 		}
7688 		/* 8-byte counter */
7689 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7690 			 *(hw_stats + offset + 1) +
7691 			 (((u64) *(temp_stats + offset)) << 32) +
7692 			 *(temp_stats + offset + 1);
7693 	}
7694 }
7695 
7696 static int
7697 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7698 {
7699 	struct bnx2 *bp = netdev_priv(dev);
7700 
7701 	switch (state) {
7702 	case ETHTOOL_ID_ACTIVE:
7703 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7704 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7705 		return 1;	/* cycle on/off once per second */
7706 
7707 	case ETHTOOL_ID_ON:
7708 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7709 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7710 			BNX2_EMAC_LED_100MB_OVERRIDE |
7711 			BNX2_EMAC_LED_10MB_OVERRIDE |
7712 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7713 			BNX2_EMAC_LED_TRAFFIC);
7714 		break;
7715 
7716 	case ETHTOOL_ID_OFF:
7717 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7718 		break;
7719 
7720 	case ETHTOOL_ID_INACTIVE:
7721 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7722 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7723 		break;
7724 	}
7725 
7726 	return 0;
7727 }
7728 
7729 static int
7730 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7731 {
7732 	struct bnx2 *bp = netdev_priv(dev);
7733 
7734 	/* TSO with VLAN tag won't work with current firmware */
7735 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7736 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7737 	else
7738 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7739 
7740 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7741 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7742 	    netif_running(dev)) {
7743 		bnx2_netif_stop(bp, false);
7744 		dev->features = features;
7745 		bnx2_set_rx_mode(dev);
7746 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7747 		bnx2_netif_start(bp, false);
7748 		return 1;
7749 	}
7750 
7751 	return 0;
7752 }
7753 
7754 static void bnx2_get_channels(struct net_device *dev,
7755 			      struct ethtool_channels *channels)
7756 {
7757 	struct bnx2 *bp = netdev_priv(dev);
7758 	u32 max_rx_rings = 1;
7759 	u32 max_tx_rings = 1;
7760 
7761 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7762 		max_rx_rings = RX_MAX_RINGS;
7763 		max_tx_rings = TX_MAX_RINGS;
7764 	}
7765 
7766 	channels->max_rx = max_rx_rings;
7767 	channels->max_tx = max_tx_rings;
7768 	channels->max_other = 0;
7769 	channels->max_combined = 0;
7770 	channels->rx_count = bp->num_rx_rings;
7771 	channels->tx_count = bp->num_tx_rings;
7772 	channels->other_count = 0;
7773 	channels->combined_count = 0;
7774 }
7775 
7776 static int bnx2_set_channels(struct net_device *dev,
7777 			      struct ethtool_channels *channels)
7778 {
7779 	struct bnx2 *bp = netdev_priv(dev);
7780 	u32 max_rx_rings = 1;
7781 	u32 max_tx_rings = 1;
7782 	int rc = 0;
7783 
7784 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7785 		max_rx_rings = RX_MAX_RINGS;
7786 		max_tx_rings = TX_MAX_RINGS;
7787 	}
7788 	if (channels->rx_count > max_rx_rings ||
7789 	    channels->tx_count > max_tx_rings)
7790 		return -EINVAL;
7791 
7792 	bp->num_req_rx_rings = channels->rx_count;
7793 	bp->num_req_tx_rings = channels->tx_count;
7794 
7795 	if (netif_running(dev))
7796 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7797 					   bp->tx_ring_size, true);
7798 
7799 	return rc;
7800 }
7801 
7802 static const struct ethtool_ops bnx2_ethtool_ops = {
7803 	.get_settings		= bnx2_get_settings,
7804 	.set_settings		= bnx2_set_settings,
7805 	.get_drvinfo		= bnx2_get_drvinfo,
7806 	.get_regs_len		= bnx2_get_regs_len,
7807 	.get_regs		= bnx2_get_regs,
7808 	.get_wol		= bnx2_get_wol,
7809 	.set_wol		= bnx2_set_wol,
7810 	.nway_reset		= bnx2_nway_reset,
7811 	.get_link		= bnx2_get_link,
7812 	.get_eeprom_len		= bnx2_get_eeprom_len,
7813 	.get_eeprom		= bnx2_get_eeprom,
7814 	.set_eeprom		= bnx2_set_eeprom,
7815 	.get_coalesce		= bnx2_get_coalesce,
7816 	.set_coalesce		= bnx2_set_coalesce,
7817 	.get_ringparam		= bnx2_get_ringparam,
7818 	.set_ringparam		= bnx2_set_ringparam,
7819 	.get_pauseparam		= bnx2_get_pauseparam,
7820 	.set_pauseparam		= bnx2_set_pauseparam,
7821 	.self_test		= bnx2_self_test,
7822 	.get_strings		= bnx2_get_strings,
7823 	.set_phys_id		= bnx2_set_phys_id,
7824 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7825 	.get_sset_count		= bnx2_get_sset_count,
7826 	.get_channels		= bnx2_get_channels,
7827 	.set_channels		= bnx2_set_channels,
7828 };
7829 
7830 /* Called with rtnl_lock */
7831 static int
7832 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7833 {
7834 	struct mii_ioctl_data *data = if_mii(ifr);
7835 	struct bnx2 *bp = netdev_priv(dev);
7836 	int err;
7837 
7838 	switch(cmd) {
7839 	case SIOCGMIIPHY:
7840 		data->phy_id = bp->phy_addr;
7841 
7842 		/* fallthru */
7843 	case SIOCGMIIREG: {
7844 		u32 mii_regval;
7845 
7846 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7847 			return -EOPNOTSUPP;
7848 
7849 		if (!netif_running(dev))
7850 			return -EAGAIN;
7851 
7852 		spin_lock_bh(&bp->phy_lock);
7853 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7854 		spin_unlock_bh(&bp->phy_lock);
7855 
7856 		data->val_out = mii_regval;
7857 
7858 		return err;
7859 	}
7860 
7861 	case SIOCSMIIREG:
7862 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7863 			return -EOPNOTSUPP;
7864 
7865 		if (!netif_running(dev))
7866 			return -EAGAIN;
7867 
7868 		spin_lock_bh(&bp->phy_lock);
7869 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7870 		spin_unlock_bh(&bp->phy_lock);
7871 
7872 		return err;
7873 
7874 	default:
7875 		/* do nothing */
7876 		break;
7877 	}
7878 	return -EOPNOTSUPP;
7879 }
7880 
7881 /* Called with rtnl_lock */
7882 static int
7883 bnx2_change_mac_addr(struct net_device *dev, void *p)
7884 {
7885 	struct sockaddr *addr = p;
7886 	struct bnx2 *bp = netdev_priv(dev);
7887 
7888 	if (!is_valid_ether_addr(addr->sa_data))
7889 		return -EADDRNOTAVAIL;
7890 
7891 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7892 	if (netif_running(dev))
7893 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7894 
7895 	return 0;
7896 }
7897 
7898 /* Called with rtnl_lock */
7899 static int
7900 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7901 {
7902 	struct bnx2 *bp = netdev_priv(dev);
7903 
7904 	dev->mtu = new_mtu;
7905 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7906 				     false);
7907 }
7908 
7909 #ifdef CONFIG_NET_POLL_CONTROLLER
7910 static void
7911 poll_bnx2(struct net_device *dev)
7912 {
7913 	struct bnx2 *bp = netdev_priv(dev);
7914 	int i;
7915 
7916 	for (i = 0; i < bp->irq_nvecs; i++) {
7917 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7918 
7919 		disable_irq(irq->vector);
7920 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7921 		enable_irq(irq->vector);
7922 	}
7923 }
7924 #endif
7925 
7926 static void
7927 bnx2_get_5709_media(struct bnx2 *bp)
7928 {
7929 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7930 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7931 	u32 strap;
7932 
7933 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7934 		return;
7935 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7936 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7937 		return;
7938 	}
7939 
7940 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7941 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7942 	else
7943 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7944 
7945 	if (bp->func == 0) {
7946 		switch (strap) {
7947 		case 0x4:
7948 		case 0x5:
7949 		case 0x6:
7950 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7951 			return;
7952 		}
7953 	} else {
7954 		switch (strap) {
7955 		case 0x1:
7956 		case 0x2:
7957 		case 0x4:
7958 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7959 			return;
7960 		}
7961 	}
7962 }
7963 
7964 static void
7965 bnx2_get_pci_speed(struct bnx2 *bp)
7966 {
7967 	u32 reg;
7968 
7969 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7970 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7971 		u32 clkreg;
7972 
7973 		bp->flags |= BNX2_FLAG_PCIX;
7974 
7975 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7976 
7977 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7978 		switch (clkreg) {
7979 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7980 			bp->bus_speed_mhz = 133;
7981 			break;
7982 
7983 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7984 			bp->bus_speed_mhz = 100;
7985 			break;
7986 
7987 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7988 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7989 			bp->bus_speed_mhz = 66;
7990 			break;
7991 
7992 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7993 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7994 			bp->bus_speed_mhz = 50;
7995 			break;
7996 
7997 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7998 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7999 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8000 			bp->bus_speed_mhz = 33;
8001 			break;
8002 		}
8003 	}
8004 	else {
8005 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8006 			bp->bus_speed_mhz = 66;
8007 		else
8008 			bp->bus_speed_mhz = 33;
8009 	}
8010 
8011 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8012 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8013 
8014 }
8015 
8016 static void
8017 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8018 {
8019 	int rc, i, j;
8020 	u8 *data;
8021 	unsigned int block_end, rosize, len;
8022 
8023 #define BNX2_VPD_NVRAM_OFFSET	0x300
8024 #define BNX2_VPD_LEN		128
8025 #define BNX2_MAX_VER_SLEN	30
8026 
8027 	data = kmalloc(256, GFP_KERNEL);
8028 	if (!data)
8029 		return;
8030 
8031 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8032 			     BNX2_VPD_LEN);
8033 	if (rc)
8034 		goto vpd_done;
8035 
8036 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8037 		data[i] = data[i + BNX2_VPD_LEN + 3];
8038 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8039 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8040 		data[i + 3] = data[i + BNX2_VPD_LEN];
8041 	}
8042 
8043 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8044 	if (i < 0)
8045 		goto vpd_done;
8046 
8047 	rosize = pci_vpd_lrdt_size(&data[i]);
8048 	i += PCI_VPD_LRDT_TAG_SIZE;
8049 	block_end = i + rosize;
8050 
8051 	if (block_end > BNX2_VPD_LEN)
8052 		goto vpd_done;
8053 
8054 	j = pci_vpd_find_info_keyword(data, i, rosize,
8055 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8056 	if (j < 0)
8057 		goto vpd_done;
8058 
8059 	len = pci_vpd_info_field_size(&data[j]);
8060 
8061 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8062 	if (j + len > block_end || len != 4 ||
8063 	    memcmp(&data[j], "1028", 4))
8064 		goto vpd_done;
8065 
8066 	j = pci_vpd_find_info_keyword(data, i, rosize,
8067 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8068 	if (j < 0)
8069 		goto vpd_done;
8070 
8071 	len = pci_vpd_info_field_size(&data[j]);
8072 
8073 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8074 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8075 		goto vpd_done;
8076 
8077 	memcpy(bp->fw_version, &data[j], len);
8078 	bp->fw_version[len] = ' ';
8079 
8080 vpd_done:
8081 	kfree(data);
8082 }
8083 
8084 static int
8085 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8086 {
8087 	struct bnx2 *bp;
8088 	int rc, i, j;
8089 	u32 reg;
8090 	u64 dma_mask, persist_dma_mask;
8091 	int err;
8092 
8093 	SET_NETDEV_DEV(dev, &pdev->dev);
8094 	bp = netdev_priv(dev);
8095 
8096 	bp->flags = 0;
8097 	bp->phy_flags = 0;
8098 
8099 	bp->temp_stats_blk =
8100 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8101 
8102 	if (bp->temp_stats_blk == NULL) {
8103 		rc = -ENOMEM;
8104 		goto err_out;
8105 	}
8106 
8107 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8108 	rc = pci_enable_device(pdev);
8109 	if (rc) {
8110 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8111 		goto err_out;
8112 	}
8113 
8114 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8115 		dev_err(&pdev->dev,
8116 			"Cannot find PCI device base address, aborting\n");
8117 		rc = -ENODEV;
8118 		goto err_out_disable;
8119 	}
8120 
8121 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8122 	if (rc) {
8123 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8124 		goto err_out_disable;
8125 	}
8126 
8127 	pci_set_master(pdev);
8128 
8129 	bp->pm_cap = pdev->pm_cap;
8130 	if (bp->pm_cap == 0) {
8131 		dev_err(&pdev->dev,
8132 			"Cannot find power management capability, aborting\n");
8133 		rc = -EIO;
8134 		goto err_out_release;
8135 	}
8136 
8137 	bp->dev = dev;
8138 	bp->pdev = pdev;
8139 
8140 	spin_lock_init(&bp->phy_lock);
8141 	spin_lock_init(&bp->indirect_lock);
8142 #ifdef BCM_CNIC
8143 	mutex_init(&bp->cnic_lock);
8144 #endif
8145 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8146 
8147 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8148 							 TX_MAX_TSS_RINGS + 1));
8149 	if (!bp->regview) {
8150 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8151 		rc = -ENOMEM;
8152 		goto err_out_release;
8153 	}
8154 
8155 	/* Configure byte swap and enable write to the reg_window registers.
8156 	 * Rely on CPU to do target byte swapping on big endian systems
8157 	 * The chip's target access swapping will not swap all accesses
8158 	 */
8159 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8160 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8161 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8162 
8163 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8164 
8165 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8166 		if (!pci_is_pcie(pdev)) {
8167 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8168 			rc = -EIO;
8169 			goto err_out_unmap;
8170 		}
8171 		bp->flags |= BNX2_FLAG_PCIE;
8172 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8173 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8174 
8175 		/* AER (Advanced Error Reporting) hooks */
8176 		err = pci_enable_pcie_error_reporting(pdev);
8177 		if (!err)
8178 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8179 
8180 	} else {
8181 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8182 		if (bp->pcix_cap == 0) {
8183 			dev_err(&pdev->dev,
8184 				"Cannot find PCIX capability, aborting\n");
8185 			rc = -EIO;
8186 			goto err_out_unmap;
8187 		}
8188 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8189 	}
8190 
8191 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8192 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8193 		if (pdev->msix_cap)
8194 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8195 	}
8196 
8197 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8198 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8199 		if (pdev->msi_cap)
8200 			bp->flags |= BNX2_FLAG_MSI_CAP;
8201 	}
8202 
8203 	/* 5708 cannot support DMA addresses > 40-bit.  */
8204 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8205 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8206 	else
8207 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8208 
8209 	/* Configure DMA attributes. */
8210 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8211 		dev->features |= NETIF_F_HIGHDMA;
8212 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8213 		if (rc) {
8214 			dev_err(&pdev->dev,
8215 				"pci_set_consistent_dma_mask failed, aborting\n");
8216 			goto err_out_unmap;
8217 		}
8218 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8219 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8220 		goto err_out_unmap;
8221 	}
8222 
8223 	if (!(bp->flags & BNX2_FLAG_PCIE))
8224 		bnx2_get_pci_speed(bp);
8225 
8226 	/* 5706A0 may falsely detect SERR and PERR. */
8227 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8228 		reg = BNX2_RD(bp, PCI_COMMAND);
8229 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8230 		BNX2_WR(bp, PCI_COMMAND, reg);
8231 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8232 		!(bp->flags & BNX2_FLAG_PCIX)) {
8233 
8234 		dev_err(&pdev->dev,
8235 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8236 		goto err_out_unmap;
8237 	}
8238 
8239 	bnx2_init_nvram(bp);
8240 
8241 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8242 
8243 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8244 		bp->func = 1;
8245 
8246 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8247 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8248 		u32 off = bp->func << 2;
8249 
8250 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8251 	} else
8252 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8253 
8254 	/* Get the permanent MAC address.  First we need to make sure the
8255 	 * firmware is actually running.
8256 	 */
8257 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8258 
8259 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8260 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8261 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8262 		rc = -ENODEV;
8263 		goto err_out_unmap;
8264 	}
8265 
8266 	bnx2_read_vpd_fw_ver(bp);
8267 
8268 	j = strlen(bp->fw_version);
8269 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8270 	for (i = 0; i < 3 && j < 24; i++) {
8271 		u8 num, k, skip0;
8272 
8273 		if (i == 0) {
8274 			bp->fw_version[j++] = 'b';
8275 			bp->fw_version[j++] = 'c';
8276 			bp->fw_version[j++] = ' ';
8277 		}
8278 		num = (u8) (reg >> (24 - (i * 8)));
8279 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8280 			if (num >= k || !skip0 || k == 1) {
8281 				bp->fw_version[j++] = (num / k) + '0';
8282 				skip0 = 0;
8283 			}
8284 		}
8285 		if (i != 2)
8286 			bp->fw_version[j++] = '.';
8287 	}
8288 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8289 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8290 		bp->wol = 1;
8291 
8292 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8293 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8294 
8295 		for (i = 0; i < 30; i++) {
8296 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8297 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8298 				break;
8299 			msleep(10);
8300 		}
8301 	}
8302 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8303 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8304 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8305 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8306 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8307 
8308 		if (j < 32)
8309 			bp->fw_version[j++] = ' ';
8310 		for (i = 0; i < 3 && j < 28; i++) {
8311 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8312 			reg = be32_to_cpu(reg);
8313 			memcpy(&bp->fw_version[j], &reg, 4);
8314 			j += 4;
8315 		}
8316 	}
8317 
8318 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8319 	bp->mac_addr[0] = (u8) (reg >> 8);
8320 	bp->mac_addr[1] = (u8) reg;
8321 
8322 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8323 	bp->mac_addr[2] = (u8) (reg >> 24);
8324 	bp->mac_addr[3] = (u8) (reg >> 16);
8325 	bp->mac_addr[4] = (u8) (reg >> 8);
8326 	bp->mac_addr[5] = (u8) reg;
8327 
8328 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8329 	bnx2_set_rx_ring_size(bp, 255);
8330 
8331 	bp->tx_quick_cons_trip_int = 2;
8332 	bp->tx_quick_cons_trip = 20;
8333 	bp->tx_ticks_int = 18;
8334 	bp->tx_ticks = 80;
8335 
8336 	bp->rx_quick_cons_trip_int = 2;
8337 	bp->rx_quick_cons_trip = 12;
8338 	bp->rx_ticks_int = 18;
8339 	bp->rx_ticks = 18;
8340 
8341 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8342 
8343 	bp->current_interval = BNX2_TIMER_INTERVAL;
8344 
8345 	bp->phy_addr = 1;
8346 
8347 	/* allocate stats_blk */
8348 	rc = bnx2_alloc_stats_blk(dev);
8349 	if (rc)
8350 		goto err_out_unmap;
8351 
8352 	/* Disable WOL support if we are running on a SERDES chip. */
8353 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8354 		bnx2_get_5709_media(bp);
8355 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8356 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8357 
8358 	bp->phy_port = PORT_TP;
8359 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8360 		bp->phy_port = PORT_FIBRE;
8361 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8362 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8363 			bp->flags |= BNX2_FLAG_NO_WOL;
8364 			bp->wol = 0;
8365 		}
8366 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8367 			/* Don't do parallel detect on this board because of
8368 			 * some board problems.  The link will not go down
8369 			 * if we do parallel detect.
8370 			 */
8371 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8372 			    pdev->subsystem_device == 0x310c)
8373 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8374 		} else {
8375 			bp->phy_addr = 2;
8376 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8377 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8378 		}
8379 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8380 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8381 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8382 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8383 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8384 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8385 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8386 
8387 	bnx2_init_fw_cap(bp);
8388 
8389 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8390 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8391 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8392 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8393 		bp->flags |= BNX2_FLAG_NO_WOL;
8394 		bp->wol = 0;
8395 	}
8396 
8397 	if (bp->flags & BNX2_FLAG_NO_WOL)
8398 		device_set_wakeup_capable(&bp->pdev->dev, false);
8399 	else
8400 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8401 
8402 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8403 		bp->tx_quick_cons_trip_int =
8404 			bp->tx_quick_cons_trip;
8405 		bp->tx_ticks_int = bp->tx_ticks;
8406 		bp->rx_quick_cons_trip_int =
8407 			bp->rx_quick_cons_trip;
8408 		bp->rx_ticks_int = bp->rx_ticks;
8409 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8410 		bp->com_ticks_int = bp->com_ticks;
8411 		bp->cmd_ticks_int = bp->cmd_ticks;
8412 	}
8413 
8414 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8415 	 *
8416 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8417 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8418 	 * but causes problems on the AMD 8132 which will eventually stop
8419 	 * responding after a while.
8420 	 *
8421 	 * AMD believes this incompatibility is unique to the 5706, and
8422 	 * prefers to locally disable MSI rather than globally disabling it.
8423 	 */
8424 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8425 		struct pci_dev *amd_8132 = NULL;
8426 
8427 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8428 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8429 						  amd_8132))) {
8430 
8431 			if (amd_8132->revision >= 0x10 &&
8432 			    amd_8132->revision <= 0x13) {
8433 				disable_msi = 1;
8434 				pci_dev_put(amd_8132);
8435 				break;
8436 			}
8437 		}
8438 	}
8439 
8440 	bnx2_set_default_link(bp);
8441 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8442 
8443 	init_timer(&bp->timer);
8444 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8445 	bp->timer.data = (unsigned long) bp;
8446 	bp->timer.function = bnx2_timer;
8447 
8448 #ifdef BCM_CNIC
8449 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8450 		bp->cnic_eth_dev.max_iscsi_conn =
8451 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8452 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8453 	bp->cnic_probe = bnx2_cnic_probe;
8454 #endif
8455 	pci_save_state(pdev);
8456 
8457 	return 0;
8458 
8459 err_out_unmap:
8460 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8461 		pci_disable_pcie_error_reporting(pdev);
8462 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8463 	}
8464 
8465 	pci_iounmap(pdev, bp->regview);
8466 	bp->regview = NULL;
8467 
8468 err_out_release:
8469 	pci_release_regions(pdev);
8470 
8471 err_out_disable:
8472 	pci_disable_device(pdev);
8473 
8474 err_out:
8475 	kfree(bp->temp_stats_blk);
8476 
8477 	return rc;
8478 }
8479 
8480 static char *
8481 bnx2_bus_string(struct bnx2 *bp, char *str)
8482 {
8483 	char *s = str;
8484 
8485 	if (bp->flags & BNX2_FLAG_PCIE) {
8486 		s += sprintf(s, "PCI Express");
8487 	} else {
8488 		s += sprintf(s, "PCI");
8489 		if (bp->flags & BNX2_FLAG_PCIX)
8490 			s += sprintf(s, "-X");
8491 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8492 			s += sprintf(s, " 32-bit");
8493 		else
8494 			s += sprintf(s, " 64-bit");
8495 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8496 	}
8497 	return str;
8498 }
8499 
8500 static void
8501 bnx2_del_napi(struct bnx2 *bp)
8502 {
8503 	int i;
8504 
8505 	for (i = 0; i < bp->irq_nvecs; i++)
8506 		netif_napi_del(&bp->bnx2_napi[i].napi);
8507 }
8508 
8509 static void
8510 bnx2_init_napi(struct bnx2 *bp)
8511 {
8512 	int i;
8513 
8514 	for (i = 0; i < bp->irq_nvecs; i++) {
8515 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8516 		int (*poll)(struct napi_struct *, int);
8517 
8518 		if (i == 0)
8519 			poll = bnx2_poll;
8520 		else
8521 			poll = bnx2_poll_msix;
8522 
8523 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8524 		bnapi->bp = bp;
8525 	}
8526 }
8527 
8528 static const struct net_device_ops bnx2_netdev_ops = {
8529 	.ndo_open		= bnx2_open,
8530 	.ndo_start_xmit		= bnx2_start_xmit,
8531 	.ndo_stop		= bnx2_close,
8532 	.ndo_get_stats64	= bnx2_get_stats64,
8533 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8534 	.ndo_do_ioctl		= bnx2_ioctl,
8535 	.ndo_validate_addr	= eth_validate_addr,
8536 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8537 	.ndo_change_mtu		= bnx2_change_mtu,
8538 	.ndo_set_features	= bnx2_set_features,
8539 	.ndo_tx_timeout		= bnx2_tx_timeout,
8540 #ifdef CONFIG_NET_POLL_CONTROLLER
8541 	.ndo_poll_controller	= poll_bnx2,
8542 #endif
8543 };
8544 
8545 static int
8546 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8547 {
8548 	static int version_printed = 0;
8549 	struct net_device *dev;
8550 	struct bnx2 *bp;
8551 	int rc;
8552 	char str[40];
8553 
8554 	if (version_printed++ == 0)
8555 		pr_info("%s", version);
8556 
8557 	/* dev zeroed in init_etherdev */
8558 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8559 	if (!dev)
8560 		return -ENOMEM;
8561 
8562 	rc = bnx2_init_board(pdev, dev);
8563 	if (rc < 0)
8564 		goto err_free;
8565 
8566 	dev->netdev_ops = &bnx2_netdev_ops;
8567 	dev->watchdog_timeo = TX_TIMEOUT;
8568 	dev->ethtool_ops = &bnx2_ethtool_ops;
8569 
8570 	bp = netdev_priv(dev);
8571 
8572 	pci_set_drvdata(pdev, dev);
8573 
8574 	rc = bnx2_request_firmware(bp);
8575 	if (rc < 0)
8576 		goto error;
8577 
8578 
8579 	bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
8580 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8581 
8582 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8583 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8584 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8585 
8586 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8587 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8588 
8589 	dev->vlan_features = dev->hw_features;
8590 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8591 	dev->features |= dev->hw_features;
8592 	dev->priv_flags |= IFF_UNICAST_FLT;
8593 	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8594 	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8595 
8596 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8597 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8598 
8599 	if ((rc = register_netdev(dev))) {
8600 		dev_err(&pdev->dev, "Cannot register net device\n");
8601 		goto error;
8602 	}
8603 
8604 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8605 		    "node addr %pM\n", board_info[ent->driver_data].name,
8606 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8607 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8608 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8609 		    pdev->irq, dev->dev_addr);
8610 
8611 	return 0;
8612 
8613 error:
8614 	bnx2_release_firmware(bp);
8615 	pci_iounmap(pdev, bp->regview);
8616 	pci_release_regions(pdev);
8617 	pci_disable_device(pdev);
8618 err_free:
8619 	bnx2_free_stats_blk(dev);
8620 	free_netdev(dev);
8621 	return rc;
8622 }
8623 
8624 static void
8625 bnx2_remove_one(struct pci_dev *pdev)
8626 {
8627 	struct net_device *dev = pci_get_drvdata(pdev);
8628 	struct bnx2 *bp = netdev_priv(dev);
8629 
8630 	unregister_netdev(dev);
8631 
8632 	del_timer_sync(&bp->timer);
8633 	cancel_work_sync(&bp->reset_task);
8634 
8635 	pci_iounmap(bp->pdev, bp->regview);
8636 
8637 	bnx2_free_stats_blk(dev);
8638 	kfree(bp->temp_stats_blk);
8639 
8640 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8641 		pci_disable_pcie_error_reporting(pdev);
8642 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8643 	}
8644 
8645 	bnx2_release_firmware(bp);
8646 
8647 	free_netdev(dev);
8648 
8649 	pci_release_regions(pdev);
8650 	pci_disable_device(pdev);
8651 }
8652 
8653 #ifdef CONFIG_PM_SLEEP
8654 static int
8655 bnx2_suspend(struct device *device)
8656 {
8657 	struct pci_dev *pdev = to_pci_dev(device);
8658 	struct net_device *dev = pci_get_drvdata(pdev);
8659 	struct bnx2 *bp = netdev_priv(dev);
8660 
8661 	if (netif_running(dev)) {
8662 		cancel_work_sync(&bp->reset_task);
8663 		bnx2_netif_stop(bp, true);
8664 		netif_device_detach(dev);
8665 		del_timer_sync(&bp->timer);
8666 		bnx2_shutdown_chip(bp);
8667 		__bnx2_free_irq(bp);
8668 		bnx2_free_skbs(bp);
8669 	}
8670 	bnx2_setup_wol(bp);
8671 	return 0;
8672 }
8673 
8674 static int
8675 bnx2_resume(struct device *device)
8676 {
8677 	struct pci_dev *pdev = to_pci_dev(device);
8678 	struct net_device *dev = pci_get_drvdata(pdev);
8679 	struct bnx2 *bp = netdev_priv(dev);
8680 
8681 	if (!netif_running(dev))
8682 		return 0;
8683 
8684 	bnx2_set_power_state(bp, PCI_D0);
8685 	netif_device_attach(dev);
8686 	bnx2_request_irq(bp);
8687 	bnx2_init_nic(bp, 1);
8688 	bnx2_netif_start(bp, true);
8689 	return 0;
8690 }
8691 
8692 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8693 #define BNX2_PM_OPS (&bnx2_pm_ops)
8694 
8695 #else
8696 
8697 #define BNX2_PM_OPS NULL
8698 
8699 #endif /* CONFIG_PM_SLEEP */
8700 /**
8701  * bnx2_io_error_detected - called when PCI error is detected
8702  * @pdev: Pointer to PCI device
8703  * @state: The current pci connection state
8704  *
8705  * This function is called after a PCI bus error affecting
8706  * this device has been detected.
8707  */
8708 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8709 					       pci_channel_state_t state)
8710 {
8711 	struct net_device *dev = pci_get_drvdata(pdev);
8712 	struct bnx2 *bp = netdev_priv(dev);
8713 
8714 	rtnl_lock();
8715 	netif_device_detach(dev);
8716 
8717 	if (state == pci_channel_io_perm_failure) {
8718 		rtnl_unlock();
8719 		return PCI_ERS_RESULT_DISCONNECT;
8720 	}
8721 
8722 	if (netif_running(dev)) {
8723 		bnx2_netif_stop(bp, true);
8724 		del_timer_sync(&bp->timer);
8725 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8726 	}
8727 
8728 	pci_disable_device(pdev);
8729 	rtnl_unlock();
8730 
8731 	/* Request a slot slot reset. */
8732 	return PCI_ERS_RESULT_NEED_RESET;
8733 }
8734 
8735 /**
8736  * bnx2_io_slot_reset - called after the pci bus has been reset.
8737  * @pdev: Pointer to PCI device
8738  *
8739  * Restart the card from scratch, as if from a cold-boot.
8740  */
8741 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8742 {
8743 	struct net_device *dev = pci_get_drvdata(pdev);
8744 	struct bnx2 *bp = netdev_priv(dev);
8745 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8746 	int err = 0;
8747 
8748 	rtnl_lock();
8749 	if (pci_enable_device(pdev)) {
8750 		dev_err(&pdev->dev,
8751 			"Cannot re-enable PCI device after reset\n");
8752 	} else {
8753 		pci_set_master(pdev);
8754 		pci_restore_state(pdev);
8755 		pci_save_state(pdev);
8756 
8757 		if (netif_running(dev))
8758 			err = bnx2_init_nic(bp, 1);
8759 
8760 		if (!err)
8761 			result = PCI_ERS_RESULT_RECOVERED;
8762 	}
8763 
8764 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8765 		bnx2_napi_enable(bp);
8766 		dev_close(dev);
8767 	}
8768 	rtnl_unlock();
8769 
8770 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8771 		return result;
8772 
8773 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8774 	if (err) {
8775 		dev_err(&pdev->dev,
8776 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8777 			 err); /* non-fatal, continue */
8778 	}
8779 
8780 	return result;
8781 }
8782 
8783 /**
8784  * bnx2_io_resume - called when traffic can start flowing again.
8785  * @pdev: Pointer to PCI device
8786  *
8787  * This callback is called when the error recovery driver tells us that
8788  * its OK to resume normal operation.
8789  */
8790 static void bnx2_io_resume(struct pci_dev *pdev)
8791 {
8792 	struct net_device *dev = pci_get_drvdata(pdev);
8793 	struct bnx2 *bp = netdev_priv(dev);
8794 
8795 	rtnl_lock();
8796 	if (netif_running(dev))
8797 		bnx2_netif_start(bp, true);
8798 
8799 	netif_device_attach(dev);
8800 	rtnl_unlock();
8801 }
8802 
8803 static void bnx2_shutdown(struct pci_dev *pdev)
8804 {
8805 	struct net_device *dev = pci_get_drvdata(pdev);
8806 	struct bnx2 *bp;
8807 
8808 	if (!dev)
8809 		return;
8810 
8811 	bp = netdev_priv(dev);
8812 	if (!bp)
8813 		return;
8814 
8815 	rtnl_lock();
8816 	if (netif_running(dev))
8817 		dev_close(bp->dev);
8818 
8819 	if (system_state == SYSTEM_POWER_OFF)
8820 		bnx2_set_power_state(bp, PCI_D3hot);
8821 
8822 	rtnl_unlock();
8823 }
8824 
8825 static const struct pci_error_handlers bnx2_err_handler = {
8826 	.error_detected	= bnx2_io_error_detected,
8827 	.slot_reset	= bnx2_io_slot_reset,
8828 	.resume		= bnx2_io_resume,
8829 };
8830 
8831 static struct pci_driver bnx2_pci_driver = {
8832 	.name		= DRV_MODULE_NAME,
8833 	.id_table	= bnx2_pci_tbl,
8834 	.probe		= bnx2_init_one,
8835 	.remove		= bnx2_remove_one,
8836 	.driver.pm	= BNX2_PM_OPS,
8837 	.err_handler	= &bnx2_err_handler,
8838 	.shutdown	= bnx2_shutdown,
8839 };
8840 
8841 module_pci_driver(bnx2_pci_driver);
8842