/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/crash_dump.h>

#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

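/* Return the number of free tx descriptors.  The producer and consumer
 * indices are read with READ_ONCE() because this runs locklessly on the
 * transmit fast path; READ_ONCE() keeps the compiler from reloading
 * either index in the middle of the calculation.
 */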
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* The ring uses 256 indices for 255 entries; one of the
	 * indices must be skipped.
	 */
	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

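/* Indirect register access goes through the PCI config register window:
 * the window address write and the data access must be paired, so both
 * are done under indirect_lock with interrupts disabled.
 */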
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

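/* Write a value into on-chip context memory for the given CID.  The
 * 5709 uses a write-request handshake (poll until the WRITE_REQ bit
 * clears); older chips use a simple address/data register pair.
 */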
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

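/* MII register access.  If the EMAC is auto-polling the PHY, polling is
 * temporarily disabled around the MDIO transaction and re-enabled
 * afterwards.  A transaction is started by setting START_BUSY and is
 * complete when the hardware clears it (polled up to 50 x 10 usec).
 */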
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

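/* Interrupts are masked and unmasked per vector through the
 * PCICFG_INT_ACK_CMD register; the read-back at the end of
 * bnx2_disable_int() flushes the posted writes.
 */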
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

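/* Ring memory is split between coherent DMA memory for the hardware
 * descriptor rings (dma_alloc_coherent) and ordinary kernel memory for
 * the driver's shadow buffer rings (kzalloc/vzalloc).
 */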
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (!txr->tx_buf_ring)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (!txr->tx_desc_ring)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
		if (!rxr->rx_buf_ring)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_desc_ring[j])
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring =
				vzalloc(array_size(SW_RXPG_RING_SIZE,
						   bp->rx_max_pg_ring));
			if (!rxr->rx_pg_ring)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_pg_desc_ring[j])
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_stats_blk(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->status_blk) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bp->status_blk,
				  bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
}

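/* The status block(s) and the statistics block share one coherent
 * allocation: MSI-X capable devices reserve one cache-aligned status
 * block slot per hardware vector, and the statistics block follows.
 */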
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (!status_blk)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}

	if (bnapi->status_blk.msi)
		bnapi->status_blk.msi = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

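/* Resolve tx/rx flow control from the local and remote pause
 * advertisements (802.3 Table 28B-3):
 *
 *   local CAP,        remote CAP        -> tx + rx pause
 *   local CAP + ASYM, remote ASYM only  -> rx pause only
 *   local ASYM only,  remote CAP + ASYM -> tx pause only
 */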
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

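/* Program the L2 rx context for one ring: set the BD chain type and,
 * if tx pause is active, enable hardware flow control for the ring.
 */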
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

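/* Propagate the resolved link parameters (speed, duplex, pause) into
 * the EMAC mode, rx mode and tx mode registers, then refresh the rx
 * contexts so hardware flow control matches the new settings.
 */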
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

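/* 2.5G SerDes handling: the 2500baseX advertisement lives in the UP1
 * register (behind the OVER1G block on the 5709), while forced 2.5G
 * mode is set through SERDES_DIG_MISC1 on the 5709 or a BMCR bit on
 * the 5708.
 */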
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

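/* Re-evaluate link state from the PHY.  BMSR is read twice because the
 * link status bit is latched low and only the second read reflects the
 * current state; the 5706 SerDes additionally cross-checks the EMAC
 * status and the AN debug shadow register to filter out false link
 * indications.
 */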
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

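/* With a remote (firmware-managed) PHY, link settings are not written
 * to MII registers directly: they are encoded into a speed/flow-control
 * argument word, posted to the shared-memory mailbox, and applied by
 * the management firmware via the SET_LINK command.
 */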
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
1859 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1860 		bp->serdes_an_pending = 1;
1861 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1862 	} else {
1863 		bnx2_resolve_flow_ctrl(bp);
1864 		bnx2_set_mac_link(bp);
1865 	}
1866 
1867 	return 0;
1868 }
1869 
1870 #define ETHTOOL_ALL_FIBRE_SPEED						\
1871 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1872 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1873 		(ADVERTISED_1000baseT_Full)
1874 
1875 #define ETHTOOL_ALL_COPPER_SPEED					\
1876 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1877 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1878 	ADVERTISED_1000baseT_Full)
1879 
1880 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1881 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1882 
1883 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1884 
1885 static void
1886 bnx2_set_default_remote_link(struct bnx2 *bp)
1887 {
1888 	u32 link;
1889 
1890 	if (bp->phy_port == PORT_TP)
1891 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1892 	else
1893 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1894 
1895 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1896 		bp->req_line_speed = 0;
1897 		bp->autoneg |= AUTONEG_SPEED;
1898 		bp->advertising = ADVERTISED_Autoneg;
1899 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1900 			bp->advertising |= ADVERTISED_10baseT_Half;
1901 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1902 			bp->advertising |= ADVERTISED_10baseT_Full;
1903 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1904 			bp->advertising |= ADVERTISED_100baseT_Half;
1905 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1906 			bp->advertising |= ADVERTISED_100baseT_Full;
1907 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1908 			bp->advertising |= ADVERTISED_1000baseT_Full;
1909 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1910 			bp->advertising |= ADVERTISED_2500baseX_Full;
1911 	} else {
1912 		bp->autoneg = 0;
1913 		bp->advertising = 0;
1914 		bp->req_duplex = DUPLEX_FULL;
1915 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1916 			bp->req_line_speed = SPEED_10;
1917 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1918 				bp->req_duplex = DUPLEX_HALF;
1919 		}
1920 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1921 			bp->req_line_speed = SPEED_100;
1922 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1923 				bp->req_duplex = DUPLEX_HALF;
1924 		}
1925 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1926 			bp->req_line_speed = SPEED_1000;
1927 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1928 			bp->req_line_speed = SPEED_2500;
1929 	}
1930 }
1931 
1932 static void
1933 bnx2_set_default_link(struct bnx2 *bp)
1934 {
1935 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1936 		bnx2_set_default_remote_link(bp);
1937 		return;
1938 	}
1939 
1940 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1941 	bp->req_line_speed = 0;
1942 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1943 		u32 reg;
1944 
1945 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1946 
1947 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1948 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1949 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1950 			bp->autoneg = 0;
1951 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1952 			bp->req_duplex = DUPLEX_FULL;
1953 		}
1954 	} else
1955 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1956 }
1957 
1958 static void
1959 bnx2_send_heart_beat(struct bnx2 *bp)
1960 {
1961 	u32 msg;
1962 	u32 addr;
1963 
1964 	spin_lock(&bp->indirect_lock);
1965 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1966 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1967 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1968 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1969 	spin_unlock(&bp->indirect_lock);
1970 }
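
/* A hedged aside (an assumption about the bootcode, not driver code):
 * the pulse written above is a wrapping sequence number.  Management
 * firmware can declare the driver hung when the masked value stops
 * changing between two of its own timer ticks, which is why
 * bnx2_send_heart_beat() must run at least once per firmware timeout.
 */
static bool example_driver_pulse_stalled(u32 prev_pulse, u32 cur_pulse)
{
	/* Only the masked sequence bits are compared. */
	return (prev_pulse & BNX2_DRV_PULSE_SEQ_MASK) ==
	       (cur_pulse & BNX2_DRV_PULSE_SEQ_MASK);
}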
1971 
1972 static void
1973 bnx2_remote_phy_event(struct bnx2 *bp)
1974 {
1975 	u32 msg;
1976 	u8 link_up = bp->link_up;
1977 	u8 old_port;
1978 
1979 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1980 
1981 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1982 		bnx2_send_heart_beat(bp);
1983 
1984 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1985 
1986 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1987 		bp->link_up = 0;
1988 	else {
1989 		u32 speed;
1990 
1991 		bp->link_up = 1;
1992 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1993 		bp->duplex = DUPLEX_FULL;
1994 		switch (speed) {
1995 			case BNX2_LINK_STATUS_10HALF:
1996 				bp->duplex = DUPLEX_HALF;
1997 				fallthrough;
1998 			case BNX2_LINK_STATUS_10FULL:
1999 				bp->line_speed = SPEED_10;
2000 				break;
2001 			case BNX2_LINK_STATUS_100HALF:
2002 				bp->duplex = DUPLEX_HALF;
2003 				fallthrough;
2004 			case BNX2_LINK_STATUS_100BASE_T4:
2005 			case BNX2_LINK_STATUS_100FULL:
2006 				bp->line_speed = SPEED_100;
2007 				break;
2008 			case BNX2_LINK_STATUS_1000HALF:
2009 				bp->duplex = DUPLEX_HALF;
2010 				fallthrough;
2011 			case BNX2_LINK_STATUS_1000FULL:
2012 				bp->line_speed = SPEED_1000;
2013 				break;
2014 			case BNX2_LINK_STATUS_2500HALF:
2015 				bp->duplex = DUPLEX_HALF;
2016 				fallthrough;
2017 			case BNX2_LINK_STATUS_2500FULL:
2018 				bp->line_speed = SPEED_2500;
2019 				break;
2020 			default:
2021 				bp->line_speed = 0;
2022 				break;
2023 		}
2024 
2025 		bp->flow_ctrl = 0;
2026 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2027 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2028 			if (bp->duplex == DUPLEX_FULL)
2029 				bp->flow_ctrl = bp->req_flow_ctrl;
2030 		} else {
2031 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2032 				bp->flow_ctrl |= FLOW_CTRL_TX;
2033 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2034 				bp->flow_ctrl |= FLOW_CTRL_RX;
2035 		}
2036 
2037 		old_port = bp->phy_port;
2038 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2039 			bp->phy_port = PORT_FIBRE;
2040 		else
2041 			bp->phy_port = PORT_TP;
2042 
2043 		if (old_port != bp->phy_port)
2044 			bnx2_set_default_link(bp);
2045 
2046 	}
2047 	if (bp->link_up != link_up)
2048 		bnx2_report_link(bp);
2049 
2050 	bnx2_set_mac_link(bp);
2051 }
2052 
2053 static int
2054 bnx2_set_remote_link(struct bnx2 *bp)
2055 {
2056 	u32 evt_code;
2057 
2058 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2059 	switch (evt_code) {
2060 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2061 			bnx2_remote_phy_event(bp);
2062 			break;
2063 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2064 		default:
2065 			bnx2_send_heart_beat(bp);
2066 			break;
2067 	}
2068 	return 0;
2069 }
2070 
2071 static int
2072 bnx2_setup_copper_phy(struct bnx2 *bp)
2073 __releases(&bp->phy_lock)
2074 __acquires(&bp->phy_lock)
2075 {
2076 	u32 bmcr, adv_reg, new_adv = 0;
2077 	u32 new_bmcr;
2078 
2079 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2080 
2081 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2082 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2083 		    ADVERTISE_PAUSE_ASYM);
2084 
2085 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2086 
2087 	if (bp->autoneg & AUTONEG_SPEED) {
2088 		u32 adv1000_reg;
2089 		u32 new_adv1000 = 0;
2090 
2091 		new_adv |= bnx2_phy_get_pause_adv(bp);
2092 
2093 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2094 		adv1000_reg &= PHY_ALL_1000_SPEED;
2095 
2096 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2097 		if ((adv1000_reg != new_adv1000) ||
2098 			(adv_reg != new_adv) ||
2099 			((bmcr & BMCR_ANENABLE) == 0)) {
2100 
2101 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2102 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2103 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2104 				BMCR_ANENABLE);
2105 		}
2106 		else if (bp->link_up) {
2107 			/* Flow ctrl may have changed from auto to forced
2108 			 * or vice-versa. */
2109 
2110 			bnx2_resolve_flow_ctrl(bp);
2111 			bnx2_set_mac_link(bp);
2112 		}
2113 		return 0;
2114 	}
2115 
2116 	/* advertise nothing when forcing speed */
2117 	if (adv_reg != new_adv)
2118 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2119 
2120 	new_bmcr = 0;
2121 	if (bp->req_line_speed == SPEED_100) {
2122 		new_bmcr |= BMCR_SPEED100;
2123 	}
2124 	if (bp->req_duplex == DUPLEX_FULL) {
2125 		new_bmcr |= BMCR_FULLDPLX;
2126 	}
2127 	if (new_bmcr != bmcr) {
2128 		u32 bmsr;
2129 
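		/* The BMSR link-status bit latches link-down events
		 * (IEEE 802.3), so the first read may report stale
		 * state; reading twice returns the current status.
		 */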
2130 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2131 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132 
2133 		if (bmsr & BMSR_LSTATUS) {
2134 			/* Force link down */
2135 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2136 			spin_unlock_bh(&bp->phy_lock);
2137 			msleep(50);
2138 			spin_lock_bh(&bp->phy_lock);
2139 
2140 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2141 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142 		}
2143 
2144 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2145 
2146 		/* Normally, the new speed is set up after the link has
2147 		 * gone down and up again. In some cases, link will not go
2148 		 * down so we need to set up the new speed here.
2149 		 */
2150 		if (bmsr & BMSR_LSTATUS) {
2151 			bp->line_speed = bp->req_line_speed;
2152 			bp->duplex = bp->req_duplex;
2153 			bnx2_resolve_flow_ctrl(bp);
2154 			bnx2_set_mac_link(bp);
2155 		}
2156 	} else {
2157 		bnx2_resolve_flow_ctrl(bp);
2158 		bnx2_set_mac_link(bp);
2159 	}
2160 	return 0;
2161 }
2162 
2163 static int
2164 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2165 __releases(&bp->phy_lock)
2166 __acquires(&bp->phy_lock)
2167 {
2168 	if (bp->loopback == MAC_LOOPBACK)
2169 		return 0;
2170 
2171 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2172 		return bnx2_setup_serdes_phy(bp, port);
2173 	}
2174 	else {
2175 		return bnx2_setup_copper_phy(bp);
2176 	}
2177 }
2178 
2179 static int
2180 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2181 {
2182 	u32 val;
2183 
2184 	bp->mii_bmcr = MII_BMCR + 0x10;
2185 	bp->mii_bmsr = MII_BMSR + 0x10;
2186 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2187 	bp->mii_adv = MII_ADVERTISE + 0x10;
2188 	bp->mii_lpa = MII_LPA + 0x10;
2189 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2190 
2191 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2192 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2193 
2194 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2195 	if (reset_phy)
2196 		bnx2_reset_phy(bp);
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2199 
2200 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2201 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2202 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2203 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2204 
2205 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2206 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2207 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2208 		val |= BCM5708S_UP1_2G5;
2209 	else
2210 		val &= ~BCM5708S_UP1_2G5;
2211 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2212 
2213 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2214 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2215 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2216 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2217 
2218 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2219 
2220 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2221 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2222 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2223 
2224 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2225 
2226 	return 0;
2227 }
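
/* Illustrative sketch (hypothetical helper): every access above follows
 * the same "select register bank via MII_BNX2_BLK_ADDR, then
 * read-modify-write" shape, which can be factored as:
 */
static void example_phy_rmw_blk(struct bnx2 *bp, u32 blk, u32 reg,
				u32 clr_bits, u32 set_bits)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, blk);	/* select bank */
	bnx2_read_phy(bp, reg, &val);
	val = (val & ~clr_bits) | set_bits;
	bnx2_write_phy(bp, reg, val);
}

/* With it, the fiber-mode update above would read:
 * example_phy_rmw_blk(bp, MII_BNX2_BLK_ADDR_SERDES_DIG,
 *		       MII_BNX2_SERDES_DIG_1000XCTL1,
 *		       MII_BNX2_SD_1000XCTL1_AUTODET,
 *		       MII_BNX2_SD_1000XCTL1_FIBER);
 */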
2228 
2229 static int
2230 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2231 {
2232 	u32 val;
2233 
2234 	if (reset_phy)
2235 		bnx2_reset_phy(bp);
2236 
2237 	bp->mii_up1 = BCM5708S_UP1;
2238 
2239 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2240 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2241 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2242 
2243 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2244 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2245 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2246 
2247 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2248 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2249 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2250 
2251 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2252 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2253 		val |= BCM5708S_UP1_2G5;
2254 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2255 	}
2256 
2257 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2258 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2259 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2260 		/* increase tx signal amplitude */
2261 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2262 			       BCM5708S_BLK_ADDR_TX_MISC);
2263 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2264 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2265 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2266 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2267 	}
2268 
2269 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2270 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2271 
2272 	if (val) {
2273 		u32 is_backplane;
2274 
2275 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2276 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2277 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2278 				       BCM5708S_BLK_ADDR_TX_MISC);
2279 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2280 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2281 				       BCM5708S_BLK_ADDR_DIG);
2282 		}
2283 	}
2284 	return 0;
2285 }
2286 
2287 static int
2288 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2289 {
2290 	if (reset_phy)
2291 		bnx2_reset_phy(bp);
2292 
2293 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2294 
2295 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2296 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2297 
2298 	if (bp->dev->mtu > ETH_DATA_LEN) {
2299 		u32 val;
2300 
2301 		/* Set extended packet length bit */
2302 		bnx2_write_phy(bp, 0x18, 0x7);
2303 		bnx2_read_phy(bp, 0x18, &val);
2304 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2305 
2306 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2307 		bnx2_read_phy(bp, 0x1c, &val);
2308 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2309 	}
2310 	else {
2311 		u32 val;
2312 
2313 		bnx2_write_phy(bp, 0x18, 0x7);
2314 		bnx2_read_phy(bp, 0x18, &val);
2315 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2316 
2317 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2318 		bnx2_read_phy(bp, 0x1c, &val);
2319 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2320 	}
2321 
2322 	return 0;
2323 }
2324 
2325 static int
2326 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2327 {
2328 	u32 val;
2329 
2330 	if (reset_phy)
2331 		bnx2_reset_phy(bp);
2332 
2333 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2334 		bnx2_write_phy(bp, 0x18, 0x0c00);
2335 		bnx2_write_phy(bp, 0x17, 0x000a);
2336 		bnx2_write_phy(bp, 0x15, 0x310b);
2337 		bnx2_write_phy(bp, 0x17, 0x201f);
2338 		bnx2_write_phy(bp, 0x15, 0x9506);
2339 		bnx2_write_phy(bp, 0x17, 0x401f);
2340 		bnx2_write_phy(bp, 0x15, 0x14e2);
2341 		bnx2_write_phy(bp, 0x18, 0x0400);
2342 	}
2343 
2344 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2345 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2346 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2347 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2348 		val &= ~(1 << 8);
2349 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2350 	}
2351 
2352 	if (bp->dev->mtu > ETH_DATA_LEN) {
2353 		/* Set extended packet length bit */
2354 		bnx2_write_phy(bp, 0x18, 0x7);
2355 		bnx2_read_phy(bp, 0x18, &val);
2356 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2357 
2358 		bnx2_read_phy(bp, 0x10, &val);
2359 		bnx2_write_phy(bp, 0x10, val | 0x1);
2360 	}
2361 	else {
2362 		bnx2_write_phy(bp, 0x18, 0x7);
2363 		bnx2_read_phy(bp, 0x18, &val);
2364 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2365 
2366 		bnx2_read_phy(bp, 0x10, &val);
2367 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2368 	}
2369 
2370 	/* ethernet@wirespeed */
2371 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2372 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2373 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2374 
2375 	/* auto-mdix */
2376 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2377 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2378 
2379 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2380 	return 0;
2381 }
2382 
2383 
2384 static int
2385 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2386 __releases(&bp->phy_lock)
2387 __acquires(&bp->phy_lock)
2388 {
2389 	u32 val;
2390 	int rc = 0;
2391 
2392 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2393 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2394 
2395 	bp->mii_bmcr = MII_BMCR;
2396 	bp->mii_bmsr = MII_BMSR;
2397 	bp->mii_bmsr1 = MII_BMSR;
2398 	bp->mii_adv = MII_ADVERTISE;
2399 	bp->mii_lpa = MII_LPA;
2400 
2401 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2402 
2403 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2404 		goto setup_phy;
2405 
2406 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2407 	bp->phy_id = val << 16;
2408 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2409 	bp->phy_id |= val & 0xffff;
2410 
2411 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2412 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2413 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2414 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2415 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2416 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2417 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2418 	}
2419 	else {
2420 		rc = bnx2_init_copper_phy(bp, reset_phy);
2421 	}
2422 
2423 setup_phy:
2424 	if (!rc)
2425 		rc = bnx2_setup_phy(bp, bp->phy_port);
2426 
2427 	return rc;
2428 }
2429 
2430 static int
2431 bnx2_set_mac_loopback(struct bnx2 *bp)
2432 {
2433 	u32 mac_mode;
2434 
2435 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2436 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2437 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2438 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439 	bp->link_up = 1;
2440 	return 0;
2441 }
2442 
2443 static int bnx2_test_link(struct bnx2 *);
2444 
2445 static int
2446 bnx2_set_phy_loopback(struct bnx2 *bp)
2447 {
2448 	u32 mac_mode;
2449 	int rc, i;
2450 
2451 	spin_lock_bh(&bp->phy_lock);
2452 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2453 			    BMCR_SPEED1000);
2454 	spin_unlock_bh(&bp->phy_lock);
2455 	if (rc)
2456 		return rc;
2457 
2458 	for (i = 0; i < 10; i++) {
2459 		if (bnx2_test_link(bp) == 0)
2460 			break;
2461 		msleep(100);
2462 	}
2463 
2464 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2465 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2466 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2467 		      BNX2_EMAC_MODE_25G_MODE);
2468 
2469 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2470 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2471 	bp->link_up = 1;
2472 	return 0;
2473 }
2474 
2475 static void
2476 bnx2_dump_mcp_state(struct bnx2 *bp)
2477 {
2478 	struct net_device *dev = bp->dev;
2479 	u32 mcp_p0, mcp_p1;
2480 
2481 	netdev_err(dev, "<--- start MCP states dump --->\n");
2482 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2483 		mcp_p0 = BNX2_MCP_STATE_P0;
2484 		mcp_p1 = BNX2_MCP_STATE_P1;
2485 	} else {
2486 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2487 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2488 	}
2489 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2490 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2491 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2492 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2493 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2494 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2495 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2496 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2497 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2499 	netdev_err(dev, "DEBUG: shmem states:\n");
2500 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2501 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2502 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2503 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2504 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2505 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2506 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2507 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2508 	pr_cont(" condition[%08x]\n",
2509 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2510 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2511 	DP_SHMEM_LINE(bp, 0x3cc);
2512 	DP_SHMEM_LINE(bp, 0x3dc);
2513 	DP_SHMEM_LINE(bp, 0x3ec);
2514 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2515 	netdev_err(dev, "<--- end MCP states dump --->\n");
2516 }
2517 
2518 static int
2519 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2520 {
2521 	int i;
2522 	u32 val;
2523 
2524 	bp->fw_wr_seq++;
2525 	msg_data |= bp->fw_wr_seq;
2526 	bp->fw_last_msg = msg_data;
2527 
2528 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2529 
2530 	if (!ack)
2531 		return 0;
2532 
2533 	/* wait for an acknowledgement. */
2534 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2535 		msleep(10);
2536 
2537 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2538 
2539 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2540 			break;
2541 	}
2542 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2543 		return 0;
2544 
2545 	/* If we timed out, inform the firmware that this is the case. */
2546 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2547 		msg_data &= ~BNX2_DRV_MSG_CODE;
2548 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2549 
2550 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2551 		if (!silent) {
2552 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2553 			bnx2_dump_mcp_state(bp);
2554 		}
2555 
2556 		return -EBUSY;
2557 	}
2558 
2559 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2560 		return -EIO;
2561 
2562 	return 0;
2563 }
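
/* Usage sketch (hedged): how callers elsewhere in this file drive the
 * handshake above.  The message code shown is only an example; the
 * BNX2_DRV_MSG_CODE_* values live in bnx2.h, and bnx2_fw_sync() ORs in
 * the sequence number itself.
 */
static int example_fw_unload_sync(struct bnx2 *bp)
{
	/* ack=1: poll BNX2_FW_MB until the firmware echoes the
	 * sequence number; silent=0: log and dump MCP state on timeout.
	 */
	return bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_UNLOAD, 1, 0);
}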
2564 
2565 static int
2566 bnx2_init_5709_context(struct bnx2 *bp)
2567 {
2568 	int i, ret = 0;
2569 	u32 val;
2570 
2571 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2572 	val |= (BNX2_PAGE_BITS - 8) << 16;
2573 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2574 	for (i = 0; i < 10; i++) {
2575 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2576 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2577 			break;
2578 		udelay(2);
2579 	}
2580 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2581 		return -EBUSY;
2582 
2583 	for (i = 0; i < bp->ctx_pages; i++) {
2584 		int j;
2585 
2586 		if (bp->ctx_blk[i])
2587 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2588 		else
2589 			return -ENOMEM;
2590 
2591 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2592 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2593 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2594 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2595 			(u64) bp->ctx_blk_mapping[i] >> 32);
2596 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2597 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2598 		for (j = 0; j < 10; j++) {
2599 
2600 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2601 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2602 				break;
2603 			udelay(5);
2604 		}
2605 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2606 			ret = -EBUSY;
2607 			break;
2608 		}
2609 	}
2610 	return ret;
2611 }
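
/* Illustrative sketch (hypothetical helper): both waits above are the
 * same "issue request, poll a self-clearing bit" pattern:
 */
static int example_poll_bit_clear(struct bnx2 *bp, u32 reg, u32 bit,
				  int tries, unsigned int delay_us)
{
	u32 val;

	while (tries--) {
		val = BNX2_RD(bp, reg);
		if (!(val & bit))
			return 0;	/* hardware completed the request */
		udelay(delay_us);
	}
	return -EBUSY;			/* request never completed */
}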
2612 
2613 static void
2614 bnx2_init_context(struct bnx2 *bp)
2615 {
2616 	u32 vcid;
2617 
2618 	vcid = 96;
2619 	while (vcid) {
2620 		u32 vcid_addr, pcid_addr, offset;
2621 		int i;
2622 
2623 		vcid--;
2624 
2625 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2626 			u32 new_vcid;
2627 
2628 			vcid_addr = GET_PCID_ADDR(vcid);
2629 			if (vcid & 0x8) {
2630 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2631 			}
2632 			else {
2633 				new_vcid = vcid;
2634 			}
2635 			pcid_addr = GET_PCID_ADDR(new_vcid);
2636 		}
2637 		else {
2638 			vcid_addr = GET_CID_ADDR(vcid);
2639 			pcid_addr = vcid_addr;
2640 		}
2641 
2642 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2643 			vcid_addr += (i << PHY_CTX_SHIFT);
2644 			pcid_addr += (i << PHY_CTX_SHIFT);
2645 
2646 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2647 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2648 
2649 			/* Zero out the context. */
2650 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2651 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2652 		}
2653 	}
2654 }
2655 
2656 static int
2657 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2658 {
2659 	u16 *good_mbuf;
2660 	u32 good_mbuf_cnt;
2661 	u32 val;
2662 
2663 	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2664 	if (!good_mbuf)
2665 		return -ENOMEM;
2666 
2667 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2668 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2669 
2670 	good_mbuf_cnt = 0;
2671 
2672 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2673 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2674 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2675 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2676 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2677 
2678 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2679 
2680 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2681 
2682 		/* The addresses with Bit 9 set are bad memory blocks. */
2683 		if (!(val & (1 << 9))) {
2684 			good_mbuf[good_mbuf_cnt] = (u16) val;
2685 			good_mbuf_cnt++;
2686 		}
2687 
2688 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2689 	}
2690 
2691 	/* Free the good ones back to the mbuf pool thus discarding
2692 	 * all the bad ones. */
2693 	while (good_mbuf_cnt) {
2694 		good_mbuf_cnt--;
2695 
2696 		val = good_mbuf[good_mbuf_cnt];
2697 		val = (val << 9) | val | 1;
2698 
2699 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2700 	}
2701 	kfree(good_mbuf);
2702 	return 0;
2703 }
2704 
2705 static void
2706 bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2707 {
2708 	u32 val;
2709 
2710 	val = (mac_addr[0] << 8) | mac_addr[1];
2711 
2712 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2713 
2714 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2715 		(mac_addr[4] << 8) | mac_addr[5];
2716 
2717 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2718 }
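
/* Worked example for the packing above: for the MAC address
 * 00:10:18:aa:bb:cc, MATCH0 receives 0x00000010 (the two high bytes)
 * and MATCH1 receives 0x18aabbcc (the remaining four bytes).
 */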
2719 
2720 static inline int
2721 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2722 {
2723 	dma_addr_t mapping;
2724 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2725 	struct bnx2_rx_bd *rxbd =
2726 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2727 	struct page *page = alloc_page(gfp);
2728 
2729 	if (!page)
2730 		return -ENOMEM;
2731 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2732 			       DMA_FROM_DEVICE);
2733 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2734 		__free_page(page);
2735 		return -EIO;
2736 	}
2737 
2738 	rx_pg->page = page;
2739 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2740 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2741 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2742 	return 0;
2743 }
2744 
2745 static void
2746 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2747 {
2748 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2749 	struct page *page = rx_pg->page;
2750 
2751 	if (!page)
2752 		return;
2753 
2754 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2755 		       PAGE_SIZE, DMA_FROM_DEVICE);
2756 
2757 	__free_page(page);
2758 	rx_pg->page = NULL;
2759 }
2760 
2761 static inline int
2762 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2763 {
2764 	u8 *data;
2765 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2766 	dma_addr_t mapping;
2767 	struct bnx2_rx_bd *rxbd =
2768 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2769 
2770 	data = kmalloc(bp->rx_buf_size, gfp);
2771 	if (!data)
2772 		return -ENOMEM;
2773 
2774 	mapping = dma_map_single(&bp->pdev->dev,
2775 				 get_l2_fhdr(data),
2776 				 bp->rx_buf_use_size,
2777 				 DMA_FROM_DEVICE);
2778 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2779 		kfree(data);
2780 		return -EIO;
2781 	}
2782 
2783 	rx_buf->data = data;
2784 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2785 
2786 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2787 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2788 
2789 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2790 
2791 	return 0;
2792 }
2793 
2794 static int
2795 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2796 {
2797 	struct status_block *sblk = bnapi->status_blk.msi;
2798 	u32 new_link_state, old_link_state;
2799 	int is_set = 1;
2800 
2801 	new_link_state = sblk->status_attn_bits & event;
2802 	old_link_state = sblk->status_attn_bits_ack & event;
2803 	if (new_link_state != old_link_state) {
2804 		if (new_link_state)
2805 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2806 		else
2807 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2808 	} else
2809 		is_set = 0;
2810 
2811 	return is_set;
2812 }
2813 
2814 static void
2815 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2816 {
2817 	spin_lock(&bp->phy_lock);
2818 
2819 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2820 		bnx2_set_link(bp);
2821 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2822 		bnx2_set_remote_link(bp);
2823 
2824 	spin_unlock(&bp->phy_lock);
2825 
2826 }
2827 
2828 static inline u16
2829 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2830 {
2831 	u16 cons;
2832 
2833 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2834 
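	/* The last descriptor of each ring page is a chain pointer to
	 * the next page, so an index that lands on it must be advanced
	 * past it.
	 */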
2835 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2836 		cons++;
2837 	return cons;
2838 }
2839 
2840 static int
2841 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2842 {
2843 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2844 	u16 hw_cons, sw_cons, sw_ring_cons;
2845 	int tx_pkt = 0, index;
2846 	unsigned int tx_bytes = 0;
2847 	struct netdev_queue *txq;
2848 
2849 	index = (bnapi - bp->bnx2_napi);
2850 	txq = netdev_get_tx_queue(bp->dev, index);
2851 
2852 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2853 	sw_cons = txr->tx_cons;
2854 
2855 	while (sw_cons != hw_cons) {
2856 		struct bnx2_sw_tx_bd *tx_buf;
2857 		struct sk_buff *skb;
2858 		int i, last;
2859 
2860 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2861 
2862 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2863 		skb = tx_buf->skb;
2864 
2865 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2866 		prefetch(&skb->end);
2867 
2868 		/* partial BD completions possible with TSO packets */
2869 		if (tx_buf->is_gso) {
2870 			u16 last_idx, last_ring_idx;
2871 
2872 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2873 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2874 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2875 				last_idx++;
2876 			}
2877 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2878 				break;
2879 			}
2880 		}
2881 
2882 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2883 			skb_headlen(skb), DMA_TO_DEVICE);
2884 
2885 		tx_buf->skb = NULL;
2886 		last = tx_buf->nr_frags;
2887 
2888 		for (i = 0; i < last; i++) {
2889 			struct bnx2_sw_tx_bd *tx_buf;
2890 
2891 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2892 
2893 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2894 			dma_unmap_page(&bp->pdev->dev,
2895 				dma_unmap_addr(tx_buf, mapping),
2896 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2897 				DMA_TO_DEVICE);
2898 		}
2899 
2900 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2901 
2902 		tx_bytes += skb->len;
2903 		dev_kfree_skb_any(skb);
2904 		tx_pkt++;
2905 		if (tx_pkt == budget)
2906 			break;
2907 
2908 		if (hw_cons == sw_cons)
2909 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2910 	}
2911 
2912 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2913 	txr->hw_tx_cons = hw_cons;
2914 	txr->tx_cons = sw_cons;
2915 
2916 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2917 	 * before checking for netif_tx_queue_stopped().  Without the
2918 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2919 	 * will miss it and cause the queue to be stopped forever.
2920 	 */
2921 	smp_mb();
2922 
2923 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2924 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2925 		__netif_tx_lock(txq, smp_processor_id());
2926 		if ((netif_tx_queue_stopped(txq)) &&
2927 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2928 			netif_tx_wake_queue(txq);
2929 		__netif_tx_unlock(txq);
2930 	}
2931 
2932 	return tx_pkt;
2933 }
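
/* Illustrative sketch (hedged, simplified from bnx2_start_xmit() later
 * in this file): the producer-side counterpart of the smp_mb() above.
 * The paired barriers guarantee that either the producer sees the new
 * tx_cons or the consumer sees the stopped queue, so the queue cannot
 * stall forever.
 */
static void example_tx_producer_stop(struct bnx2 *bp,
				     struct bnx2_tx_ring_info *txr,
				     struct netdev_queue *txq)
{
	if (bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS) {
		netif_tx_stop_queue(txq);
		/* Pairs with the smp_mb() in bnx2_tx_int() above. */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
}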
2934 
2935 static void
2936 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2937 			struct sk_buff *skb, int count)
2938 {
2939 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2940 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2941 	int i;
2942 	u16 hw_prod, prod;
2943 	u16 cons = rxr->rx_pg_cons;
2944 
2945 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2946 
2947 	/* The caller was unable to allocate a new page to replace the
2948 	 * last one in the frags array, so we need to recycle that page
2949 	 * and then free the skb.
2950 	 */
2951 	if (skb) {
2952 		struct page *page;
2953 		struct skb_shared_info *shinfo;
2954 
2955 		shinfo = skb_shinfo(skb);
2956 		shinfo->nr_frags--;
2957 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2958 
2959 		cons_rx_pg->page = page;
2960 		dev_kfree_skb(skb);
2961 	}
2962 
2963 	hw_prod = rxr->rx_pg_prod;
2964 
2965 	for (i = 0; i < count; i++) {
2966 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2967 
2968 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2969 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2970 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2971 						[BNX2_RX_IDX(cons)];
2972 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2973 						[BNX2_RX_IDX(prod)];
2974 
2975 		if (prod != cons) {
2976 			prod_rx_pg->page = cons_rx_pg->page;
2977 			cons_rx_pg->page = NULL;
2978 			dma_unmap_addr_set(prod_rx_pg, mapping,
2979 				dma_unmap_addr(cons_rx_pg, mapping));
2980 
2981 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2982 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2983 
2984 		}
2985 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2986 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2987 	}
2988 	rxr->rx_pg_prod = hw_prod;
2989 	rxr->rx_pg_cons = cons;
2990 }
2991 
2992 static inline void
2993 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2994 		   u8 *data, u16 cons, u16 prod)
2995 {
2996 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2997 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2998 
2999 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3000 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3001 
3002 	dma_sync_single_for_device(&bp->pdev->dev,
3003 		dma_unmap_addr(cons_rx_buf, mapping),
3004 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3005 
3006 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3007 
3008 	prod_rx_buf->data = data;
3009 
3010 	if (cons == prod)
3011 		return;
3012 
3013 	dma_unmap_addr_set(prod_rx_buf, mapping,
3014 			dma_unmap_addr(cons_rx_buf, mapping));
3015 
3016 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3017 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3018 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3019 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3020 }
3021 
3022 static struct sk_buff *
3023 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3024 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3025 	    u32 ring_idx)
3026 {
3027 	int err;
3028 	u16 prod = ring_idx & 0xffff;
3029 	struct sk_buff *skb;
3030 
3031 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3032 	if (unlikely(err)) {
3033 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3034 error:
3035 		if (hdr_len) {
3036 			unsigned int raw_len = len + 4;
3037 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3038 
3039 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3040 		}
3041 		return NULL;
3042 	}
3043 
3044 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3045 			 DMA_FROM_DEVICE);
3046 	skb = slab_build_skb(data);
3047 	if (!skb) {
3048 		kfree(data);
3049 		goto error;
3050 	}
3051 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3052 	if (hdr_len == 0) {
3053 		skb_put(skb, len);
3054 		return skb;
3055 	} else {
3056 		unsigned int i, frag_len, frag_size, pages;
3057 		struct bnx2_sw_pg *rx_pg;
3058 		u16 pg_cons = rxr->rx_pg_cons;
3059 		u16 pg_prod = rxr->rx_pg_prod;
3060 
3061 		frag_size = len + 4 - hdr_len;
3062 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3063 		skb_put(skb, hdr_len);
3064 
3065 		for (i = 0; i < pages; i++) {
3066 			dma_addr_t mapping_old;
3067 
3068 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3069 			if (unlikely(frag_len <= 4)) {
3070 				unsigned int tail = 4 - frag_len;
3071 
3072 				rxr->rx_pg_cons = pg_cons;
3073 				rxr->rx_pg_prod = pg_prod;
3074 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3075 							pages - i);
3076 				skb->len -= tail;
3077 				if (i == 0) {
3078 					skb->tail -= tail;
3079 				} else {
3080 					skb_frag_t *frag =
3081 						&skb_shinfo(skb)->frags[i - 1];
3082 					skb_frag_size_sub(frag, tail);
3083 					skb->data_len -= tail;
3084 				}
3085 				return skb;
3086 			}
3087 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3088 
3089 			/* Don't unmap yet.  If we're unable to allocate a new
3090 			 * page, we need to recycle the page and the DMA addr.
3091 			 */
3092 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3093 			if (i == pages - 1)
3094 				frag_len -= 4;
3095 
3096 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3097 			rx_pg->page = NULL;
3098 
3099 			err = bnx2_alloc_rx_page(bp, rxr,
3100 						 BNX2_RX_PG_RING_IDX(pg_prod),
3101 						 GFP_ATOMIC);
3102 			if (unlikely(err)) {
3103 				rxr->rx_pg_cons = pg_cons;
3104 				rxr->rx_pg_prod = pg_prod;
3105 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3106 							pages - i);
3107 				return NULL;
3108 			}
3109 
3110 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3111 				       PAGE_SIZE, DMA_FROM_DEVICE);
3112 
3113 			frag_size -= frag_len;
3114 			skb->data_len += frag_len;
3115 			skb->truesize += PAGE_SIZE;
3116 			skb->len += frag_len;
3117 
3118 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3119 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3120 		}
3121 		rxr->rx_pg_prod = pg_prod;
3122 		rxr->rx_pg_cons = pg_cons;
3123 	}
3124 	return skb;
3125 }
3126 
3127 static inline u16
3128 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3129 {
3130 	u16 cons;
3131 
3132 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3133 
3134 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3135 		cons++;
3136 	return cons;
3137 }
3138 
3139 static int
3140 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3141 {
3142 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3143 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3144 	struct l2_fhdr *rx_hdr;
3145 	int rx_pkt = 0, pg_ring_used = 0;
3146 
3147 	if (budget <= 0)
3148 		return rx_pkt;
3149 
3150 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3151 	sw_cons = rxr->rx_cons;
3152 	sw_prod = rxr->rx_prod;
3153 
3154 	/* Memory barrier necessary as speculative reads of the rx
3155 	 * buffer can be ahead of the index in the status block
3156 	 */
3157 	rmb();
3158 	while (sw_cons != hw_cons) {
3159 		unsigned int len, hdr_len;
3160 		u32 status;
3161 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3162 		struct sk_buff *skb;
3163 		dma_addr_t dma_addr;
3164 		u8 *data;
3165 		u16 next_ring_idx;
3166 
3167 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3168 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3169 
3170 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3171 		data = rx_buf->data;
3172 		rx_buf->data = NULL;
3173 
3174 		rx_hdr = get_l2_fhdr(data);
3175 		prefetch(rx_hdr);
3176 
3177 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3178 
3179 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3180 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3181 			DMA_FROM_DEVICE);
3182 
3183 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3184 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3185 		prefetch(get_l2_fhdr(next_rx_buf->data));
3186 
3187 		len = rx_hdr->l2_fhdr_pkt_len;
3188 		status = rx_hdr->l2_fhdr_status;
3189 
3190 		hdr_len = 0;
3191 		if (status & L2_FHDR_STATUS_SPLIT) {
3192 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3193 			pg_ring_used = 1;
3194 		} else if (len > bp->rx_jumbo_thresh) {
3195 			hdr_len = bp->rx_jumbo_thresh;
3196 			pg_ring_used = 1;
3197 		}
3198 
3199 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3200 				       L2_FHDR_ERRORS_PHY_DECODE |
3201 				       L2_FHDR_ERRORS_ALIGNMENT |
3202 				       L2_FHDR_ERRORS_TOO_SHORT |
3203 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3204 
3205 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3206 					  sw_ring_prod);
3207 			if (pg_ring_used) {
3208 				int pages;
3209 
3210 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3211 
3212 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3213 			}
3214 			goto next_rx;
3215 		}
3216 
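		/* l2_fhdr_pkt_len includes the 4-byte frame CRC that
		 * the controller leaves on the packet; strip it.
		 */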
3217 		len -= 4;
3218 
3219 		if (len <= bp->rx_copy_thresh) {
3220 			skb = netdev_alloc_skb(bp->dev, len + 6);
3221 			if (!skb) {
3222 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3223 						  sw_ring_prod);
3224 				goto next_rx;
3225 			}
3226 
3227 			/* aligned copy */
3228 			memcpy(skb->data,
3229 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3230 			       len + 6);
3231 			skb_reserve(skb, 6);
3232 			skb_put(skb, len);
3233 
3234 			bnx2_reuse_rx_data(bp, rxr, data,
3235 				sw_ring_cons, sw_ring_prod);
3236 
3237 		} else {
3238 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3239 					  (sw_ring_cons << 16) | sw_ring_prod);
3240 			if (!skb)
3241 				goto next_rx;
3242 		}
3243 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3244 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3245 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3246 
3247 		skb->protocol = eth_type_trans(skb, bp->dev);
3248 
3249 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3250 		    skb->protocol != htons(0x8100) &&
3251 		    skb->protocol != htons(ETH_P_8021AD)) {
3252 
3253 			dev_kfree_skb(skb);
3254 			goto next_rx;
3255 
3256 		}
3257 
3258 		skb_checksum_none_assert(skb);
3259 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3260 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3261 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3262 
3263 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3264 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3265 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3266 		}
3267 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3268 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3269 		     L2_FHDR_STATUS_USE_RXHASH))
3270 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3271 				     PKT_HASH_TYPE_L3);
3272 
3273 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3274 		napi_gro_receive(&bnapi->napi, skb);
3275 		rx_pkt++;
3276 
3277 next_rx:
3278 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3279 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3280 
3281 		if (rx_pkt == budget)
3282 			break;
3283 
3284 		/* Refresh hw_cons to see if there is new work */
3285 		if (sw_cons == hw_cons) {
3286 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3287 			rmb();
3288 		}
3289 	}
3290 	rxr->rx_cons = sw_cons;
3291 	rxr->rx_prod = sw_prod;
3292 
3293 	if (pg_ring_used)
3294 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3295 
3296 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3297 
3298 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3299 
3300 	return rx_pkt;
3301 
3302 }
3303 
3304 /* MSI ISR - The only difference between this and the INTx ISR
3305  * is that the MSI interrupt is always serviced.
3306  */
3307 static irqreturn_t
3308 bnx2_msi(int irq, void *dev_instance)
3309 {
3310 	struct bnx2_napi *bnapi = dev_instance;
3311 	struct bnx2 *bp = bnapi->bp;
3312 
3313 	prefetch(bnapi->status_blk.msi);
3314 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3315 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3316 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3317 
3318 	/* Return here if interrupt is disabled. */
3319 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3320 		return IRQ_HANDLED;
3321 
3322 	napi_schedule(&bnapi->napi);
3323 
3324 	return IRQ_HANDLED;
3325 }
3326 
3327 static irqreturn_t
3328 bnx2_msi_1shot(int irq, void *dev_instance)
3329 {
3330 	struct bnx2_napi *bnapi = dev_instance;
3331 	struct bnx2 *bp = bnapi->bp;
3332 
3333 	prefetch(bnapi->status_blk.msi);
3334 
3335 	/* Return here if interrupt is disabled. */
3336 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3337 		return IRQ_HANDLED;
3338 
3339 	napi_schedule(&bnapi->napi);
3340 
3341 	return IRQ_HANDLED;
3342 }
3343 
3344 static irqreturn_t
3345 bnx2_interrupt(int irq, void *dev_instance)
3346 {
3347 	struct bnx2_napi *bnapi = dev_instance;
3348 	struct bnx2 *bp = bnapi->bp;
3349 	struct status_block *sblk = bnapi->status_blk.msi;
3350 
3351 	/* When using INTx, it is possible for the interrupt to arrive
3352 	 * at the CPU before the status block posted prior to the
3353 	 * interrupt. Reading a register will flush the status block.
3354 	 * When using MSI, the MSI message will always complete after
3355 	 * the status block write.
3356 	 */
3357 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3358 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3359 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3360 		return IRQ_NONE;
3361 
3362 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3363 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3364 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3365 
3366 	/* Read back to deassert IRQ immediately to avoid too many
3367 	 * spurious interrupts.
3368 	 */
3369 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3370 
3371 	/* Return here if interrupt is shared and is disabled. */
3372 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3373 		return IRQ_HANDLED;
3374 
3375 	if (napi_schedule_prep(&bnapi->napi)) {
3376 		bnapi->last_status_idx = sblk->status_idx;
3377 		__napi_schedule(&bnapi->napi);
3378 	}
3379 
3380 	return IRQ_HANDLED;
3381 }
3382 
3383 static inline int
3384 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3385 {
3386 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3387 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3388 
3389 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3390 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3391 		return 1;
3392 	return 0;
3393 }
3394 
3395 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3396 				 STATUS_ATTN_BITS_TIMER_ABORT)
3397 
3398 static inline int
3399 bnx2_has_work(struct bnx2_napi *bnapi)
3400 {
3401 	struct status_block *sblk = bnapi->status_blk.msi;
3402 
3403 	if (bnx2_has_fast_work(bnapi))
3404 		return 1;
3405 
3406 #ifdef BCM_CNIC
3407 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3408 		return 1;
3409 #endif
3410 
3411 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3412 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3413 		return 1;
3414 
3415 	return 0;
3416 }
3417 
3418 static void
3419 bnx2_chk_missed_msi(struct bnx2 *bp)
3420 {
3421 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3422 	u32 msi_ctrl;
3423 
3424 	if (bnx2_has_work(bnapi)) {
3425 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3426 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3427 			return;
3428 
3429 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3430 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3431 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3432 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3433 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3434 		}
3435 	}
3436 
3437 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3438 }
3439 
3440 #ifdef BCM_CNIC
3441 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3442 {
3443 	struct cnic_ops *c_ops;
3444 
3445 	if (!bnapi->cnic_present)
3446 		return;
3447 
3448 	rcu_read_lock();
3449 	c_ops = rcu_dereference(bp->cnic_ops);
3450 	if (c_ops)
3451 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3452 						      bnapi->status_blk.msi);
3453 	rcu_read_unlock();
3454 }
3455 #endif
3456 
3457 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3458 {
3459 	struct status_block *sblk = bnapi->status_blk.msi;
3460 	u32 status_attn_bits = sblk->status_attn_bits;
3461 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3462 
3463 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3464 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3465 
3466 		bnx2_phy_int(bp, bnapi);
3467 
3468 		/* This is needed to take care of transient status
3469 		 * during link changes.
3470 		 */
3471 		BNX2_WR(bp, BNX2_HC_COMMAND,
3472 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3473 		BNX2_RD(bp, BNX2_HC_COMMAND);
3474 	}
3475 }
3476 
3477 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3478 			  int work_done, int budget)
3479 {
3480 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3481 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3482 
3483 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3484 		bnx2_tx_int(bp, bnapi, 0);
3485 
3486 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3487 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3488 
3489 	return work_done;
3490 }
3491 
3492 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3493 {
3494 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3495 	struct bnx2 *bp = bnapi->bp;
3496 	int work_done = 0;
3497 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3498 
3499 	while (1) {
3500 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3501 		if (unlikely(work_done >= budget))
3502 			break;
3503 
3504 		bnapi->last_status_idx = sblk->status_idx;
3505 		/* status idx must be read before checking for more work. */
3506 		rmb();
3507 		if (likely(!bnx2_has_fast_work(bnapi))) {
3508 
3509 			napi_complete_done(napi, work_done);
3510 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3511 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3512 				bnapi->last_status_idx);
3513 			break;
3514 		}
3515 	}
3516 	return work_done;
3517 }
3518 
3519 static int bnx2_poll(struct napi_struct *napi, int budget)
3520 {
3521 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3522 	struct bnx2 *bp = bnapi->bp;
3523 	int work_done = 0;
3524 	struct status_block *sblk = bnapi->status_blk.msi;
3525 
3526 	while (1) {
3527 		bnx2_poll_link(bp, bnapi);
3528 
3529 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3530 
3531 #ifdef BCM_CNIC
3532 		bnx2_poll_cnic(bp, bnapi);
3533 #endif
3534 
3535 		/* bnapi->last_status_idx is used below to tell the hw how
3536 		 * much work has been processed, so we must read it before
3537 		 * checking for more work.
3538 		 */
3539 		bnapi->last_status_idx = sblk->status_idx;
3540 
3541 		if (unlikely(work_done >= budget))
3542 			break;
3543 
3544 		rmb();
3545 		if (likely(!bnx2_has_work(bnapi))) {
3546 			napi_complete_done(napi, work_done);
3547 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3548 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3549 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3550 					bnapi->last_status_idx);
3551 				break;
3552 			}
3553 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3554 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3555 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3556 				bnapi->last_status_idx);
3557 
3558 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3559 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3560 				bnapi->last_status_idx);
3561 			break;
3562 		}
3563 	}
3564 
3565 	return work_done;
3566 }
3567 
3568 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3569  * from set_multicast.
3570  */
3571 static void
3572 bnx2_set_rx_mode(struct net_device *dev)
3573 {
3574 	struct bnx2 *bp = netdev_priv(dev);
3575 	u32 rx_mode, sort_mode;
3576 	struct netdev_hw_addr *ha;
3577 	int i;
3578 
3579 	if (!netif_running(dev))
3580 		return;
3581 
3582 	spin_lock_bh(&bp->phy_lock);
3583 
3584 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3585 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3586 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3587 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3588 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3589 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3590 	if (dev->flags & IFF_PROMISC) {
3591 		/* Promiscuous mode. */
3592 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3593 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3594 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3595 	}
3596 	else if (dev->flags & IFF_ALLMULTI) {
3597 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3598 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3599 				0xffffffff);
3600 		}
3601 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3602 	}
3603 	else {
3604 		/* Accept one or more multicast(s). */
3605 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3606 		u32 regidx;
3607 		u32 bit;
3608 		u32 crc;
3609 
3610 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3611 
3612 		netdev_for_each_mc_addr(ha, dev) {
3613 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3614 			bit = crc & 0xff;
3615 			regidx = (bit & 0xe0) >> 5;
3616 			bit &= 0x1f;
3617 			mc_filter[regidx] |= (1 << bit);
3618 		}
3619 
3620 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3621 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3622 				mc_filter[i]);
3623 		}
3624 
3625 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3626 	}
3627 
3628 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3629 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3630 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3631 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3632 	} else if (!(dev->flags & IFF_PROMISC)) {
3633 		/* Add all entries to the match filter list */
3634 		i = 0;
3635 		netdev_for_each_uc_addr(ha, dev) {
3636 			bnx2_set_mac_addr(bp, ha->addr,
3637 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3638 			sort_mode |= (1 <<
3639 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3640 			i++;
3641 		}
3642 
3643 	}
3644 
3645 	if (rx_mode != bp->rx_mode) {
3646 		bp->rx_mode = rx_mode;
3647 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3648 	}
3649 
3650 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3651 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3652 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3653 
3654 	spin_unlock_bh(&bp->phy_lock);
3655 }
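
/* Illustrative sketch (hypothetical helper): the multicast hash in
 * bnx2_set_rx_mode() maps each address to one bit of the eight 32-bit
 * hash registers.  Factored out, the mapping is:
 */
static void example_mc_hash_pos(const u8 *addr, u32 *regidx, u32 *bit)
{
	u32 h = ether_crc_le(ETH_ALEN, addr) & 0xff; /* low CRC byte */

	*regidx = (h & 0xe0) >> 5;	/* which of the 8 registers */
	*bit = h & 0x1f;		/* which bit within it */
}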
3656 
3657 static int
3658 check_fw_section(const struct firmware *fw,
3659 		 const struct bnx2_fw_file_section *section,
3660 		 u32 alignment, bool non_empty)
3661 {
3662 	u32 offset = be32_to_cpu(section->offset);
3663 	u32 len = be32_to_cpu(section->len);
3664 
3665 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3666 		return -EINVAL;
3667 	if ((non_empty && len == 0) || len > fw->size - offset ||
3668 	    len & (alignment - 1))
3669 		return -EINVAL;
3670 	return 0;
3671 }
3672 
3673 static int
3674 check_mips_fw_entry(const struct firmware *fw,
3675 		    const struct bnx2_mips_fw_file_entry *entry)
3676 {
3677 	if (check_fw_section(fw, &entry->text, 4, true) ||
3678 	    check_fw_section(fw, &entry->data, 4, false) ||
3679 	    check_fw_section(fw, &entry->rodata, 4, false))
3680 		return -EINVAL;
3681 	return 0;
3682 }
3683 
3684 static void bnx2_release_firmware(struct bnx2 *bp)
3685 {
3686 	if (bp->rv2p_firmware) {
3687 		release_firmware(bp->mips_firmware);
3688 		release_firmware(bp->rv2p_firmware);
3689 		bp->rv2p_firmware = NULL;
3690 	}
3691 }
3692 
3693 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3694 {
3695 	const char *mips_fw_file, *rv2p_fw_file;
3696 	const struct bnx2_mips_fw_file *mips_fw;
3697 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3698 	int rc;
3699 
3700 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3701 		mips_fw_file = FW_MIPS_FILE_09;
3702 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3703 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3704 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3705 		else
3706 			rv2p_fw_file = FW_RV2P_FILE_09;
3707 	} else {
3708 		mips_fw_file = FW_MIPS_FILE_06;
3709 		rv2p_fw_file = FW_RV2P_FILE_06;
3710 	}
3711 
3712 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3713 	if (rc) {
3714 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3715 		goto out;
3716 	}
3717 
3718 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3719 	if (rc) {
3720 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3721 		goto err_release_mips_firmware;
3722 	}
3723 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3724 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3725 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3726 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3727 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3728 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3729 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3730 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3731 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3732 		rc = -EINVAL;
3733 		goto err_release_firmware;
3734 	}
3735 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3736 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3737 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3738 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3739 		rc = -EINVAL;
3740 		goto err_release_firmware;
3741 	}
3742 out:
3743 	return rc;
3744 
3745 err_release_firmware:
3746 	release_firmware(bp->rv2p_firmware);
3747 	bp->rv2p_firmware = NULL;
3748 err_release_mips_firmware:
3749 	release_firmware(bp->mips_firmware);
3750 	goto out;
3751 }
3752 
3753 static int bnx2_request_firmware(struct bnx2 *bp)
3754 {
3755 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3756 }
3757 
3758 static u32
3759 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3760 {
3761 	switch (idx) {
3762 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3763 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3764 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3765 		break;
3766 	}
3767 	return rv2p_code;
3768 }
3769 
3770 static int
3771 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3772 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3773 {
3774 	u32 rv2p_code_len, file_offset;
3775 	__be32 *rv2p_code;
3776 	int i;
3777 	u32 val, cmd, addr;
3778 
3779 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3780 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3781 
3782 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3783 
3784 	if (rv2p_proc == RV2P_PROC1) {
3785 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3786 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3787 	} else {
3788 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3789 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3790 	}
3791 
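	/* Each RV2P instruction is 64 bits wide: write the high and low
	 * halves, then commit them to instruction slot i/8 through the
	 * processor's address/command register.
	 */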
3792 	for (i = 0; i < rv2p_code_len; i += 8) {
3793 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3794 		rv2p_code++;
3795 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3796 		rv2p_code++;
3797 
3798 		val = (i / 8) | cmd;
3799 		BNX2_WR(bp, addr, val);
3800 	}
3801 
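	/* Apply fixups: each nonzero fixup entry holds the word index
	 * of an instruction to patch in place (currently only the BD
	 * page size).
	 */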
3802 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3803 	for (i = 0; i < 8; i++) {
3804 		u32 loc, code;
3805 
3806 		loc = be32_to_cpu(fw_entry->fixup[i]);
3807 		if (loc && ((loc * 4) < rv2p_code_len)) {
3808 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3809 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3810 			code = be32_to_cpu(*(rv2p_code + loc));
3811 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3812 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3813 
3814 			val = (loc / 2) | cmd;
3815 			BNX2_WR(bp, addr, val);
3816 		}
3817 	}
3818 
3819 	/* Reset the processor, un-stall is done later. */
3820 	if (rv2p_proc == RV2P_PROC1) {
3821 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3822 	}
3823 	else {
3824 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3825 	}
3826 
3827 	return 0;
3828 }
3829 
3830 static void
3831 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3832 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3833 {
3834 	u32 addr, len, file_offset;
3835 	__be32 *data;
3836 	u32 offset;
3837 	u32 val;
3838 
3839 	/* Halt the CPU. */
3840 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3841 	val |= cpu_reg->mode_value_halt;
3842 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3843 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3844 
3845 	/* Load the Text area. */
3846 	addr = be32_to_cpu(fw_entry->text.addr);
3847 	len = be32_to_cpu(fw_entry->text.len);
3848 	file_offset = be32_to_cpu(fw_entry->text.offset);
3849 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3850 
3851 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3852 	if (len) {
3853 		int j;
3854 
3855 		for (j = 0; j < (len / 4); j++, offset += 4)
3856 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3857 	}
3858 
3859 	/* Load the Data area. */
3860 	addr = be32_to_cpu(fw_entry->data.addr);
3861 	len = be32_to_cpu(fw_entry->data.len);
3862 	file_offset = be32_to_cpu(fw_entry->data.offset);
3863 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3864 
3865 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3866 	if (len) {
3867 		int j;
3868 
3869 		for (j = 0; j < (len / 4); j++, offset += 4)
3870 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3871 	}
3872 
3873 	/* Load the Read-Only area. */
3874 	addr = be32_to_cpu(fw_entry->rodata.addr);
3875 	len = be32_to_cpu(fw_entry->rodata.len);
3876 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3877 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3878 
3879 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3880 	if (len) {
3881 		int j;
3882 
3883 		for (j = 0; j < (len / 4); j++, offset += 4)
3884 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3885 	}
3886 
3887 	/* Clear the pre-fetch instruction. */
3888 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3889 
3890 	val = be32_to_cpu(fw_entry->start_addr);
3891 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3892 
3893 	/* Start the CPU. */
3894 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3895 	val &= ~cpu_reg->mode_value_halt;
3896 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3897 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3898 }
3899 
3900 static void
3901 bnx2_init_cpus(struct bnx2 *bp)
3902 {
3903 	const struct bnx2_mips_fw_file *mips_fw =
3904 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3905 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3906 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3907 
3908 	/* Initialize the RV2P processor. */
3909 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3910 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3911 
3912 	/* Initialize the RX Processor. */
3913 	load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3914 
3915 	/* Initialize the TX Processor. */
3916 	load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3917 
3918 	/* Initialize the TX Patch-up Processor. */
3919 	load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3920 
3921 	/* Initialize the Completion Processor. */
3922 	load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3923 
3924 	/* Initialize the Command Processor. */
3925 	load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3926 }
3927 
3928 static void
3929 bnx2_setup_wol(struct bnx2 *bp)
3930 {
3931 	int i;
3932 	u32 val, wol_msg;
3933 
3934 	if (bp->wol) {
3935 		u32 advertising;
3936 		u8 autoneg;
3937 
3938 		autoneg = bp->autoneg;
3939 		advertising = bp->advertising;
3940 
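		/* On copper ports, renegotiate the link down to 10/100
		 * while armed for wake-up; the saved autoneg and
		 * advertising settings are restored below.
		 */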
3941 		if (bp->phy_port == PORT_TP) {
3942 			bp->autoneg = AUTONEG_SPEED;
3943 			bp->advertising = ADVERTISED_10baseT_Half |
3944 				ADVERTISED_10baseT_Full |
3945 				ADVERTISED_100baseT_Half |
3946 				ADVERTISED_100baseT_Full |
3947 				ADVERTISED_Autoneg;
3948 		}
3949 
3950 		spin_lock_bh(&bp->phy_lock);
3951 		bnx2_setup_phy(bp, bp->phy_port);
3952 		spin_unlock_bh(&bp->phy_lock);
3953 
3954 		bp->autoneg = autoneg;
3955 		bp->advertising = advertising;
3956 
3957 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3958 
3959 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3960 
3961 		/* Enable port mode. */
3962 		val &= ~BNX2_EMAC_MODE_PORT;
3963 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3964 		       BNX2_EMAC_MODE_ACPI_RCVD |
3965 		       BNX2_EMAC_MODE_MPKT;
3966 		if (bp->phy_port == PORT_TP) {
3967 			val |= BNX2_EMAC_MODE_PORT_MII;
3968 		} else {
3969 			val |= BNX2_EMAC_MODE_PORT_GMII;
3970 			if (bp->line_speed == SPEED_2500)
3971 				val |= BNX2_EMAC_MODE_25G_MODE;
3972 		}
3973 
3974 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3975 
3976 		/* receive all multicast */
3977 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3978 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3979 				0xffffffff);
3980 		}
3981 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3982 
3983 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3984 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3985 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3986 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3987 
3988 		/* Need to enable EMAC and RPM for WOL. */
3989 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3990 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3991 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3992 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3993 
3994 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3995 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3996 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3997 
3998 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3999 	} else {
4000 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4001 	}
4002 
4003 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4004 		u32 val;
4005 
4006 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4007 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4008 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4009 			return;
4010 		}
4011 		/* Tell firmware not to power down the PHY yet, otherwise
4012 		 * the chip will take a long time to respond to MMIO reads.
4013 		 */
4014 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4015 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4016 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4017 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4018 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4019 	}
4020 
4021 }
4022 
4023 static int
4024 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4025 {
4026 	switch (state) {
4027 	case PCI_D0: {
4028 		u32 val;
4029 
4030 		pci_enable_wake(bp->pdev, PCI_D0, false);
4031 		pci_set_power_state(bp->pdev, PCI_D0);
4032 
4033 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4034 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4035 		val &= ~BNX2_EMAC_MODE_MPKT;
4036 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4037 
4038 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4039 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4040 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4041 		break;
4042 	}
4043 	case PCI_D3hot: {
4044 		bnx2_setup_wol(bp);
4045 		pci_wake_from_d3(bp->pdev, bp->wol);
4046 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4047 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4048 
4049 			if (bp->wol)
4050 				pci_set_power_state(bp->pdev, PCI_D3hot);
4051 			break;
4052 
4053 		}
4054 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4055 			u32 val;
4056 
4057 			/* Tell firmware not to power down the PHY yet,
4058 			 * otherwise the other port may not respond to
4059 			 * MMIO reads.
4060 			 */
4061 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4062 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4063 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4064 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4065 		}
4066 		pci_set_power_state(bp->pdev, PCI_D3hot);
4067 
4068 		/* No more memory access after this point until
4069 		 * device is brought back to D0.
4070 		 */
4071 		break;
4072 	}
4073 	default:
4074 		return -EINVAL;
4075 	}
4076 	return 0;
4077 }
4078 
4079 static int
4080 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4081 {
4082 	u32 val;
4083 	int j;
4084 
4085 	/* Request access to the flash interface. */
4086 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4087 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4089 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4090 			break;
4091 
4092 		udelay(5);
4093 	}
4094 
4095 	if (j >= NVRAM_TIMEOUT_COUNT)
4096 		return -EBUSY;
4097 
4098 	return 0;
4099 }
4100 
4101 static int
4102 bnx2_release_nvram_lock(struct bnx2 *bp)
4103 {
4104 	int j;
4105 	u32 val;
4106 
4107 	/* Relinquish nvram interface. */
4108 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4109 
4110 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4111 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4112 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4113 			break;
4114 
4115 		udelay(5);
4116 	}
4117 
4118 	if (j >= NVRAM_TIMEOUT_COUNT)
4119 		return -EBUSY;
4120 
4121 	return 0;
4122 }
4123 
4124 
4125 static int
4126 bnx2_enable_nvram_write(struct bnx2 *bp)
4127 {
4128 	u32 val;
4129 
4130 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4131 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4132 
4133 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4134 		int j;
4135 
4136 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4137 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4138 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4139 
4140 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4141 			udelay(5);
4142 
4143 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4144 			if (val & BNX2_NVM_COMMAND_DONE)
4145 				break;
4146 		}
4147 
4148 		if (j >= NVRAM_TIMEOUT_COUNT)
4149 			return -EBUSY;
4150 	}
4151 	return 0;
4152 }
4153 
4154 static void
4155 bnx2_disable_nvram_write(struct bnx2 *bp)
4156 {
4157 	u32 val;
4158 
4159 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4160 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4161 }
4162 
4163 
4164 static void
4165 bnx2_enable_nvram_access(struct bnx2 *bp)
4166 {
4167 	u32 val;
4168 
4169 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4170 	/* Enable both bits, even on read. */
4171 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4172 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4173 }
4174 
4175 static void
4176 bnx2_disable_nvram_access(struct bnx2 *bp)
4177 {
4178 	u32 val;
4179 
4180 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4181 	/* Disable both bits, even after read. */
4182 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4183 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4184 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4185 }
4186 
4187 static int
4188 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4189 {
4190 	u32 cmd;
4191 	int j;
4192 
4193 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4194 		/* Buffered flash, no erase needed */
4195 		return 0;
4196 
4197 	/* Build an erase command */
4198 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4199 	      BNX2_NVM_COMMAND_DOIT;
4200 
4201 	/* Need to clear DONE bit separately. */
4202 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4203 
4204 	/* Address of the NVRAM page to erase. */
4205 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4206 
4207 	/* Issue an erase command. */
4208 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4209 
4210 	/* Wait for completion. */
4211 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4212 		u32 val;
4213 
4214 		udelay(5);
4215 
4216 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4217 		if (val & BNX2_NVM_COMMAND_DONE)
4218 			break;
4219 	}
4220 
4221 	if (j >= NVRAM_TIMEOUT_COUNT)
4222 		return -EBUSY;
4223 
4224 	return 0;
4225 }
4226 
4227 static int
4228 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4229 {
4230 	u32 cmd;
4231 	int j;
4232 
4233 	/* Build the command word. */
4234 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4235 
4236 	/* Translate to the buffered-flash page addressing; not needed for the 5709. */
4237 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238 		offset = ((offset / bp->flash_info->page_size) <<
4239 			   bp->flash_info->page_bits) +
4240 			  (offset % bp->flash_info->page_size);
4241 	}
4242 
4243 	/* Need to clear DONE bit separately. */
4244 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4245 
4246 	/* Address of the NVRAM to read from. */
4247 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4248 
4249 	/* Issue a read command. */
4250 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4251 
4252 	/* Wait for completion. */
4253 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4254 		u32 val;
4255 
4256 		udelay(5);
4257 
4258 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4259 		if (val & BNX2_NVM_COMMAND_DONE) {
4260 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4261 			memcpy(ret_val, &v, 4);
4262 			break;
4263 		}
4264 	}
4265 	if (j >= NVRAM_TIMEOUT_COUNT)
4266 		return -EBUSY;
4267 
4268 	return 0;
4269 }
4270 
4271 
4272 static int
4273 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4274 {
4275 	u32 cmd;
4276 	__be32 val32;
4277 	int j;
4278 
4279 	/* Build the command word. */
4280 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4281 
4282 	/* Translate to the buffered-flash page addressing; not needed for the 5709. */
4283 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4284 		offset = ((offset / bp->flash_info->page_size) <<
4285 			  bp->flash_info->page_bits) +
4286 			 (offset % bp->flash_info->page_size);
4287 	}
4288 
4289 	/* Need to clear DONE bit separately. */
4290 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4291 
4292 	memcpy(&val32, val, 4);
4293 
4294 	/* Write the data. */
4295 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4296 
4297 	/* Address of the NVRAM to write to. */
4298 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4299 
4300 	/* Issue the write command. */
4301 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4302 
4303 	/* Wait for completion. */
4304 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4305 		udelay(5);
4306 
4307 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4308 			break;
4309 	}
4310 	if (j >= NVRAM_TIMEOUT_COUNT)
4311 		return -EBUSY;
4312 
4313 	return 0;
4314 }
4315 
4316 static int
4317 bnx2_init_nvram(struct bnx2 *bp)
4318 {
4319 	u32 val;
4320 	int j, entry_count, rc = 0;
4321 	const struct flash_spec *flash;
4322 
4323 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4324 		bp->flash_info = &flash_5709;
4325 		goto get_flash_size;
4326 	}
4327 
4328 	/* Determine the selected interface. */
4329 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4330 
4331 	entry_count = ARRAY_SIZE(flash_table);
4332 
4333 	if (val & 0x40000000) {
4334 
4335 		/* Flash interface has been reconfigured */
4336 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4337 		     j++, flash++) {
4338 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4339 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4340 				bp->flash_info = flash;
4341 				break;
4342 			}
4343 		}
4344 	}
4345 	else {
4346 		u32 mask;
4347 		/* Not yet reconfigured */
4348 
4349 		if (val & (1 << 23))
4350 			mask = FLASH_BACKUP_STRAP_MASK;
4351 		else
4352 			mask = FLASH_STRAP_MASK;
4353 
4354 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4355 			j++, flash++) {
4356 
4357 			if ((val & mask) == (flash->strapping & mask)) {
4358 				bp->flash_info = flash;
4359 
4360 				/* Request access to the flash interface. */
4361 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4362 					return rc;
4363 
4364 				/* Enable access to flash interface */
4365 				bnx2_enable_nvram_access(bp);
4366 
4367 				/* Reconfigure the flash interface */
4368 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4369 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4370 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4371 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4372 
4373 				/* Disable access to flash interface */
4374 				bnx2_disable_nvram_access(bp);
4375 				bnx2_release_nvram_lock(bp);
4376 
4377 				break;
4378 			}
4379 		}
4380 	} /* if (val & 0x40000000) */
4381 
4382 	if (j == entry_count) {
4383 		bp->flash_info = NULL;
4384 		pr_alert("Unknown flash/EEPROM type\n");
4385 		return -ENODEV;
4386 	}
4387 
4388 get_flash_size:
4389 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4390 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4391 	if (val)
4392 		bp->flash_size = val;
4393 	else
4394 		bp->flash_size = bp->flash_info->total_size;
4395 
4396 	return rc;
4397 }
4398 
4399 static int
4400 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4401 		int buf_size)
4402 {
4403 	int rc = 0;
4404 	u32 cmd_flags, offset32, len32, extra;
4405 
4406 	if (buf_size == 0)
4407 		return 0;
4408 
4409 	/* Request access to the flash interface. */
4410 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4411 		return rc;
4412 
4413 	/* Enable access to flash interface */
4414 	bnx2_enable_nvram_access(bp);
4415 
4416 	len32 = buf_size;
4417 	offset32 = offset;
4418 	extra = 0;
4419 
4420 	cmd_flags = 0;
4421 
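	/* NVRAM accesses are dword wide: read an unaligned head word
	 * first, then whole dwords, then one final word covering any
	 * unaligned tail, copying out only the bytes requested.
	 */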
4422 	if (offset32 & 3) {
4423 		u8 buf[4];
4424 		u32 pre_len;
4425 
4426 		offset32 &= ~3;
4427 		pre_len = 4 - (offset & 3);
4428 
4429 		if (pre_len >= len32) {
4430 			pre_len = len32;
4431 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4432 				    BNX2_NVM_COMMAND_LAST;
4433 		}
4434 		else {
4435 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4436 		}
4437 
4438 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4439 
4440 		if (rc)
4441 			return rc;
4442 
4443 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4444 
4445 		offset32 += 4;
4446 		ret_buf += pre_len;
4447 		len32 -= pre_len;
4448 	}
4449 	if (len32 & 3) {
4450 		extra = 4 - (len32 & 3);
4451 		len32 = (len32 + 4) & ~3;
4452 	}
4453 
4454 	if (len32 == 4) {
4455 		u8 buf[4];
4456 
4457 		if (cmd_flags)
4458 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4459 		else
4460 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4461 				    BNX2_NVM_COMMAND_LAST;
4462 
4463 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4464 
4465 		memcpy(ret_buf, buf, 4 - extra);
4466 	}
4467 	else if (len32 > 0) {
4468 		u8 buf[4];
4469 
4470 		/* Read the first word. */
4471 		if (cmd_flags)
4472 			cmd_flags = 0;
4473 		else
4474 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4475 
4476 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4477 
4478 		/* Advance to the next dword. */
4479 		offset32 += 4;
4480 		ret_buf += 4;
4481 		len32 -= 4;
4482 
4483 		while (len32 > 4 && rc == 0) {
4484 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4485 
4486 			/* Advance to the next dword. */
4487 			offset32 += 4;
4488 			ret_buf += 4;
4489 			len32 -= 4;
4490 		}
4491 
4492 		if (rc)
4493 			return rc;
4494 
4495 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4496 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4497 
4498 		memcpy(ret_buf, buf, 4 - extra);
4499 	}
4500 
4501 	/* Disable access to flash interface */
4502 	bnx2_disable_nvram_access(bp);
4503 
4504 	bnx2_release_nvram_lock(bp);
4505 
4506 	return rc;
4507 }
4508 
4509 static int
4510 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4511 		int buf_size)
4512 {
4513 	u32 written, offset32, len32;
4514 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4515 	int rc = 0;
4516 	int align_start, align_end;
4517 
4518 	buf = data_buf;
4519 	offset32 = offset;
4520 	len32 = buf_size;
4521 	align_start = align_end = 0;
4522 
4523 	if ((align_start = (offset32 & 3))) {
4524 		offset32 &= ~3;
4525 		len32 += align_start;
4526 		if (len32 < 4)
4527 			len32 = 4;
4528 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4529 			return rc;
4530 	}
4531 
4532 	if (len32 & 3) {
4533 		align_end = 4 - (len32 & 3);
4534 		len32 += align_end;
4535 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4536 			return rc;
4537 	}
4538 
4539 	if (align_start || align_end) {
4540 		align_buf = kmalloc(len32, GFP_KERNEL);
4541 		if (!align_buf)
4542 			return -ENOMEM;
4543 		if (align_start) {
4544 			memcpy(align_buf, start, 4);
4545 		}
4546 		if (align_end) {
4547 			memcpy(align_buf + len32 - 4, end, 4);
4548 		}
4549 		memcpy(align_buf + align_start, data_buf, buf_size);
4550 		buf = align_buf;
4551 	}
4552 
4553 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4554 		flash_buffer = kmalloc(264, GFP_KERNEL);
4555 		if (!flash_buffer) {
4556 			rc = -ENOMEM;
4557 			goto nvram_write_end;
4558 		}
4559 	}
4560 
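	/* Write page by page: on non-buffered flash each page is first
	 * read into flash_buffer, erased, and then rewritten with the
	 * old data preserved around the new bytes (a read-modify-write
	 * of the whole page).
	 */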
4561 	written = 0;
4562 	while ((written < len32) && (rc == 0)) {
4563 		u32 page_start, page_end, data_start, data_end;
4564 		u32 addr, cmd_flags;
4565 		int i;
4566 
4567 		/* Find the page_start addr */
4568 		page_start = offset32 + written;
4569 		page_start -= (page_start % bp->flash_info->page_size);
4570 		/* Find the page_end addr */
4571 		page_end = page_start + bp->flash_info->page_size;
4572 		/* Find the data_start addr */
4573 		data_start = (written == 0) ? offset32 : page_start;
4574 		/* Find the data_end addr */
4575 		data_end = (page_end > offset32 + len32) ?
4576 			(offset32 + len32) : page_end;
4577 
4578 		/* Request access to the flash interface. */
4579 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4580 			goto nvram_write_end;
4581 
4582 		/* Enable access to flash interface */
4583 		bnx2_enable_nvram_access(bp);
4584 
4585 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4586 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4587 			int j;
4588 
4589 			/* Read the whole page into the buffer
4590 			 * (non-buffered flash only) */
4591 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4592 				if (j == (bp->flash_info->page_size - 4)) {
4593 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4594 				}
4595 				rc = bnx2_nvram_read_dword(bp,
4596 					page_start + j,
4597 					&flash_buffer[j],
4598 					cmd_flags);
4599 
4600 				if (rc)
4601 					goto nvram_write_end;
4602 
4603 				cmd_flags = 0;
4604 			}
4605 		}
4606 
4607 		/* Enable writes to flash interface (unlock write-protect) */
4608 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4609 			goto nvram_write_end;
4610 
4611 		/* Loop to write back the buffer data from page_start to
4612 		 * data_start */
4613 		i = 0;
4614 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4615 			/* Erase the page */
4616 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4617 				goto nvram_write_end;
4618 
4619 			/* Re-enable the write again for the actual write */
4620 			bnx2_enable_nvram_write(bp);
4621 
4622 			for (addr = page_start; addr < data_start;
4623 				addr += 4, i += 4) {
4624 
4625 				rc = bnx2_nvram_write_dword(bp, addr,
4626 					&flash_buffer[i], cmd_flags);
4627 
4628 				if (rc != 0)
4629 					goto nvram_write_end;
4630 
4631 				cmd_flags = 0;
4632 			}
4633 		}
4634 
4635 		/* Loop to write the new data from data_start to data_end */
4636 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4637 			if ((addr == page_end - 4) ||
4638 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4639 				 (addr == data_end - 4))) {
4640 
4641 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4642 			}
4643 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4644 				cmd_flags);
4645 
4646 			if (rc != 0)
4647 				goto nvram_write_end;
4648 
4649 			cmd_flags = 0;
4650 			buf += 4;
4651 		}
4652 
4653 		/* Loop to write back the buffer data from data_end
4654 		 * to page_end */
4655 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4656 			for (addr = data_end; addr < page_end;
4657 				addr += 4, i += 4) {
4658 
4659 				if (addr == page_end-4) {
4660 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4661 				}
4662 				rc = bnx2_nvram_write_dword(bp, addr,
4663 					&flash_buffer[i], cmd_flags);
4664 
4665 				if (rc != 0)
4666 					goto nvram_write_end;
4667 
4668 				cmd_flags = 0;
4669 			}
4670 		}
4671 
4672 		/* Disable writes to flash interface (lock write-protect) */
4673 		bnx2_disable_nvram_write(bp);
4674 
4675 		/* Disable access to flash interface */
4676 		bnx2_disable_nvram_access(bp);
4677 		bnx2_release_nvram_lock(bp);
4678 
4679 		/* Increment written */
4680 		written += data_end - data_start;
4681 	}
4682 
4683 nvram_write_end:
4684 	kfree(flash_buffer);
4685 	kfree(align_buf);
4686 	return rc;
4687 }
4688 
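/* Negotiate optional features with the bootcode: read the firmware
 * capability word from shared memory and acknowledge the capabilities
 * (keeping VLAN tags, remote PHY) that the driver will actually use.
 */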
4689 static void
4690 bnx2_init_fw_cap(struct bnx2 *bp)
4691 {
4692 	u32 val, sig = 0;
4693 
4694 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4695 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4696 
4697 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4698 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4699 
4700 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4701 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4702 		return;
4703 
4704 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4705 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4706 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4707 	}
4708 
4709 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4710 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4711 		u32 link;
4712 
4713 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4714 
4715 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4716 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4717 			bp->phy_port = PORT_FIBRE;
4718 		else
4719 			bp->phy_port = PORT_TP;
4720 
4721 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4722 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4723 	}
4724 
4725 	if (netif_running(bp->dev) && sig)
4726 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4727 }
4728 
4729 static void
4730 bnx2_setup_msix_tbl(struct bnx2 *bp)
4731 {
4732 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4733 
4734 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4735 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4736 }
4737 
4738 static void
4739 bnx2_wait_dma_complete(struct bnx2 *bp)
4740 {
4741 	u32 val;
4742 	int i;
4743 
4744 	/*
4745 	 * Wait for the current PCI transaction to complete before
4746 	 * issuing a reset.
4747 	 */
4748 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4749 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4750 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4751 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4752 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4753 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4754 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4755 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4756 		udelay(5);
4757 	} else {  /* 5709 */
4758 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4759 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4760 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4761 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4762 
4763 		for (i = 0; i < 100; i++) {
4764 			msleep(1);
4765 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4766 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4767 				break;
4768 		}
4769 	}
4770 
4771 	return;
4772 }
4773 
4774 
4775 static int
4776 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4777 {
4778 	u32 val;
4779 	int i, rc = 0;
4780 	u8 old_port;
4781 
4782 	/* Wait for the current PCI transaction to complete before
4783 	 * issuing a reset. */
4784 	bnx2_wait_dma_complete(bp);
4785 
4786 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4787 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4788 
4789 	/* Deposit a driver reset signature so the firmware knows that
4790 	 * this is a soft reset. */
4791 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4792 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4793 
4794 	/* Do a dummy read to force the chip to complete all current transactions
4795 	 * before we issue a reset. */
4796 	val = BNX2_RD(bp, BNX2_MISC_ID);
4797 
4798 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4799 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4800 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4801 		udelay(5);
4802 
4803 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4804 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4805 
4806 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4807 
4808 	} else {
4809 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4810 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4811 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4812 
4813 		/* Chip reset. */
4814 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4815 
4816 		/* Reading back any register after chip reset will hang the
4817 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4818 		 * of margin for write posting.
4819 		 */
4820 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4821 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4822 			msleep(20);
4823 
4824 		/* Reset takes approximately 30 usec */
4825 		for (i = 0; i < 10; i++) {
4826 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4827 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4828 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4829 				break;
4830 			udelay(10);
4831 		}
4832 
4833 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4834 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4835 			pr_err("Chip reset did not complete\n");
4836 			return -EBUSY;
4837 		}
4838 	}
4839 
4840 	/* Make sure byte swapping is properly configured. */
4841 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4842 	if (val != 0x01020304) {
4843 		pr_err("Chip not in correct endian mode\n");
4844 		return -ENODEV;
4845 	}
4846 
4847 	/* Wait for the firmware to finish its initialization. */
4848 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4849 	if (rc)
4850 		return rc;
4851 
4852 	spin_lock_bh(&bp->phy_lock);
4853 	old_port = bp->phy_port;
4854 	bnx2_init_fw_cap(bp);
4855 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4856 	    old_port != bp->phy_port)
4857 		bnx2_set_default_remote_link(bp);
4858 	spin_unlock_bh(&bp->phy_lock);
4859 
4860 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4861 		/* Adjust the voltage regulator two steps lower.  The default
4862 		 * of this register is 0x0000000e. */
4863 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4864 
4865 		/* Remove bad rbuf memory from the free pool. */
4866 		rc = bnx2_alloc_bad_rbuf(bp);
4867 	}
4868 
4869 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4870 		bnx2_setup_msix_tbl(bp);
4871 		/* Prevent MSIX table reads and writes from timing out */
4872 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4873 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4874 	}
4875 
4876 	return rc;
4877 }
4878 
4879 static int
4880 bnx2_init_chip(struct bnx2 *bp)
4881 {
4882 	u32 val, mtu;
4883 	int rc, i;
4884 
4885 	/* Make sure the interrupt is not active. */
4886 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4887 
4888 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4889 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4890 #ifdef __BIG_ENDIAN
4891 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4892 #endif
4893 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4894 	      DMA_READ_CHANS << 12 |
4895 	      DMA_WRITE_CHANS << 16;
4896 
4897 	val |= (0x2 << 20) | (1 << 11);
4898 
4899 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4900 		val |= (1 << 23);
4901 
4902 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4903 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4904 	    !(bp->flags & BNX2_FLAG_PCIX))
4905 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4906 
4907 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4908 
4909 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4910 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4911 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4912 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4913 	}
4914 
4915 	if (bp->flags & BNX2_FLAG_PCIX) {
4916 		u16 val16;
4917 
4918 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4919 				     &val16);
4920 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4921 				      val16 & ~PCI_X_CMD_ERO);
4922 	}
4923 
4924 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4925 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4926 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4927 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4928 
4929 	/* Initialize context mapping and zero out the quick contexts.  The
4930 	 * context block must have already been enabled. */
4931 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4932 		rc = bnx2_init_5709_context(bp);
4933 		if (rc)
4934 			return rc;
4935 	} else
4936 		bnx2_init_context(bp);
4937 
4938 	bnx2_init_cpus(bp);
4939 
4940 	bnx2_init_nvram(bp);
4941 
4942 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4943 
4944 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4945 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4946 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4947 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4948 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4949 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4950 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4951 	}
4952 
4953 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4954 
4955 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4956 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4957 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4958 
4959 	val = (BNX2_PAGE_BITS - 8) << 24;
4960 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4961 
4962 	/* Configure page size. */
4963 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4964 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4965 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4966 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4967 
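	/* Seed the transmit backoff random number generator from the
	 * station MAC address so that ports are unlikely to pick
	 * identical backoff slots.
	 */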
4968 	val = bp->mac_addr[0] +
4969 	      (bp->mac_addr[1] << 8) +
4970 	      (bp->mac_addr[2] << 16) +
4971 	      bp->mac_addr[3] +
4972 	      (bp->mac_addr[4] << 8) +
4973 	      (bp->mac_addr[5] << 16);
4974 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4975 
4976 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4977 	mtu = bp->dev->mtu;
4978 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4979 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4980 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4981 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4982 
4983 	if (mtu < ETH_DATA_LEN)
4984 		mtu = ETH_DATA_LEN;
4985 
4986 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4987 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4988 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4989 
4990 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4991 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4992 		bp->bnx2_napi[i].last_status_idx = 0;
4993 
4994 	bp->idle_chk_status_idx = 0xffff;
4995 
4996 	/* Set up how to generate a link change interrupt. */
4997 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4998 
4999 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5000 		(u64) bp->status_blk_mapping & 0xffffffff);
5001 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5002 
5003 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5004 		(u64) bp->stats_blk_mapping & 0xffffffff);
5005 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5006 		(u64) bp->stats_blk_mapping >> 32);
5007 
5008 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5009 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5010 
5011 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5012 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5013 
5014 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5015 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5016 
5017 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5018 
5019 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5020 
5021 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5022 		(bp->com_ticks_int << 16) | bp->com_ticks);
5023 
5024 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5025 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5026 
5027 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5028 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5029 	else
5030 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5031 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5032 
5033 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5034 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5035 	else {
5036 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5037 		      BNX2_HC_CONFIG_COLLECT_STATS;
5038 	}
5039 
5040 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5041 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5042 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5043 
5044 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5045 	}
5046 
5047 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5048 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5049 
5050 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5051 
5052 	if (bp->rx_ticks < 25)
5053 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5054 	else
5055 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5056 
5057 	for (i = 1; i < bp->irq_nvecs; i++) {
5058 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5059 			   BNX2_HC_SB_CONFIG_1;
5060 
5061 		BNX2_WR(bp, base,
5062 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5063 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5064 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5065 
5066 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5067 			(bp->tx_quick_cons_trip_int << 16) |
5068 			 bp->tx_quick_cons_trip);
5069 
5070 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5071 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5072 
5073 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5074 			(bp->rx_quick_cons_trip_int << 16) |
5075 			bp->rx_quick_cons_trip);
5076 
5077 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5078 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5079 	}
5080 
5081 	/* Clear internal stats counters. */
5082 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5083 
5084 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5085 
5086 	/* Initialize the receive filter. */
5087 	bnx2_set_rx_mode(bp->dev);
5088 
5089 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5090 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5091 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5092 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5093 	}
5094 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5095 			  1, 0);
5096 
5097 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5098 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5099 
5100 	udelay(20);
5101 
5102 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5103 
5104 	return rc;
5105 }
5106 
5107 static void
5108 bnx2_clear_ring_states(struct bnx2 *bp)
5109 {
5110 	struct bnx2_napi *bnapi;
5111 	struct bnx2_tx_ring_info *txr;
5112 	struct bnx2_rx_ring_info *rxr;
5113 	int i;
5114 
5115 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5116 		bnapi = &bp->bnx2_napi[i];
5117 		txr = &bnapi->tx_ring;
5118 		rxr = &bnapi->rx_ring;
5119 
5120 		txr->tx_cons = 0;
5121 		txr->hw_tx_cons = 0;
5122 		rxr->rx_prod_bseq = 0;
5123 		rxr->rx_prod = 0;
5124 		rxr->rx_cons = 0;
5125 		rxr->rx_pg_prod = 0;
5126 		rxr->rx_pg_cons = 0;
5127 	}
5128 }
5129 
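/* Program the per-connection context for a TX ring: the L2 context
 * type and command-type words plus the 64-bit host address of the
 * BD chain.  The 5709 (XI) uses different offsets within the context.
 */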
5130 static void
5131 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5132 {
5133 	u32 val, offset0, offset1, offset2, offset3;
5134 	u32 cid_addr = GET_CID_ADDR(cid);
5135 
5136 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5137 		offset0 = BNX2_L2CTX_TYPE_XI;
5138 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5139 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5140 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5141 	} else {
5142 		offset0 = BNX2_L2CTX_TYPE;
5143 		offset1 = BNX2_L2CTX_CMD_TYPE;
5144 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5145 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5146 	}
5147 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5148 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5149 
5150 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5151 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5152 
5153 	val = (u64) txr->tx_desc_mapping >> 32;
5154 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5155 
5156 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5157 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5158 }
5159 
5160 static void
5161 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5162 {
5163 	struct bnx2_tx_bd *txbd;
5164 	u32 cid = TX_CID;
5165 	struct bnx2_napi *bnapi;
5166 	struct bnx2_tx_ring_info *txr;
5167 
5168 	bnapi = &bp->bnx2_napi[ring_num];
5169 	txr = &bnapi->tx_ring;
5170 
5171 	if (ring_num == 0)
5172 		cid = TX_CID;
5173 	else
5174 		cid = TX_TSS_CID + ring_num - 1;
5175 
5176 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5177 
5178 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5179 
5180 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5181 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5182 
5183 	txr->tx_prod = 0;
5184 	txr->tx_prod_bseq = 0;
5185 
5186 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5187 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5188 
5189 	bnx2_init_tx_context(bp, cid, txr);
5190 }
5191 
5192 static void
5193 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5194 		     u32 buf_size, int num_rings)
5195 {
5196 	int i;
5197 	struct bnx2_rx_bd *rxbd;
5198 
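	/* Fill every BD in each ring page with the buffer size and
	 * flags; the last BD of each page chains to the next page
	 * (wrapping back to page 0), so the hardware sees one circular
	 * ring.
	 */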
5199 	for (i = 0; i < num_rings; i++) {
5200 		int j;
5201 
5202 		rxbd = &rx_ring[i][0];
5203 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5204 			rxbd->rx_bd_len = buf_size;
5205 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5206 		}
5207 		if (i == (num_rings - 1))
5208 			j = 0;
5209 		else
5210 			j = i + 1;
5211 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5212 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5213 	}
5214 }
5215 
5216 static void
5217 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5218 {
5219 	int i;
5220 	u16 prod, ring_prod;
5221 	u32 cid, rx_cid_addr, val;
5222 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5223 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5224 
5225 	if (ring_num == 0)
5226 		cid = RX_CID;
5227 	else
5228 		cid = RX_RSS_CID + ring_num - 1;
5229 
5230 	rx_cid_addr = GET_CID_ADDR(cid);
5231 
5232 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5233 			     bp->rx_buf_use_size, bp->rx_max_ring);
5234 
5235 	bnx2_init_rx_context(bp, cid);
5236 
5237 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5238 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5239 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5240 	}
5241 
5242 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5243 	if (bp->rx_pg_ring_size) {
5244 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5245 				     rxr->rx_pg_desc_mapping,
5246 				     PAGE_SIZE, bp->rx_max_pg_ring);
5247 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5248 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5249 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5250 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5251 
5252 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5253 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5254 
5255 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5256 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5257 
5258 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5259 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5260 	}
5261 
5262 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5263 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5264 
5265 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5266 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5267 
5268 	ring_prod = prod = rxr->rx_pg_prod;
5269 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5270 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5271 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5272 				    ring_num, i, bp->rx_pg_ring_size);
5273 			break;
5274 		}
5275 		prod = BNX2_NEXT_RX_BD(prod);
5276 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5277 	}
5278 	rxr->rx_pg_prod = prod;
5279 
5280 	ring_prod = prod = rxr->rx_prod;
5281 	for (i = 0; i < bp->rx_ring_size; i++) {
5282 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5283 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5284 				    ring_num, i, bp->rx_ring_size);
5285 			break;
5286 		}
5287 		prod = BNX2_NEXT_RX_BD(prod);
5288 		ring_prod = BNX2_RX_RING_IDX(prod);
5289 	}
5290 	rxr->rx_prod = prod;
5291 
5292 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5293 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5294 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5295 
5296 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5297 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5298 
5299 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5300 }
5301 
5302 static void
5303 bnx2_init_all_rings(struct bnx2 *bp)
5304 {
5305 	int i;
5306 	u32 val;
5307 
5308 	bnx2_clear_ring_states(bp);
5309 
5310 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5311 	for (i = 0; i < bp->num_tx_rings; i++)
5312 		bnx2_init_tx_ring(bp, i);
5313 
5314 	if (bp->num_tx_rings > 1)
5315 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5316 			(TX_TSS_CID << 7));
5317 
5318 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5319 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5320 
5321 	for (i = 0; i < bp->num_rx_rings; i++)
5322 		bnx2_init_rx_ring(bp, i);
5323 
5324 	if (bp->num_rx_rings > 1) {
5325 		u32 tbl_32 = 0;
5326 
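		/* Build the RSS indirection table: four bits per entry,
		 * eight entries packed into each 32-bit write, spreading
		 * flows round-robin over the non-default RX rings.
		 */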
5327 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5328 			int shift = (i % 8) << 2;
5329 
5330 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5331 			if ((i % 8) == 7) {
5332 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5333 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5334 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5335 					BNX2_RLUP_RSS_COMMAND_WRITE |
5336 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5337 				tbl_32 = 0;
5338 			}
5339 		}
5340 
5341 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5342 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5343 
5344 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5345 
5346 	}
5347 }
5348 
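/* Return the number of ring pages needed to hold @ring_size
 * descriptors, rounded up to a power of two; @max_size is assumed to
 * be a power of two and caps the result.
 */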
5349 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5350 {
5351 	u32 max, num_rings = 1;
5352 
5353 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5354 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5355 		num_rings++;
5356 	}
5357 	/* round to next power of 2 */
5358 	max = max_size;
5359 	while ((max & num_rings) == 0)
5360 		max >>= 1;
5361 
5362 	if (num_rings != max)
5363 		max <<= 1;
5364 
5365 	return max;
5366 }
5367 
5368 static void
5369 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5370 {
5371 	u32 rx_size, rx_space, jumbo_size;
5372 
5373 	/* 8 for CRC and VLAN */
5374 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5375 
5376 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5377 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5378 
5379 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5380 	bp->rx_pg_ring_size = 0;
5381 	bp->rx_max_pg_ring = 0;
5382 	bp->rx_max_pg_ring_idx = 0;
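	/* If a full frame will not fit in one page-backed buffer, use
	 * the split model: a small first buffer holding only the copy
	 * threshold plus a page ring sized for the rest of each jumbo
	 * frame.
	 */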
5383 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5384 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5385 
5386 		jumbo_size = size * pages;
5387 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5388 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5389 
5390 		bp->rx_pg_ring_size = jumbo_size;
5391 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5392 							BNX2_MAX_RX_PG_RINGS);
5393 		bp->rx_max_pg_ring_idx =
5394 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5395 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5396 		bp->rx_copy_thresh = 0;
5397 	}
5398 
5399 	bp->rx_buf_use_size = rx_size;
5400 	/* hw alignment + build_skb() overhead */
5401 	bp->rx_buf_size = kmalloc_size_roundup(
5402 		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5403 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5404 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5405 	bp->rx_ring_size = size;
5406 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5407 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5408 }
5409 
5410 static void
5411 bnx2_free_tx_skbs(struct bnx2 *bp)
5412 {
5413 	int i;
5414 
5415 	for (i = 0; i < bp->num_tx_rings; i++) {
5416 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5417 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5418 		int j;
5419 
5420 		if (!txr->tx_buf_ring)
5421 			continue;
5422 
5423 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5424 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5425 			struct sk_buff *skb = tx_buf->skb;
5426 			int k, last;
5427 
5428 			if (!skb) {
5429 				j = BNX2_NEXT_TX_BD(j);
5430 				continue;
5431 			}
5432 
5433 			dma_unmap_single(&bp->pdev->dev,
5434 					 dma_unmap_addr(tx_buf, mapping),
5435 					 skb_headlen(skb),
5436 					 DMA_TO_DEVICE);
5437 
5438 			tx_buf->skb = NULL;
5439 
5440 			last = tx_buf->nr_frags;
5441 			j = BNX2_NEXT_TX_BD(j);
5442 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5443 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5444 				dma_unmap_page(&bp->pdev->dev,
5445 					dma_unmap_addr(tx_buf, mapping),
5446 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5447 					DMA_TO_DEVICE);
5448 			}
5449 			dev_kfree_skb(skb);
5450 		}
5451 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5452 	}
5453 }
5454 
5455 static void
5456 bnx2_free_rx_skbs(struct bnx2 *bp)
5457 {
5458 	int i;
5459 
5460 	for (i = 0; i < bp->num_rx_rings; i++) {
5461 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5462 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5463 		int j;
5464 
5465 		if (!rxr->rx_buf_ring)
5466 			return;
5467 
5468 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5469 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5470 			u8 *data = rx_buf->data;
5471 
5472 			if (!data)
5473 				continue;
5474 
5475 			dma_unmap_single(&bp->pdev->dev,
5476 					 dma_unmap_addr(rx_buf, mapping),
5477 					 bp->rx_buf_use_size,
5478 					 DMA_FROM_DEVICE);
5479 
5480 			rx_buf->data = NULL;
5481 
5482 			kfree(data);
5483 		}
5484 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5485 			bnx2_free_rx_page(bp, rxr, j);
5486 	}
5487 }
5488 
5489 static void
5490 bnx2_free_skbs(struct bnx2 *bp)
5491 {
5492 	bnx2_free_tx_skbs(bp);
5493 	bnx2_free_rx_skbs(bp);
5494 }
5495 
5496 static int
5497 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5498 {
5499 	int rc;
5500 
5501 	rc = bnx2_reset_chip(bp, reset_code);
5502 	bnx2_free_skbs(bp);
5503 	if (rc)
5504 		return rc;
5505 
5506 	if ((rc = bnx2_init_chip(bp)) != 0)
5507 		return rc;
5508 
5509 	bnx2_init_all_rings(bp);
5510 	return 0;
5511 }
5512 
5513 static int
5514 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5515 {
5516 	int rc;
5517 
5518 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5519 		return rc;
5520 
5521 	spin_lock_bh(&bp->phy_lock);
5522 	bnx2_init_phy(bp, reset_phy);
5523 	bnx2_set_link(bp);
5524 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5525 		bnx2_remote_phy_event(bp);
5526 	spin_unlock_bh(&bp->phy_lock);
5527 	return 0;
5528 }
5529 
5530 static int
5531 bnx2_shutdown_chip(struct bnx2 *bp)
5532 {
5533 	u32 reset_code;
5534 
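	/* Tell the firmware why the chip is going down so it can leave
	 * the link in the right state: forced down when WoL is not
	 * available, otherwise suspended with or without wake-up armed.
	 */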
5535 	if (bp->flags & BNX2_FLAG_NO_WOL)
5536 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5537 	else if (bp->wol)
5538 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5539 	else
5540 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5541 
5542 	return bnx2_reset_chip(bp, reset_code);
5543 }
5544 
5545 static int
5546 bnx2_test_registers(struct bnx2 *bp)
5547 {
5548 	int ret;
5549 	int i, is_5709;
5550 	static const struct {
5551 		u16   offset;
5552 		u16   flags;
5553 #define BNX2_FL_NOT_5709	1
5554 		u32   rw_mask;
5555 		u32   ro_mask;
5556 	} reg_tbl[] = {
5557 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5558 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5559 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5560 
5561 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5562 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5565 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5566 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5567 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5568 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5569 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5570 
5571 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5572 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5573 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5574 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5575 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5576 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5577 
5578 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5579 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5580 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5581 
5582 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5583 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5584 
5585 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5586 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5587 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5588 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5589 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5590 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5591 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5592 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5593 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5594 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5595 
5596 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5597 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5598 
5599 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5600 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5601 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5602 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5603 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5604 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5605 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5606 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5607 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5608 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5609 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5610 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5611 
5612 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5613 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5614 
5615 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5616 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5617 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5618 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5619 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5620 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5621 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5622 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5623 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5624 
5625 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5626 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5627 
5628 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5629 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5630 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5631 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5632 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5633 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5634 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5635 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5636 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5637 
5638 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5639 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5640 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5641 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5642 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5643 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5644 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5645 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5646 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5647 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5648 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5649 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5650 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5651 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5652 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5653 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5654 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5655 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5656 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5657 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5658 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5659 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5660 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5661 
5662 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5663 	};
5664 
5665 	ret = 0;
5666 	is_5709 = 0;
5667 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5668 		is_5709 = 1;
5669 
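	/* For each entry, write 0 and then all ones, checking that bits
	 * in rw_mask read back exactly as written while bits in ro_mask
	 * keep their original value; the saved contents are restored
	 * before moving on.
	 */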
5670 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5671 		u32 offset, rw_mask, ro_mask, save_val, val;
5672 		u16 flags = reg_tbl[i].flags;
5673 
5674 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5675 			continue;
5676 
5677 		offset = (u32) reg_tbl[i].offset;
5678 		rw_mask = reg_tbl[i].rw_mask;
5679 		ro_mask = reg_tbl[i].ro_mask;
5680 
5681 		save_val = readl(bp->regview + offset);
5682 
5683 		writel(0, bp->regview + offset);
5684 
5685 		val = readl(bp->regview + offset);
5686 		if ((val & rw_mask) != 0) {
5687 			goto reg_test_err;
5688 		}
5689 
5690 		if ((val & ro_mask) != (save_val & ro_mask)) {
5691 			goto reg_test_err;
5692 		}
5693 
5694 		writel(0xffffffff, bp->regview + offset);
5695 
5696 		val = readl(bp->regview + offset);
5697 		if ((val & rw_mask) != rw_mask) {
5698 			goto reg_test_err;
5699 		}
5700 
5701 		if ((val & ro_mask) != (save_val & ro_mask)) {
5702 			goto reg_test_err;
5703 		}
5704 
5705 		writel(save_val, bp->regview + offset);
5706 		continue;
5707 
5708 reg_test_err:
5709 		writel(save_val, bp->regview + offset);
5710 		ret = -ENODEV;
5711 		break;
5712 	}
5713 	return ret;
5714 }
5715 
5716 static int
5717 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5718 {
5719 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5720 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5721 	int i;
5722 
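	/* Write each pattern to every 32-bit word in the window through
	 * the indirect register interface and read it back; any mismatch
	 * fails the whole test.
	 */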
5723 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5724 		u32 offset;
5725 
5726 		for (offset = 0; offset < size; offset += 4) {
5727 
5728 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5729 
5730 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5731 				test_pattern[i]) {
5732 				return -ENODEV;
5733 			}
5734 		}
5735 	}
5736 	return 0;
5737 }
5738 
5739 static int
5740 bnx2_test_memory(struct bnx2 *bp)
5741 {
5742 	int ret = 0;
5743 	int i;
5744 	static struct mem_entry {
5745 		u32   offset;
5746 		u32   len;
5747 	} mem_tbl_5706[] = {
5748 		{ 0x60000,  0x4000 },
5749 		{ 0xa0000,  0x3000 },
5750 		{ 0xe0000,  0x4000 },
5751 		{ 0x120000, 0x4000 },
5752 		{ 0x1a0000, 0x4000 },
5753 		{ 0x160000, 0x4000 },
5754 		{ 0xffffffff, 0    },
5755 	},
5756 	mem_tbl_5709[] = {
5757 		{ 0x60000,  0x4000 },
5758 		{ 0xa0000,  0x3000 },
5759 		{ 0xe0000,  0x4000 },
5760 		{ 0x120000, 0x4000 },
5761 		{ 0x1a0000, 0x4000 },
5762 		{ 0xffffffff, 0    },
5763 	};
5764 	struct mem_entry *mem_tbl;
5765 
5766 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5767 		mem_tbl = mem_tbl_5709;
5768 	else
5769 		mem_tbl = mem_tbl_5706;
5770 
5771 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5772 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5773 			mem_tbl[i].len)) != 0) {
5774 			return ret;
5775 		}
5776 	}
5777 
5778 	return ret;
5779 }
5780 
5781 #define BNX2_MAC_LOOPBACK	0
5782 #define BNX2_PHY_LOOPBACK	1
5783 
5784 static int
5785 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5786 {
5787 	unsigned int pkt_size, num_pkts, i;
5788 	struct sk_buff *skb;
5789 	u8 *data;
5790 	unsigned char *packet;
5791 	u16 rx_start_idx, rx_idx;
5792 	dma_addr_t map;
5793 	struct bnx2_tx_bd *txbd;
5794 	struct bnx2_sw_bd *rx_buf;
5795 	struct l2_fhdr *rx_hdr;
5796 	int ret = -ENODEV;
5797 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5798 	struct bnx2_tx_ring_info *txr;
5799 	struct bnx2_rx_ring_info *rxr;
5800 
5801 	tx_napi = bnapi;
5802 
5803 	txr = &tx_napi->tx_ring;
5804 	rxr = &bnapi->rx_ring;
5805 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5806 		bp->loopback = MAC_LOOPBACK;
5807 		bnx2_set_mac_loopback(bp);
5808 	}
5809 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5810 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5811 			return 0;
5812 
5813 		bp->loopback = PHY_LOOPBACK;
5814 		bnx2_set_phy_loopback(bp);
5815 	}
5816 	else
5817 		return -EINVAL;
5818 
5819 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5820 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5821 	if (!skb)
5822 		return -ENOMEM;
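	/* Build a self-addressed test frame: destination MAC set to our
	 * own address, source MAC and EtherType zeroed, then a counting
	 * byte pattern from offset 14 that is verified on receive below.
	 */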
5823 	packet = skb_put(skb, pkt_size);
5824 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5825 	memset(packet + ETH_ALEN, 0x0, 8);
5826 	for (i = 14; i < pkt_size; i++)
5827 		packet[i] = (unsigned char) (i & 0xff);
5828 
5829 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5830 			     DMA_TO_DEVICE);
5831 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5832 		dev_kfree_skb(skb);
5833 		return -EIO;
5834 	}
5835 
5836 	BNX2_WR(bp, BNX2_HC_COMMAND,
5837 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5838 
5839 	BNX2_RD(bp, BNX2_HC_COMMAND);
5840 
5841 	udelay(5);
5842 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5843 
5844 	num_pkts = 0;
5845 
5846 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5847 
5848 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5849 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5850 	txbd->tx_bd_mss_nbytes = pkt_size;
5851 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5852 
5853 	num_pkts++;
5854 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5855 	txr->tx_prod_bseq += pkt_size;
5856 
5857 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5858 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5859 
5860 	udelay(100);
5861 
5862 	BNX2_WR(bp, BNX2_HC_COMMAND,
5863 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5864 
5865 	BNX2_RD(bp, BNX2_HC_COMMAND);
5866 
5867 	udelay(5);
5868 
5869 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5870 	dev_kfree_skb(skb);
5871 
5872 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5873 		goto loopback_test_done;
5874 
5875 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5876 	if (rx_idx != rx_start_idx + num_pkts) {
5877 		goto loopback_test_done;
5878 	}
5879 
5880 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5881 	data = rx_buf->data;
5882 
5883 	rx_hdr = get_l2_fhdr(data);
5884 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5885 
5886 	dma_sync_single_for_cpu(&bp->pdev->dev,
5887 		dma_unmap_addr(rx_buf, mapping),
5888 		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5889 
5890 	if (rx_hdr->l2_fhdr_status &
5891 		(L2_FHDR_ERRORS_BAD_CRC |
5892 		L2_FHDR_ERRORS_PHY_DECODE |
5893 		L2_FHDR_ERRORS_ALIGNMENT |
5894 		L2_FHDR_ERRORS_TOO_SHORT |
5895 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5896 
5897 		goto loopback_test_done;
5898 	}
5899 
5900 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5901 		goto loopback_test_done;
5902 	}
5903 
5904 	for (i = 14; i < pkt_size; i++) {
5905 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5906 			goto loopback_test_done;
5907 		}
5908 	}
5909 
5910 	ret = 0;
5911 
5912 loopback_test_done:
5913 	bp->loopback = 0;
5914 	return ret;
5915 }
5916 
5917 #define BNX2_MAC_LOOPBACK_FAILED	1
5918 #define BNX2_PHY_LOOPBACK_FAILED	2
5919 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5920 					 BNX2_PHY_LOOPBACK_FAILED)
5921 
5922 static int
5923 bnx2_test_loopback(struct bnx2 *bp)
5924 {
5925 	int rc = 0;
5926 
5927 	if (!netif_running(bp->dev))
5928 		return BNX2_LOOPBACK_FAILED;
5929 
5930 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5931 	spin_lock_bh(&bp->phy_lock);
5932 	bnx2_init_phy(bp, 1);
5933 	spin_unlock_bh(&bp->phy_lock);
5934 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5935 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5936 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5937 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5938 	return rc;
5939 }
5940 
5941 #define NVRAM_SIZE 0x200
5942 #define CRC32_RESIDUAL 0xdebb20e3
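/* Running the CRC over a block that already contains its little-endian
 * CRC-32 yields this constant residual, so each 0x100-byte half of the
 * NVRAM manufacturing data can be validated without extracting the
 * stored checksum separately.
 */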
5943 
5944 static int
5945 bnx2_test_nvram(struct bnx2 *bp)
5946 {
5947 	__be32 buf[NVRAM_SIZE / 4];
5948 	u8 *data = (u8 *) buf;
5949 	int rc = 0;
5950 	u32 magic, csum;
5951 
5952 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5953 		goto test_nvram_done;
5954 
5955 	magic = be32_to_cpu(buf[0]);
5956 	if (magic != 0x669955aa) {
5957 		rc = -ENODEV;
5958 		goto test_nvram_done;
5959 	}
5960 
5961 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5962 		goto test_nvram_done;
5963 
5964 	csum = ether_crc_le(0x100, data);
5965 	if (csum != CRC32_RESIDUAL) {
5966 		rc = -ENODEV;
5967 		goto test_nvram_done;
5968 	}
5969 
5970 	csum = ether_crc_le(0x100, data + 0x100);
5971 	if (csum != CRC32_RESIDUAL) {
5972 		rc = -ENODEV;
5973 	}
5974 
5975 test_nvram_done:
5976 	return rc;
5977 }
5978 
5979 static int
5980 bnx2_test_link(struct bnx2 *bp)
5981 {
5982 	u32 bmsr;
5983 
5984 	if (!netif_running(bp->dev))
5985 		return -ENODEV;
5986 
5987 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5988 		if (bp->link_up)
5989 			return 0;
5990 		return -ENODEV;
5991 	}
5992 	spin_lock_bh(&bp->phy_lock);
5993 	bnx2_enable_bmsr1(bp);
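	/* The link bit in BMSR is latched-low; read the register twice
	 * so the second read reports the current link state.
	 */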
5994 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5995 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5996 	bnx2_disable_bmsr1(bp);
5997 	spin_unlock_bh(&bp->phy_lock);
5998 
5999 	if (bmsr & BMSR_LSTATUS) {
6000 		return 0;
6001 	}
6002 	return -ENODEV;
6003 }
6004 
6005 static int
6006 bnx2_test_intr(struct bnx2 *bp)
6007 {
6008 	int i;
6009 	u16 status_idx;
6010 
6011 	if (!netif_running(bp->dev))
6012 		return -ENODEV;
6013 
6014 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6015 
6016 	/* This register is not touched during run-time. */
6017 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6018 	BNX2_RD(bp, BNX2_HC_COMMAND);
6019 
6020 	for (i = 0; i < 10; i++) {
6021 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6022 			status_idx) {
6023 
6024 			break;
6025 		}
6026 
6027 		msleep_interruptible(10);
6028 	}
6029 	if (i < 10)
6030 		return 0;
6031 
6032 	return -ENODEV;
6033 }
6034 
6035 /* Determine link for parallel detection. */
6036 static int
6037 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6038 {
6039 	u32 mode_ctl, an_dbg, exp;
6040 
6041 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6042 		return 0;
6043 
6044 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6045 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6046 
6047 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6048 		return 0;
6049 
6050 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6051 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6052 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6053 
6054 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6055 		return 0;
6056 
6057 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6058 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6059 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6060 
6061 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6062 		return 0;
6063 
6064 	return 1;
6065 }
6066 
6067 static void
6068 bnx2_5706_serdes_timer(struct bnx2 *bp)
6069 {
6070 	int check_link = 1;
6071 
6072 	spin_lock(&bp->phy_lock);
6073 	if (bp->serdes_an_pending) {
6074 		bp->serdes_an_pending--;
6075 		check_link = 0;
6076 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6077 		u32 bmcr;
6078 
6079 		bp->current_interval = BNX2_TIMER_INTERVAL;
6080 
6081 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6082 
6083 		if (bmcr & BMCR_ANENABLE) {
6084 			if (bnx2_5706_serdes_has_link(bp)) {
6085 				bmcr &= ~BMCR_ANENABLE;
6086 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6087 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6088 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6089 			}
6090 		}
6091 	}
6092 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6093 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6094 		u32 phy2;
6095 
6096 		bnx2_write_phy(bp, 0x17, 0x0f01);
6097 		bnx2_read_phy(bp, 0x15, &phy2);
6098 		if (phy2 & 0x20) {
6099 			u32 bmcr;
6100 
6101 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6102 			bmcr |= BMCR_ANENABLE;
6103 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6104 
6105 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6106 		}
6107 	} else
6108 		bp->current_interval = BNX2_TIMER_INTERVAL;
6109 
6110 	if (check_link) {
6111 		u32 val;
6112 
6113 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6114 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6115 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6116 
6117 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6118 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6119 				bnx2_5706s_force_link_dn(bp, 1);
6120 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6121 			} else
6122 				bnx2_set_link(bp);
6123 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6124 			bnx2_set_link(bp);
6125 	}
6126 	spin_unlock(&bp->phy_lock);
6127 }
6128 
6129 static void
6130 bnx2_5708_serdes_timer(struct bnx2 *bp)
6131 {
6132 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6133 		return;
6134 
6135 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6136 		bp->serdes_an_pending = 0;
6137 		return;
6138 	}
6139 
6140 	spin_lock(&bp->phy_lock);
6141 	if (bp->serdes_an_pending)
6142 		bp->serdes_an_pending--;
6143 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6144 		u32 bmcr;
6145 
6146 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6147 		if (bmcr & BMCR_ANENABLE) {
6148 			bnx2_enable_forced_2g5(bp);
6149 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6150 		} else {
6151 			bnx2_disable_forced_2g5(bp);
6152 			bp->serdes_an_pending = 2;
6153 			bp->current_interval = BNX2_TIMER_INTERVAL;
6154 		}
6155 
6156 	} else
6157 		bp->current_interval = BNX2_TIMER_INTERVAL;
6158 
6159 	spin_unlock(&bp->phy_lock);
6160 }
6161 
6162 static void
6163 bnx2_timer(struct timer_list *t)
6164 {
6165 	struct bnx2 *bp = from_timer(bp, t, timer);
6166 
6167 	if (!netif_running(bp->dev))
6168 		return;
6169 
6170 	if (atomic_read(&bp->intr_sem) != 0)
6171 		goto bnx2_restart_timer;
6172 
6173 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6174 	     BNX2_FLAG_USING_MSI)
6175 		bnx2_chk_missed_msi(bp);
6176 
6177 	bnx2_send_heart_beat(bp);
6178 
6179 	bp->stats_blk->stat_FwRxDrop =
6180 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6181 
6182 	/* work around occasionally corrupted counters */
6183 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6184 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6185 			BNX2_HC_COMMAND_STATS_NOW);
6186 
6187 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6188 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6189 			bnx2_5706_serdes_timer(bp);
6190 		else
6191 			bnx2_5708_serdes_timer(bp);
6192 	}
6193 
6194 bnx2_restart_timer:
6195 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6196 }
6197 
6198 static int
6199 bnx2_request_irq(struct bnx2 *bp)
6200 {
6201 	unsigned long flags;
6202 	struct bnx2_irq *irq;
6203 	int rc = 0, i;
6204 
6205 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6206 		flags = 0;
6207 	else
6208 		flags = IRQF_SHARED;
6209 
6210 	for (i = 0; i < bp->irq_nvecs; i++) {
6211 		irq = &bp->irq_tbl[i];
6212 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6213 				 &bp->bnx2_napi[i]);
6214 		if (rc)
6215 			break;
6216 		irq->requested = 1;
6217 	}
6218 	return rc;
6219 }
6220 
6221 static void
6222 __bnx2_free_irq(struct bnx2 *bp)
6223 {
6224 	struct bnx2_irq *irq;
6225 	int i;
6226 
6227 	for (i = 0; i < bp->irq_nvecs; i++) {
6228 		irq = &bp->irq_tbl[i];
6229 		if (irq->requested)
6230 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6231 		irq->requested = 0;
6232 	}
6233 }
6234 
6235 static void
6236 bnx2_free_irq(struct bnx2 *bp)
6237 {
6239 	__bnx2_free_irq(bp);
6240 	if (bp->flags & BNX2_FLAG_USING_MSI)
6241 		pci_disable_msi(bp->pdev);
6242 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6243 		pci_disable_msix(bp->pdev);
6244 
6245 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6246 }
6247 
6248 static void
6249 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6250 {
6251 	int i, total_vecs;
6252 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6253 	struct net_device *dev = bp->dev;
6254 	const int len = sizeof(bp->irq_tbl[0].name);
6255 
6256 	bnx2_setup_msix_tbl(bp);
6257 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6258 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6259 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6260 
6261 	/* Need to flush the previous three writes to ensure MSI-X
6262 	 * is set up properly */
6263 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6264 
6265 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6266 		msix_ent[i].entry = i;
6267 		msix_ent[i].vector = 0;
6268 	}
6269 
6270 	total_vecs = msix_vecs;
6271 #ifdef BCM_CNIC
6272 	total_vecs++;
6273 #endif
6274 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6275 					   BNX2_MIN_MSIX_VEC, total_vecs);
6276 	if (total_vecs < 0)
6277 		return;
6278 
6279 	msix_vecs = total_vecs;
6280 #ifdef BCM_CNIC
6281 	msix_vecs--;
6282 #endif
6283 	bp->irq_nvecs = msix_vecs;
6284 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6285 	for (i = 0; i < total_vecs; i++) {
6286 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6287 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6288 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6289 	}
6290 }
6291 
6292 static int
6293 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6294 {
6295 	int cpus = netif_get_num_default_rss_queues();
6296 	int msix_vecs;
6297 
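	/* Pick a vector count: honor user-requested ring counts when
	 * set, otherwise default to roughly one vector per CPU (the
	 * extra +1 on the RX side presumably covers the default ring),
	 * capped at RX_MAX_RINGS below.
	 */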
6298 	if (!bp->num_req_rx_rings)
6299 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6300 	else if (!bp->num_req_tx_rings)
6301 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6302 	else
6303 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6304 
6305 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6306 
6307 	bp->irq_tbl[0].handler = bnx2_interrupt;
6308 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6309 	bp->irq_nvecs = 1;
6310 	bp->irq_tbl[0].vector = bp->pdev->irq;
6311 
6312 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6313 		bnx2_enable_msix(bp, msix_vecs);
6314 
6315 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6316 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6317 		if (pci_enable_msi(bp->pdev) == 0) {
6318 			bp->flags |= BNX2_FLAG_USING_MSI;
6319 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6320 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6321 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6322 			} else
6323 				bp->irq_tbl[0].handler = bnx2_msi;
6324 
6325 			bp->irq_tbl[0].vector = bp->pdev->irq;
6326 		}
6327 	}
6328 
6329 	if (!bp->num_req_tx_rings)
6330 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6331 	else
6332 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6333 
6334 	if (!bp->num_req_rx_rings)
6335 		bp->num_rx_rings = bp->irq_nvecs;
6336 	else
6337 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6338 
6339 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6340 
6341 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6342 }
6343 
6344 /* Called with rtnl_lock */
6345 static int
6346 bnx2_open(struct net_device *dev)
6347 {
6348 	struct bnx2 *bp = netdev_priv(dev);
6349 	int rc;
6350 
6351 	rc = bnx2_request_firmware(bp);
6352 	if (rc < 0)
6353 		goto out;
6354 
6355 	netif_carrier_off(dev);
6356 
6357 	bnx2_disable_int(bp);
6358 
6359 	rc = bnx2_setup_int_mode(bp, disable_msi);
6360 	if (rc)
6361 		goto open_err;
6362 	bnx2_init_napi(bp);
6363 	bnx2_napi_enable(bp);
6364 	rc = bnx2_alloc_mem(bp);
6365 	if (rc)
6366 		goto open_err;
6367 
6368 	rc = bnx2_request_irq(bp);
6369 	if (rc)
6370 		goto open_err;
6371 
6372 	rc = bnx2_init_nic(bp, 1);
6373 	if (rc)
6374 		goto open_err;
6375 
6376 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6377 
6378 	atomic_set(&bp->intr_sem, 0);
6379 
6380 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6381 
6382 	bnx2_enable_int(bp);
6383 
6384 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6385 		/* Test MSI to make sure it is working.
6386 		 * If the MSI test fails, go back to INTx mode.
6387 		 */
6388 		if (bnx2_test_intr(bp) != 0) {
6389 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6390 
6391 			bnx2_disable_int(bp);
6392 			bnx2_free_irq(bp);
6393 
6394 			bnx2_setup_int_mode(bp, 1);
6395 
6396 			rc = bnx2_init_nic(bp, 0);
6397 
6398 			if (!rc)
6399 				rc = bnx2_request_irq(bp);
6400 
6401 			if (rc) {
6402 				del_timer_sync(&bp->timer);
6403 				goto open_err;
6404 			}
6405 			bnx2_enable_int(bp);
6406 		}
6407 	}
6408 	if (bp->flags & BNX2_FLAG_USING_MSI)
6409 		netdev_info(dev, "using MSI\n");
6410 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6411 		netdev_info(dev, "using MSIX\n");
6412 
6413 	netif_tx_start_all_queues(dev);
6414 out:
6415 	return rc;
6416 
6417 open_err:
6418 	bnx2_napi_disable(bp);
6419 	bnx2_free_skbs(bp);
6420 	bnx2_free_irq(bp);
6421 	bnx2_free_mem(bp);
6422 	bnx2_del_napi(bp);
6423 	bnx2_release_firmware(bp);
6424 	goto out;
6425 }
6426 
6427 static void
6428 bnx2_reset_task(struct work_struct *work)
6429 {
6430 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6431 	int rc;
6432 	u16 pcicmd;
6433 
6434 	rtnl_lock();
6435 	if (!netif_running(bp->dev)) {
6436 		rtnl_unlock();
6437 		return;
6438 	}
6439 
6440 	bnx2_netif_stop(bp, true);
6441 
6442 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6443 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6444 		/* in case the PCI block has been reset */
6445 		pci_restore_state(bp->pdev);
6446 		pci_save_state(bp->pdev);
6447 	}
6448 	rc = bnx2_init_nic(bp, 1);
6449 	if (rc) {
6450 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6451 		bnx2_napi_enable(bp);
6452 		dev_close(bp->dev);
6453 		rtnl_unlock();
6454 		return;
6455 	}
6456 
6457 	atomic_set(&bp->intr_sem, 1);
6458 	bnx2_netif_start(bp, true);
6459 	rtnl_unlock();
6460 }
6461 
6462 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
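/* Expands, for example, BNX2_FTQ_ENTRY(TXP_) into
 * { "TXP_FTQ_CTL", BNX2_TXP_FTQ_CTL }, pairing each flow-through
 * queue control register's name with its offset for the dump below.
 */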
6463 
6464 static void
6465 bnx2_dump_ftq(struct bnx2 *bp)
6466 {
6467 	int i;
6468 	u32 reg, bdidx, cid, valid;
6469 	struct net_device *dev = bp->dev;
6470 	static const struct ftq_reg {
6471 		char *name;
6472 		u32 off;
6473 	} ftq_arr[] = {
6474 		BNX2_FTQ_ENTRY(RV2P_P),
6475 		BNX2_FTQ_ENTRY(RV2P_T),
6476 		BNX2_FTQ_ENTRY(RV2P_M),
6477 		BNX2_FTQ_ENTRY(TBDR_),
6478 		BNX2_FTQ_ENTRY(TDMA_),
6479 		BNX2_FTQ_ENTRY(TXP_),
6480 		BNX2_FTQ_ENTRY(TXP_),
6481 		BNX2_FTQ_ENTRY(TPAT_),
6482 		BNX2_FTQ_ENTRY(RXP_C),
6483 		BNX2_FTQ_ENTRY(RXP_),
6484 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6485 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6486 		BNX2_FTQ_ENTRY(COM_COMQ_),
6487 		BNX2_FTQ_ENTRY(CP_CPQ_),
6488 	};
6489 
6490 	netdev_err(dev, "<--- start FTQ dump --->\n");
6491 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6492 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6493 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6494 
6495 	netdev_err(dev, "CPU states:\n");
6496 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6497 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6498 			   reg, bnx2_reg_rd_ind(bp, reg),
6499 			   bnx2_reg_rd_ind(bp, reg + 4),
6500 			   bnx2_reg_rd_ind(bp, reg + 8),
6501 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6502 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6503 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6504 
6505 	netdev_err(dev, "<--- end FTQ dump --->\n");
6506 	netdev_err(dev, "<--- start TBDC dump --->\n");
6507 	netdev_err(dev, "TBDC free cnt: %ld\n",
6508 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6509 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6510 	for (i = 0; i < 0x20; i++) {
6511 		int j = 0;
6512 
6513 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6514 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6515 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6516 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6517 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6518 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6519 			j++;
6520 
6521 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6522 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6523 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6524 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6525 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6526 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6527 	}
6528 	netdev_err(dev, "<--- end TBDC dump --->\n");
6529 }
6530 
6531 static void
6532 bnx2_dump_state(struct bnx2 *bp)
6533 {
6534 	struct net_device *dev = bp->dev;
6535 	u32 val1, val2;
6536 
6537 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6538 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6539 		   atomic_read(&bp->intr_sem), val1);
6540 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6541 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6542 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6543 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6544 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6545 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6546 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6547 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6548 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6549 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6550 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6551 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6552 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6553 }
6554 
6555 static void
6556 bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6557 {
6558 	struct bnx2 *bp = netdev_priv(dev);
6559 
6560 	bnx2_dump_ftq(bp);
6561 	bnx2_dump_state(bp);
6562 	bnx2_dump_mcp_state(bp);
6563 
6564 	/* This allows the netif to be shut down gracefully before resetting */
6565 	schedule_work(&bp->reset_task);
6566 }
6567 
6568 /* Called with netif_tx_lock.
6569  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6570  * netif_wake_queue().
6571  */
6572 static netdev_tx_t
6573 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6574 {
6575 	struct bnx2 *bp = netdev_priv(dev);
6576 	dma_addr_t mapping;
6577 	struct bnx2_tx_bd *txbd;
6578 	struct bnx2_sw_tx_bd *tx_buf;
6579 	u32 len, vlan_tag_flags, last_frag, mss;
6580 	u16 prod, ring_prod;
6581 	int i;
6582 	struct bnx2_napi *bnapi;
6583 	struct bnx2_tx_ring_info *txr;
6584 	struct netdev_queue *txq;
6585 
6586 	/* Determine which tx ring we will be placed on */
6587 	i = skb_get_queue_mapping(skb);
6588 	bnapi = &bp->bnx2_napi[i];
6589 	txr = &bnapi->tx_ring;
6590 	txq = netdev_get_tx_queue(dev, i);
6591 
6592 	if (unlikely(bnx2_tx_avail(bp, txr) <
6593 	    (skb_shinfo(skb)->nr_frags + 1))) {
6594 		netif_tx_stop_queue(txq);
6595 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6596 
6597 		return NETDEV_TX_BUSY;
6598 	}
6599 	len = skb_headlen(skb);
6600 	prod = txr->tx_prod;
6601 	ring_prod = BNX2_TX_RING_IDX(prod);
6602 
6603 	vlan_tag_flags = 0;
6604 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6605 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6606 	}
6607 
6608 	if (skb_vlan_tag_present(skb)) {
6609 		vlan_tag_flags |=
6610 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6611 	}
6612 
6613 	if ((mss = skb_shinfo(skb)->gso_size)) {
6614 		u32 tcp_opt_len;
6615 		struct iphdr *iph;
6616 
6617 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6618 
6619 		tcp_opt_len = tcp_optlen(skb);
6620 
6621 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6622 			u32 tcp_off = skb_transport_offset(skb) -
6623 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6624 
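			/* tcp_off is the total length of any IPv6 extension
			 * headers between the fixed header and TCP.  The
			 * hardware takes it in 8-byte units, split across
			 * three separate BD flag/MSS bit fields below.
			 */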
6625 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6626 					  TX_BD_FLAGS_SW_FLAGS;
6627 			if (likely(tcp_off == 0))
6628 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6629 			else {
6630 				tcp_off >>= 3;
6631 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6632 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6633 						  ((tcp_off & 0x10) <<
6634 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6635 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6636 			}
6637 		} else {
6638 			iph = ip_hdr(skb);
6639 			if (tcp_opt_len || (iph->ihl > 5)) {
6640 				vlan_tag_flags |= ((iph->ihl - 5) +
6641 						   (tcp_opt_len >> 2)) << 8;
6642 			}
6643 		}
6644 	} else
6645 		mss = 0;
6646 
6647 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6648 				 DMA_TO_DEVICE);
6649 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6650 		dev_kfree_skb_any(skb);
6651 		return NETDEV_TX_OK;
6652 	}
6653 
6654 	tx_buf = &txr->tx_buf_ring[ring_prod];
6655 	tx_buf->skb = skb;
6656 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6657 
6658 	txbd = &txr->tx_desc_ring[ring_prod];
6659 
6660 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6661 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6662 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6663 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6664 
6665 	last_frag = skb_shinfo(skb)->nr_frags;
6666 	tx_buf->nr_frags = last_frag;
6667 	tx_buf->is_gso = skb_is_gso(skb);
6668 
6669 	for (i = 0; i < last_frag; i++) {
6670 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6671 
6672 		prod = BNX2_NEXT_TX_BD(prod);
6673 		ring_prod = BNX2_TX_RING_IDX(prod);
6674 		txbd = &txr->tx_desc_ring[ring_prod];
6675 
6676 		len = skb_frag_size(frag);
6677 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6678 					   DMA_TO_DEVICE);
6679 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6680 			goto dma_error;
6681 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6682 				   mapping);
6683 
6684 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6685 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6686 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6687 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6689 	}
6690 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6691 
6692 	/* Sync BD data before updating TX mailbox */
6693 	wmb();
6694 
6695 	netdev_tx_sent_queue(txq, skb->len);
6696 
6697 	prod = BNX2_NEXT_TX_BD(prod);
6698 	txr->tx_prod_bseq += skb->len;
6699 
6700 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6701 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6702 
6703 	txr->tx_prod = prod;
6704 
6705 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6706 		netif_tx_stop_queue(txq);
6707 
6708 		/* netif_tx_stop_queue() must be done before checking
6709 		 * tx index in bnx2_tx_avail() below, because in
6710 		 * bnx2_tx_int(), we update tx index before checking for
6711 		 * netif_tx_queue_stopped().
6712 		 */
6713 		smp_mb();
6714 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6715 			netif_tx_wake_queue(txq);
6716 	}
6717 
6718 	return NETDEV_TX_OK;
6719 dma_error:
6720 	/* save value of frag that failed */
6721 	last_frag = i;
6722 
6723 	/* start back at beginning and unmap skb */
6724 	prod = txr->tx_prod;
6725 	ring_prod = BNX2_TX_RING_IDX(prod);
6726 	tx_buf = &txr->tx_buf_ring[ring_prod];
6727 	tx_buf->skb = NULL;
6728 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6729 			 skb_headlen(skb), DMA_TO_DEVICE);
6730 
6731 	/* unmap remaining mapped pages */
6732 	for (i = 0; i < last_frag; i++) {
6733 		prod = BNX2_NEXT_TX_BD(prod);
6734 		ring_prod = BNX2_TX_RING_IDX(prod);
6735 		tx_buf = &txr->tx_buf_ring[ring_prod];
6736 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6737 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6738 			       DMA_TO_DEVICE);
6739 	}
6740 
6741 	dev_kfree_skb_any(skb);
6742 	return NETDEV_TX_OK;
6743 }
6744 
6745 /* Called with rtnl_lock */
6746 static int
6747 bnx2_close(struct net_device *dev)
6748 {
6749 	struct bnx2 *bp = netdev_priv(dev);
6750 
6751 	bnx2_disable_int_sync(bp);
6752 	bnx2_napi_disable(bp);
6753 	netif_tx_disable(dev);
6754 	del_timer_sync(&bp->timer);
6755 	bnx2_shutdown_chip(bp);
6756 	bnx2_free_irq(bp);
6757 	bnx2_free_skbs(bp);
6758 	bnx2_free_mem(bp);
6759 	bnx2_del_napi(bp);
6760 	bp->link_up = 0;
6761 	netif_carrier_off(bp->dev);
6762 	return 0;
6763 }
6764 
6765 static void
6766 bnx2_save_stats(struct bnx2 *bp)
6767 {
6768 	u32 *hw_stats = (u32 *) bp->stats_blk;
6769 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6770 	int i;
6771 
6772 	/* The first 10 counters are 64-bit counters */
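	/* Each 64-bit counter occupies two u32 words, hi word first, so
	 * the loop walks 20 words in steps of two.  When the summed low
	 * halves overflow 32 bits (e.g. 0xffffffff + 2), the carry is
	 * propagated into the saved hi word.
	 */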
6773 	for (i = 0; i < 20; i += 2) {
6774 		u32 hi;
6775 		u64 lo;
6776 
6777 		hi = temp_stats[i] + hw_stats[i];
6778 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6779 		if (lo > 0xffffffff)
6780 			hi++;
6781 		temp_stats[i] = hi;
6782 		temp_stats[i + 1] = lo & 0xffffffff;
6783 	}
6784 
6785 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6786 		temp_stats[i] += hw_stats[i];
6787 }
6788 
6789 #define GET_64BIT_NET_STATS64(ctr)		\
6790 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6791 
6792 #define GET_64BIT_NET_STATS(ctr)				\
6793 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6794 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6795 
6796 #define GET_32BIT_NET_STATS(ctr)				\
6797 	(unsigned long) (bp->stats_blk->ctr +			\
6798 			 bp->temp_stats_blk->ctr)
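/* Illustration: GET_64BIT_NET_STATS(stat_IfHCInOctets) evaluates to the
 * 64-bit hi:lo counter pair from the live stats block plus the pair
 * saved in temp_stats_blk across the last chip reset.
 */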
6799 
6800 static void
6801 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6802 {
6803 	struct bnx2 *bp = netdev_priv(dev);
6804 
6805 	if (!bp->stats_blk)
6806 		return;
6807 
6808 	net_stats->rx_packets =
6809 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6810 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6811 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6812 
6813 	net_stats->tx_packets =
6814 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6815 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6816 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6817 
6818 	net_stats->rx_bytes =
6819 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6820 
6821 	net_stats->tx_bytes =
6822 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6823 
6824 	net_stats->multicast =
6825 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6826 
6827 	net_stats->collisions =
6828 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6829 
6830 	net_stats->rx_length_errors =
6831 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6832 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6833 
6834 	net_stats->rx_over_errors =
6835 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6836 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6837 
6838 	net_stats->rx_frame_errors =
6839 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6840 
6841 	net_stats->rx_crc_errors =
6842 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6843 
6844 	net_stats->rx_errors = net_stats->rx_length_errors +
6845 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6846 		net_stats->rx_crc_errors;
6847 
6848 	net_stats->tx_aborted_errors =
6849 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6850 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6851 
6852 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6853 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6854 		net_stats->tx_carrier_errors = 0;
6855 	else {
6856 		net_stats->tx_carrier_errors =
6857 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6858 	}
6859 
6860 	net_stats->tx_errors =
6861 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6862 		net_stats->tx_aborted_errors +
6863 		net_stats->tx_carrier_errors;
6864 
6865 	net_stats->rx_missed_errors =
6866 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6867 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6868 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6870 }
6871 
6872 /* All ethtool functions called with rtnl_lock */
6873 
6874 static int
6875 bnx2_get_link_ksettings(struct net_device *dev,
6876 			struct ethtool_link_ksettings *cmd)
6877 {
6878 	struct bnx2 *bp = netdev_priv(dev);
6879 	int support_serdes = 0, support_copper = 0;
6880 	u32 supported, advertising;
6881 
6882 	supported = SUPPORTED_Autoneg;
6883 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6884 		support_serdes = 1;
6885 		support_copper = 1;
6886 	} else if (bp->phy_port == PORT_FIBRE)
6887 		support_serdes = 1;
6888 	else
6889 		support_copper = 1;
6890 
6891 	if (support_serdes) {
6892 		supported |= SUPPORTED_1000baseT_Full |
6893 			SUPPORTED_FIBRE;
6894 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6895 			supported |= SUPPORTED_2500baseX_Full;
6896 	}
6897 	if (support_copper) {
6898 		supported |= SUPPORTED_10baseT_Half |
6899 			SUPPORTED_10baseT_Full |
6900 			SUPPORTED_100baseT_Half |
6901 			SUPPORTED_100baseT_Full |
6902 			SUPPORTED_1000baseT_Full |
6903 			SUPPORTED_TP;
6904 	}
6905 
6906 	spin_lock_bh(&bp->phy_lock);
6907 	cmd->base.port = bp->phy_port;
6908 	advertising = bp->advertising;
6909 
6910 	if (bp->autoneg & AUTONEG_SPEED) {
6911 		cmd->base.autoneg = AUTONEG_ENABLE;
6912 	} else {
6913 		cmd->base.autoneg = AUTONEG_DISABLE;
6914 	}
6915 
6916 	if (netif_carrier_ok(dev)) {
6917 		cmd->base.speed = bp->line_speed;
6918 		cmd->base.duplex = bp->duplex;
6919 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6920 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6921 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6922 			else
6923 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6924 		}
6925 	}
6926 	else {
6927 		cmd->base.speed = SPEED_UNKNOWN;
6928 		cmd->base.duplex = DUPLEX_UNKNOWN;
6929 	}
6930 	spin_unlock_bh(&bp->phy_lock);
6931 
6932 	cmd->base.phy_address = bp->phy_addr;
6933 
6934 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6935 						supported);
6936 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6937 						advertising);
6938 
6939 	return 0;
6940 }
6941 
6942 static int
6943 bnx2_set_link_ksettings(struct net_device *dev,
6944 			const struct ethtool_link_ksettings *cmd)
6945 {
6946 	struct bnx2 *bp = netdev_priv(dev);
6947 	u8 autoneg = bp->autoneg;
6948 	u8 req_duplex = bp->req_duplex;
6949 	u16 req_line_speed = bp->req_line_speed;
6950 	u32 advertising = bp->advertising;
6951 	int err = -EINVAL;
6952 
6953 	spin_lock_bh(&bp->phy_lock);
6954 
6955 	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6956 		goto err_out_unlock;
6957 
6958 	if (cmd->base.port != bp->phy_port &&
6959 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6960 		goto err_out_unlock;
6961 
6962 	/* If device is down, we can store the settings only if the user
6963 	 * is setting the currently active port.
6964 	 */
6965 	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6966 		goto err_out_unlock;
6967 
6968 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6969 		autoneg |= AUTONEG_SPEED;
6970 
6971 		ethtool_convert_link_mode_to_legacy_u32(
6972 			&advertising, cmd->link_modes.advertising);
6973 
6974 		if (cmd->base.port == PORT_TP) {
6975 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6976 			if (!advertising)
6977 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6978 		} else {
6979 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6980 			if (!advertising)
6981 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6982 		}
6983 		advertising |= ADVERTISED_Autoneg;
6984 	}
6985 	else {
6986 		u32 speed = cmd->base.speed;
6987 
6988 		if (cmd->base.port == PORT_FIBRE) {
6989 			if ((speed != SPEED_1000 &&
6990 			     speed != SPEED_2500) ||
6991 			    (cmd->base.duplex != DUPLEX_FULL))
6992 				goto err_out_unlock;
6993 
6994 			if (speed == SPEED_2500 &&
6995 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6996 				goto err_out_unlock;
6997 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6998 			goto err_out_unlock;
6999 
7000 		autoneg &= ~AUTONEG_SPEED;
7001 		req_line_speed = speed;
7002 		req_duplex = cmd->base.duplex;
7003 		advertising = 0;
7004 	}
7005 
7006 	bp->autoneg = autoneg;
7007 	bp->advertising = advertising;
7008 	bp->req_line_speed = req_line_speed;
7009 	bp->req_duplex = req_duplex;
7010 
7011 	err = 0;
7012 	/* If device is down, the new settings will be picked up when it is
7013 	 * brought up.
7014 	 */
7015 	if (netif_running(dev))
7016 		err = bnx2_setup_phy(bp, cmd->base.port);
7017 
7018 err_out_unlock:
7019 	spin_unlock_bh(&bp->phy_lock);
7020 
7021 	return err;
7022 }
7023 
7024 static void
7025 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7026 {
7027 	struct bnx2 *bp = netdev_priv(dev);
7028 
7029 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7030 	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7031 	strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7032 }
7033 
7034 #define BNX2_REGDUMP_LEN		(32 * 1024)
7035 
7036 static int
7037 bnx2_get_regs_len(struct net_device *dev)
7038 {
7039 	return BNX2_REGDUMP_LEN;
7040 }
7041 
7042 static void
7043 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7044 {
7045 	u32 *p = _p, i, offset;
7046 	u8 *orig_p = _p;
7047 	struct bnx2 *bp = netdev_priv(dev);
7048 	static const u32 reg_boundaries[] = {
7049 		0x0000, 0x0098, 0x0400, 0x045c,
7050 		0x0800, 0x0880, 0x0c00, 0x0c10,
7051 		0x0c30, 0x0d08, 0x1000, 0x101c,
7052 		0x1040, 0x1048, 0x1080, 0x10a4,
7053 		0x1400, 0x1490, 0x1498, 0x14f0,
7054 		0x1500, 0x155c, 0x1580, 0x15dc,
7055 		0x1600, 0x1658, 0x1680, 0x16d8,
7056 		0x1800, 0x1820, 0x1840, 0x1854,
7057 		0x1880, 0x1894, 0x1900, 0x1984,
7058 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7059 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7060 		0x2000, 0x2030, 0x23c0, 0x2400,
7061 		0x2800, 0x2820, 0x2830, 0x2850,
7062 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7063 		0x3c00, 0x3c94, 0x4000, 0x4010,
7064 		0x4080, 0x4090, 0x43c0, 0x4458,
7065 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7066 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7067 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7068 		0x5fc0, 0x6000, 0x6400, 0x6428,
7069 		0x6800, 0x6848, 0x684c, 0x6860,
7070 		0x6888, 0x6910, 0x8000
7071 	};
7072 
7073 	regs->version = 0;
7074 
7075 	memset(p, 0, BNX2_REGDUMP_LEN);
7076 
7077 	if (!netif_running(bp->dev))
7078 		return;
7079 
7080 	i = 0;
7081 	offset = reg_boundaries[0];
7082 	p += offset;
7083 	while (offset < BNX2_REGDUMP_LEN) {
7084 		*p++ = BNX2_RD(bp, offset);
7085 		offset += 4;
7086 		if (offset == reg_boundaries[i + 1]) {
7087 			offset = reg_boundaries[i + 2];
7088 			p = (u32 *) (orig_p + offset);
7089 			i += 2;
7090 		}
7091 	}
7092 }
7093 
7094 static void
7095 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7096 {
7097 	struct bnx2 *bp = netdev_priv(dev);
7098 
7099 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7100 		wol->supported = 0;
7101 		wol->wolopts = 0;
7102 	}
7103 	else {
7104 		wol->supported = WAKE_MAGIC;
7105 		if (bp->wol)
7106 			wol->wolopts = WAKE_MAGIC;
7107 		else
7108 			wol->wolopts = 0;
7109 	}
7110 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7111 }
7112 
7113 static int
7114 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7115 {
7116 	struct bnx2 *bp = netdev_priv(dev);
7117 
7118 	if (wol->wolopts & ~WAKE_MAGIC)
7119 		return -EINVAL;
7120 
7121 	if (wol->wolopts & WAKE_MAGIC) {
7122 		if (bp->flags & BNX2_FLAG_NO_WOL)
7123 			return -EINVAL;
7124 
7125 		bp->wol = 1;
7126 	}
7127 	else {
7128 		bp->wol = 0;
7129 	}
7130 
7131 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7132 
7133 	return 0;
7134 }
7135 
7136 static int
7137 bnx2_nway_reset(struct net_device *dev)
7138 {
7139 	struct bnx2 *bp = netdev_priv(dev);
7140 	u32 bmcr;
7141 
7142 	if (!netif_running(dev))
7143 		return -EAGAIN;
7144 
7145 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7146 		return -EINVAL;
7147 	}
7148 
7149 	spin_lock_bh(&bp->phy_lock);
7150 
7151 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7152 		int rc;
7153 
7154 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7155 		spin_unlock_bh(&bp->phy_lock);
7156 		return rc;
7157 	}
7158 
7159 	/* Force a link down visible on the other side */
7160 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7161 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7162 		spin_unlock_bh(&bp->phy_lock);
7163 
7164 		msleep(20);
7165 
7166 		spin_lock_bh(&bp->phy_lock);
7167 
7168 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7169 		bp->serdes_an_pending = 1;
7170 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7171 	}
7172 
7173 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7174 	bmcr &= ~BMCR_LOOPBACK;
7175 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7176 
7177 	spin_unlock_bh(&bp->phy_lock);
7178 
7179 	return 0;
7180 }
7181 
7182 static u32
7183 bnx2_get_link(struct net_device *dev)
7184 {
7185 	struct bnx2 *bp = netdev_priv(dev);
7186 
7187 	return bp->link_up;
7188 }
7189 
7190 static int
7191 bnx2_get_eeprom_len(struct net_device *dev)
7192 {
7193 	struct bnx2 *bp = netdev_priv(dev);
7194 
7195 	if (!bp->flash_info)
7196 		return 0;
7197 
7198 	return (int) bp->flash_size;
7199 }
7200 
7201 static int
7202 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7203 		u8 *eebuf)
7204 {
7205 	struct bnx2 *bp = netdev_priv(dev);
7206 	int rc;
7207 
7208 	/* parameters already validated in ethtool_get_eeprom */
7209 
7210 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7211 
7212 	return rc;
7213 }
7214 
7215 static int
7216 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7217 		u8 *eebuf)
7218 {
7219 	struct bnx2 *bp = netdev_priv(dev);
7220 	int rc;
7221 
7222 	/* parameters already validated in ethtool_set_eeprom */
7223 
7224 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7225 
7226 	return rc;
7227 }
7228 
7229 static int bnx2_get_coalesce(struct net_device *dev,
7230 			     struct ethtool_coalesce *coal,
7231 			     struct kernel_ethtool_coalesce *kernel_coal,
7232 			     struct netlink_ext_ack *extack)
7233 {
7234 	struct bnx2 *bp = netdev_priv(dev);
7235 
7236 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7237 
7238 	coal->rx_coalesce_usecs = bp->rx_ticks;
7239 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7240 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7241 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7242 
7243 	coal->tx_coalesce_usecs = bp->tx_ticks;
7244 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7245 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7246 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7247 
7248 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7249 
7250 	return 0;
7251 }
7252 
7253 static int bnx2_set_coalesce(struct net_device *dev,
7254 			     struct ethtool_coalesce *coal,
7255 			     struct kernel_ethtool_coalesce *kernel_coal,
7256 			     struct netlink_ext_ack *extack)
7257 {
7258 	struct bnx2 *bp = netdev_priv(dev);
7259 
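	/* Clamp each parameter to the width of its hardware field:
	 * coalescing tick values are 10 bits (0x3ff) and frame-count
	 * trip points are 8 bits (0xff).
	 */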
7260 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7261 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7262 
7263 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7264 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7265 
7266 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7267 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7268 
7269 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7270 	if (bp->rx_quick_cons_trip_int > 0xff)
7271 		bp->rx_quick_cons_trip_int = 0xff;
7272 
7273 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7274 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7275 
7276 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7277 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7278 
7279 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7280 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7281 
7282 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7283 	if (bp->tx_quick_cons_trip_int > 0xff)
7284 		bp->tx_quick_cons_trip_int = 0xff;
7285 
7286 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7287 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7288 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7289 			bp->stats_ticks = USEC_PER_SEC;
7290 	}
7291 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7292 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7293 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7294 
7295 	if (netif_running(bp->dev)) {
7296 		bnx2_netif_stop(bp, true);
7297 		bnx2_init_nic(bp, 0);
7298 		bnx2_netif_start(bp, true);
7299 	}
7300 
7301 	return 0;
7302 }
7303 
7304 static void
7305 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7306 		   struct kernel_ethtool_ringparam *kernel_ering,
7307 		   struct netlink_ext_ack *extack)
7308 {
7309 	struct bnx2 *bp = netdev_priv(dev);
7310 
7311 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7312 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7313 
7314 	ering->rx_pending = bp->rx_ring_size;
7315 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7316 
7317 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7318 	ering->tx_pending = bp->tx_ring_size;
7319 }
7320 
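/* Changing ring sizes requires a full teardown and rebuild: quiesce the
 * interface, reset the chip, release the old buffers and DMA memory, then
 * reallocate and reinitialize with the new sizes.  If any step of the
 * rebuild fails, the device is closed rather than left half-initialized.
 */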
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
		   struct kernel_ethtool_ringparam *kernel_ering,
		   struct netlink_ext_ack *extack)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

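	/* The TX ring must hold at least one maximally fragmented skb
	 * (MAX_SKB_FRAGS + 1 descriptors), hence the lower bound on
	 * tx_pending below.
	 */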
	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
	return rc;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
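/* Each entry gives the width in bytes of the corresponding hardware
 * counter (8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = unsupported, reported
 * as zero), indexed to match bnx2_stats_str_arr above.
 */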
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (!hw_stats) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}

static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}

static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int bnx2_set_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}
	if (channels->rx_count > max_rx_rings ||
	    channels->tx_count > max_tx_rings)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
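
/* These hooks back the standard ethtool interface; illustrative
 * invocations (device name hypothetical):
 *   ethtool -C eth0 rx-usecs 18 rx-frames 12  ->  bnx2_set_coalesce()
 *   ethtool -G eth0 rx 255 tx 255             ->  bnx2_set_ringparam()
 *   ethtool -L eth0 rx 4 tx 4                 ->  bnx2_set_channels()
 *   ethtool -p eth0 5                         ->  bnx2_set_phys_id()
 */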

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
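/* Netpoll path: normal interrupt delivery may be unavailable, so invoke
 * each vector's handler directly (e.g. for netconsole).
 */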
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

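/* Determine whether a 5709 port is copper or SERDES.  Single-media bond
 * IDs decide directly (the "C" and "S" suffixes presumably denoting
 * copper and SERDES); for dual-media parts the PHY strap, or its
 * override, is decoded per function.
 */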
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}

static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	unsigned int len;
	int rc, i, j;
	u8 *data;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4)
		swab32s((u32 *)&data[i]);

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (j < 0)
		goto vpd_done;

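	/* The firmware version is only present in the VPD on Dell OEM
	 * boards: the manufacturer-ID keyword must read "1028", the ASCII
	 * form of Dell's PCI vendor ID.
	 */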
	if (len != 4 || memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_VENDOR0,
					 &len);
	if (j < 0)
		goto vpd_done;

	if (len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}

static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (!bp->temp_stats_blk) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pdev->pm_cap;
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big-endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
		if (pdev->msix_cap)
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
		if (pdev->msi_cap)
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"dma_set_coherent_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		reg = BNX2_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = bp->func << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

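	/* Append the bootcode revision as "bc x.y.z": each of the three
	 * high bytes of the 32-bit revision word becomes one dotted
	 * decimal component, with leading zeros suppressed.
	 */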
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

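	/* The permanent MAC address lives in shared memory as two words:
	 * the upper two bytes and the lower four, most significant byte
	 * first.
	 */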
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* allocate stats_blk */
	rc = bnx2_alloc_stats_blk(dev);
	if (rc)
		goto err_out_unmap;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_get_5709_media(bp);
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	timer_setup(&bp->timer, bnx2_timer, 0);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
	bp->cnic_probe = bnx2_cnic_probe;
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	kfree(bp->temp_stats_blk);

	return rc;
}

static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}

static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_eth_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from the first kernel could still be running in the
	 * kdump kernel, and the new IO page table has been created before
	 * bnx2 resets the chip at open time.  Wait for the in-flight DMA to
	 * complete so it does not look up entries in the newly created IO
	 * page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	eth_hw_addr_set(dev, bp->mac_addr);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}

static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int
bnx2_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}

static int
bnx2_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}

static void bnx2_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	if (netif_running(dev))
		dev_close(bp->dev);

	if (system_state == SYSTEM_POWER_OFF)
		bnx2_set_power_state(bp, PCI_D3hot);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);