1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 
48 #include <net/checksum.h>
49 #include <net/ip.h>
50 
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54 
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59 
60 #define BAR_0	0
61 #define BAR_2	2
62 
63 #include "tg3.h"
64 
65 /* Functions & macros to verify TG3_FLAGS types */
66 
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68 {
69 	return test_bit(flag, bits);
70 }
71 
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74 	set_bit(flag, bits);
75 }
76 
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 	clear_bit(flag, bits);
80 }
81 
82 #define tg3_flag(tp, flag)				\
83 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag)				\
85 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag)			\
87 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88 
89 #define DRV_MODULE_NAME		"tg3"
90 #define TG3_MAJ_NUM			3
91 #define TG3_MIN_NUM			123
92 #define DRV_MODULE_VERSION	\
93 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE	"March 21, 2012"
95 
96 #define RESET_KIND_SHUTDOWN	0
97 #define RESET_KIND_INIT		1
98 #define RESET_KIND_SUSPEND	2
99 
100 #define TG3_DEF_RX_MODE		0
101 #define TG3_DEF_TX_MODE		0
102 #define TG3_DEF_MSG_ENABLE	  \
103 	(NETIF_MSG_DRV		| \
104 	 NETIF_MSG_PROBE	| \
105 	 NETIF_MSG_LINK		| \
106 	 NETIF_MSG_TIMER	| \
107 	 NETIF_MSG_IFDOWN	| \
108 	 NETIF_MSG_IFUP		| \
109 	 NETIF_MSG_RX_ERR	| \
110 	 NETIF_MSG_TX_ERR)
111 
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
113 
114 /* length of time before we decide the hardware is borked,
115  * and dev->tx_timeout() should be called to fix the problem
116  */
117 
118 #define TG3_TX_TIMEOUT			(5 * HZ)
119 
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU			60
122 #define TG3_MAX_MTU(tp)	\
123 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
124 
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126  * You can't change the ring sizes, but you can change where you place
127  * them in the NIC onboard memory.
128  */
129 #define TG3_RX_STD_RING_SIZE(tp) \
130 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING		200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
137 
138 /* Do not place this n-ring entries value into the tp struct itself,
139  * we really want to expose these constants to GCC so that modulo et
140  * al.  operations are done with shifts and masks instead of with
141  * hw multiply/modulo instructions.  Another solution would be to
142  * replace things like '% foo' with '& (foo - 1)'.
143  */
144 
145 #define TG3_TX_RING_SIZE		512
146 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
147 
148 #define TG3_RX_STD_RING_BYTES(tp) \
149 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
155 				 TG3_TX_RING_SIZE)
156 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
157 
158 #define TG3_DMA_BYTE_ENAB		64
159 
160 #define TG3_RX_STD_DMA_SZ		1536
161 #define TG3_RX_JMB_DMA_SZ		9046
162 
163 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
164 
165 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
167 
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
170 
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
173 
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175  * that are at least dword aligned when used in PCIX mode.  The driver
176  * works around this bug by double copying the packet.  This workaround
177  * is built into the normal double copy length check for efficiency.
178  *
179  * However, the double copy is only necessary on those architectures
180  * where unaligned memory accesses are inefficient.  For those architectures
181  * where unaligned memory accesses incur little penalty, we can reintegrate
182  * the 5701 in the normal rx path.  Doing so saves a device structure
183  * dereference by hardcoding the double copy threshold in place.
184  */
185 #define TG3_RX_COPY_THRESHOLD		256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
188 #else
189 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
190 #endif
191 
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
194 #else
195 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
196 #endif
197 
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K		2048
201 #define TG3_TX_BD_DMA_MAX_4K		4096
202 
203 #define TG3_RAW_IP_ALIGN 2
204 
205 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
206 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
207 
208 #define FIRMWARE_TG3		"tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
211 
212 static char version[] __devinitdata =
213 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214 
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222 
223 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226 
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309 	{}
310 };
311 
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313 
314 static const struct {
315 	const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317 	{ "rx_octets" },
318 	{ "rx_fragments" },
319 	{ "rx_ucast_packets" },
320 	{ "rx_mcast_packets" },
321 	{ "rx_bcast_packets" },
322 	{ "rx_fcs_errors" },
323 	{ "rx_align_errors" },
324 	{ "rx_xon_pause_rcvd" },
325 	{ "rx_xoff_pause_rcvd" },
326 	{ "rx_mac_ctrl_rcvd" },
327 	{ "rx_xoff_entered" },
328 	{ "rx_frame_too_long_errors" },
329 	{ "rx_jabbers" },
330 	{ "rx_undersize_packets" },
331 	{ "rx_in_length_errors" },
332 	{ "rx_out_length_errors" },
333 	{ "rx_64_or_less_octet_packets" },
334 	{ "rx_65_to_127_octet_packets" },
335 	{ "rx_128_to_255_octet_packets" },
336 	{ "rx_256_to_511_octet_packets" },
337 	{ "rx_512_to_1023_octet_packets" },
338 	{ "rx_1024_to_1522_octet_packets" },
339 	{ "rx_1523_to_2047_octet_packets" },
340 	{ "rx_2048_to_4095_octet_packets" },
341 	{ "rx_4096_to_8191_octet_packets" },
342 	{ "rx_8192_to_9022_octet_packets" },
343 
344 	{ "tx_octets" },
345 	{ "tx_collisions" },
346 
347 	{ "tx_xon_sent" },
348 	{ "tx_xoff_sent" },
349 	{ "tx_flow_control" },
350 	{ "tx_mac_errors" },
351 	{ "tx_single_collisions" },
352 	{ "tx_mult_collisions" },
353 	{ "tx_deferred" },
354 	{ "tx_excessive_collisions" },
355 	{ "tx_late_collisions" },
356 	{ "tx_collide_2times" },
357 	{ "tx_collide_3times" },
358 	{ "tx_collide_4times" },
359 	{ "tx_collide_5times" },
360 	{ "tx_collide_6times" },
361 	{ "tx_collide_7times" },
362 	{ "tx_collide_8times" },
363 	{ "tx_collide_9times" },
364 	{ "tx_collide_10times" },
365 	{ "tx_collide_11times" },
366 	{ "tx_collide_12times" },
367 	{ "tx_collide_13times" },
368 	{ "tx_collide_14times" },
369 	{ "tx_collide_15times" },
370 	{ "tx_ucast_packets" },
371 	{ "tx_mcast_packets" },
372 	{ "tx_bcast_packets" },
373 	{ "tx_carrier_sense_errors" },
374 	{ "tx_discards" },
375 	{ "tx_errors" },
376 
377 	{ "dma_writeq_full" },
378 	{ "dma_write_prioq_full" },
379 	{ "rxbds_empty" },
380 	{ "rx_discards" },
381 	{ "rx_errors" },
382 	{ "rx_threshold_hit" },
383 
384 	{ "dma_readq_full" },
385 	{ "dma_read_prioq_full" },
386 	{ "tx_comp_queue_full" },
387 
388 	{ "ring_set_send_prod_index" },
389 	{ "ring_status_update" },
390 	{ "nic_irqs" },
391 	{ "nic_avoided_irqs" },
392 	{ "nic_tx_threshold_hit" },
393 
394 	{ "mbuf_lwm_thresh_hit" },
395 };
396 
397 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
398 
399 
400 static const struct {
401 	const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 	{ "nvram test        (online) " },
404 	{ "link test         (online) " },
405 	{ "register test     (offline)" },
406 	{ "memory test       (offline)" },
407 	{ "mac loopback test (offline)" },
408 	{ "phy loopback test (offline)" },
409 	{ "ext loopback test (offline)" },
410 	{ "interrupt test    (offline)" },
411 };
412 
413 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
414 
415 
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418 	writel(val, tp->regs + off);
419 }
420 
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423 	return readl(tp->regs + off);
424 }
425 
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428 	writel(val, tp->aperegs + off);
429 }
430 
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433 	return readl(tp->aperegs + off);
434 }
435 
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438 	unsigned long flags;
439 
440 	spin_lock_irqsave(&tp->indirect_lock, flags);
441 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445 
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447 {
448 	writel(val, tp->regs + off);
449 	readl(tp->regs + off);
450 }
451 
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454 	unsigned long flags;
455 	u32 val;
456 
457 	spin_lock_irqsave(&tp->indirect_lock, flags);
458 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
461 	return val;
462 }
463 
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
465 {
466 	unsigned long flags;
467 
468 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 				       TG3_64BIT_REG_LOW, val);
471 		return;
472 	}
473 	if (off == TG3_RX_STD_PROD_IDX_REG) {
474 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 				       TG3_64BIT_REG_LOW, val);
476 		return;
477 	}
478 
479 	spin_lock_irqsave(&tp->indirect_lock, flags);
480 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
483 
484 	/* In indirect mode when disabling interrupts, we also need
485 	 * to clear the interrupt bit in the GRC local ctrl register.
486 	 */
487 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
488 	    (val == 0x1)) {
489 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
491 	}
492 }
493 
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496 	unsigned long flags;
497 	u32 val;
498 
499 	spin_lock_irqsave(&tp->indirect_lock, flags);
500 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
503 	return val;
504 }
505 
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 		/* Non-posted methods */
515 		tp->write32(tp, off, val);
516 	else {
517 		/* Posted method */
518 		tg3_write32(tp, off, val);
519 		if (usec_wait)
520 			udelay(usec_wait);
521 		tp->read32(tp, off);
522 	}
523 	/* Wait again after the read for the posted method to guarantee that
524 	 * the wait time is met.
525 	 */
526 	if (usec_wait)
527 		udelay(usec_wait);
528 }
529 
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532 	tp->write32_mbox(tp, off, val);
533 	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 		tp->read32_mbox(tp, off);
535 }
536 
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538 {
539 	void __iomem *mbox = tp->regs + off;
540 	writel(val, mbox);
541 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
542 		writel(val, mbox);
543 	if (tg3_flag(tp, MBOX_WRITE_REORDER))
544 		readl(mbox);
545 }
546 
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549 	return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551 
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554 	writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556 
557 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
562 
563 #define tw32(reg, val)			tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)			tp->read32(tp, reg)
567 
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
569 {
570 	unsigned long flags;
571 
572 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
574 		return;
575 
576 	spin_lock_irqsave(&tp->indirect_lock, flags);
577 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
580 
581 		/* Always leave this as zero. */
582 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
583 	} else {
584 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
586 
587 		/* Always leave this as zero. */
588 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
589 	}
590 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
591 }
592 
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
594 {
595 	unsigned long flags;
596 
597 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599 		*val = 0;
600 		return;
601 	}
602 
603 	spin_lock_irqsave(&tp->indirect_lock, flags);
604 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
607 
608 		/* Always leave this as zero. */
609 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
610 	} else {
611 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 		*val = tr32(TG3PCI_MEM_WIN_DATA);
613 
614 		/* Always leave this as zero. */
615 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
616 	}
617 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
618 }
619 
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622 	int i;
623 	u32 regbase, bit;
624 
625 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 		regbase = TG3_APE_LOCK_GRANT;
627 	else
628 		regbase = TG3_APE_PER_LOCK_GRANT;
629 
630 	/* Make sure the driver hasn't any stale locks. */
631 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632 		switch (i) {
633 		case TG3_APE_LOCK_PHY0:
634 		case TG3_APE_LOCK_PHY1:
635 		case TG3_APE_LOCK_PHY2:
636 		case TG3_APE_LOCK_PHY3:
637 			bit = APE_LOCK_GRANT_DRIVER;
638 			break;
639 		default:
640 			if (!tp->pci_fn)
641 				bit = APE_LOCK_GRANT_DRIVER;
642 			else
643 				bit = 1 << tp->pci_fn;
644 		}
645 		tg3_ape_write32(tp, regbase + 4 * i, bit);
646 	}
647 
648 }
649 
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
651 {
652 	int i, off;
653 	int ret = 0;
654 	u32 status, req, gnt, bit;
655 
656 	if (!tg3_flag(tp, ENABLE_APE))
657 		return 0;
658 
659 	switch (locknum) {
660 	case TG3_APE_LOCK_GPIO:
661 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662 			return 0;
663 	case TG3_APE_LOCK_GRC:
664 	case TG3_APE_LOCK_MEM:
665 		if (!tp->pci_fn)
666 			bit = APE_LOCK_REQ_DRIVER;
667 		else
668 			bit = 1 << tp->pci_fn;
669 		break;
670 	default:
671 		return -EINVAL;
672 	}
673 
674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 		req = TG3_APE_LOCK_REQ;
676 		gnt = TG3_APE_LOCK_GRANT;
677 	} else {
678 		req = TG3_APE_PER_LOCK_REQ;
679 		gnt = TG3_APE_PER_LOCK_GRANT;
680 	}
681 
682 	off = 4 * locknum;
683 
684 	tg3_ape_write32(tp, req + off, bit);
685 
686 	/* Wait for up to 1 millisecond to acquire lock. */
687 	for (i = 0; i < 100; i++) {
688 		status = tg3_ape_read32(tp, gnt + off);
689 		if (status == bit)
690 			break;
691 		udelay(10);
692 	}
693 
694 	if (status != bit) {
695 		/* Revoke the lock request. */
696 		tg3_ape_write32(tp, gnt + off, bit);
697 		ret = -EBUSY;
698 	}
699 
700 	return ret;
701 }
702 
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705 	u32 gnt, bit;
706 
707 	if (!tg3_flag(tp, ENABLE_APE))
708 		return;
709 
710 	switch (locknum) {
711 	case TG3_APE_LOCK_GPIO:
712 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713 			return;
714 	case TG3_APE_LOCK_GRC:
715 	case TG3_APE_LOCK_MEM:
716 		if (!tp->pci_fn)
717 			bit = APE_LOCK_GRANT_DRIVER;
718 		else
719 			bit = 1 << tp->pci_fn;
720 		break;
721 	default:
722 		return;
723 	}
724 
725 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 		gnt = TG3_APE_LOCK_GRANT;
727 	else
728 		gnt = TG3_APE_PER_LOCK_GRANT;
729 
730 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732 
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
734 {
735 	int i;
736 	u32 apedata;
737 
738 	/* NCSI does not support APE events */
739 	if (tg3_flag(tp, APE_HAS_NCSI))
740 		return;
741 
742 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 	if (apedata != APE_SEG_SIG_MAGIC)
744 		return;
745 
746 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 	if (!(apedata & APE_FW_STATUS_READY))
748 		return;
749 
750 	/* Wait for up to 1 millisecond for APE to service previous event. */
751 	for (i = 0; i < 10; i++) {
752 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753 			return;
754 
755 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
756 
757 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 					event | APE_EVENT_STATUS_EVENT_PENDING);
760 
761 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
762 
763 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
764 			break;
765 
766 		udelay(100);
767 	}
768 
769 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771 }
772 
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
774 {
775 	u32 event;
776 	u32 apedata;
777 
778 	if (!tg3_flag(tp, ENABLE_APE))
779 		return;
780 
781 	switch (kind) {
782 	case RESET_KIND_INIT:
783 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 				APE_HOST_SEG_SIG_MAGIC);
785 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 				APE_HOST_SEG_LEN_MAGIC);
787 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 				APE_HOST_BEHAV_NO_PHYLOCK);
793 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 				    TG3_APE_HOST_DRVR_STATE_START);
795 
796 		event = APE_EVENT_STATUS_STATE_START;
797 		break;
798 	case RESET_KIND_SHUTDOWN:
799 		/* With the interface we are currently using,
800 		 * APE does not track driver state.  Wiping
801 		 * out the HOST SEGMENT SIGNATURE forces
802 		 * the APE to assume OS absent status.
803 		 */
804 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
805 
806 		if (device_may_wakeup(&tp->pdev->dev) &&
807 		    tg3_flag(tp, WOL_ENABLE)) {
808 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 					    TG3_APE_HOST_WOL_SPEED_AUTO);
810 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
811 		} else
812 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
813 
814 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
815 
816 		event = APE_EVENT_STATUS_STATE_UNLOAD;
817 		break;
818 	case RESET_KIND_SUSPEND:
819 		event = APE_EVENT_STATUS_STATE_SUSPEND;
820 		break;
821 	default:
822 		return;
823 	}
824 
825 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
826 
827 	tg3_ape_send_event(tp, event);
828 }
829 
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832 	int i;
833 
834 	tw32(TG3PCI_MISC_HOST_CTRL,
835 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 	for (i = 0; i < tp->irq_max; i++)
837 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839 
840 static void tg3_enable_ints(struct tg3 *tp)
841 {
842 	int i;
843 
844 	tp->irq_sync = 0;
845 	wmb();
846 
847 	tw32(TG3PCI_MISC_HOST_CTRL,
848 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
849 
850 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 	for (i = 0; i < tp->irq_cnt; i++) {
852 		struct tg3_napi *tnapi = &tp->napi[i];
853 
854 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 		if (tg3_flag(tp, 1SHOT_MSI))
856 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
857 
858 		tp->coal_now |= tnapi->coal_now;
859 	}
860 
861 	/* Force an initial interrupt */
862 	if (!tg3_flag(tp, TAGGED_STATUS) &&
863 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
865 	else
866 		tw32(HOSTCC_MODE, tp->coal_now);
867 
868 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869 }
870 
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873 	struct tg3 *tp = tnapi->tp;
874 	struct tg3_hw_status *sblk = tnapi->hw_status;
875 	unsigned int work_exists = 0;
876 
877 	/* check for phy events */
878 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 		if (sblk->status & SD_STATUS_LINK_CHG)
880 			work_exists = 1;
881 	}
882 	/* check for RX/TX work to do */
883 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885 		work_exists = 1;
886 
887 	return work_exists;
888 }
889 
890 /* tg3_int_reenable
891  *  similar to tg3_enable_ints, but it accurately determines whether there
892  *  is new work pending and can return without flushing the PIO write
893  *  which reenables interrupts
894  */
895 static void tg3_int_reenable(struct tg3_napi *tnapi)
896 {
897 	struct tg3 *tp = tnapi->tp;
898 
899 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
900 	mmiowb();
901 
902 	/* When doing tagged status, this work check is unnecessary.
903 	 * The last_tag we write above tells the chip which piece of
904 	 * work we've completed.
905 	 */
906 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
907 		tw32(HOSTCC_MODE, tp->coalesce_mode |
908 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
909 }
910 
911 static void tg3_switch_clocks(struct tg3 *tp)
912 {
913 	u32 clock_ctrl;
914 	u32 orig_clock_ctrl;
915 
916 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
917 		return;
918 
919 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
920 
921 	orig_clock_ctrl = clock_ctrl;
922 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
923 		       CLOCK_CTRL_CLKRUN_OENABLE |
924 		       0x1f);
925 	tp->pci_clock_ctrl = clock_ctrl;
926 
927 	if (tg3_flag(tp, 5705_PLUS)) {
928 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
929 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
930 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
931 		}
932 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
933 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
934 			    clock_ctrl |
935 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
936 			    40);
937 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
938 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
939 			    40);
940 	}
941 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
942 }
943 
944 #define PHY_BUSY_LOOPS	5000
945 
946 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
947 {
948 	u32 frame_val;
949 	unsigned int loops;
950 	int ret;
951 
952 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
953 		tw32_f(MAC_MI_MODE,
954 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
955 		udelay(80);
956 	}
957 
958 	*val = 0x0;
959 
960 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
961 		      MI_COM_PHY_ADDR_MASK);
962 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
963 		      MI_COM_REG_ADDR_MASK);
964 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
965 
966 	tw32_f(MAC_MI_COM, frame_val);
967 
968 	loops = PHY_BUSY_LOOPS;
969 	while (loops != 0) {
970 		udelay(10);
971 		frame_val = tr32(MAC_MI_COM);
972 
973 		if ((frame_val & MI_COM_BUSY) == 0) {
974 			udelay(5);
975 			frame_val = tr32(MAC_MI_COM);
976 			break;
977 		}
978 		loops -= 1;
979 	}
980 
981 	ret = -EBUSY;
982 	if (loops != 0) {
983 		*val = frame_val & MI_COM_DATA_MASK;
984 		ret = 0;
985 	}
986 
987 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
988 		tw32_f(MAC_MI_MODE, tp->mi_mode);
989 		udelay(80);
990 	}
991 
992 	return ret;
993 }
994 
995 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
996 {
997 	u32 frame_val;
998 	unsigned int loops;
999 	int ret;
1000 
1001 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1002 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1003 		return 0;
1004 
1005 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1006 		tw32_f(MAC_MI_MODE,
1007 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1008 		udelay(80);
1009 	}
1010 
1011 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1012 		      MI_COM_PHY_ADDR_MASK);
1013 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1014 		      MI_COM_REG_ADDR_MASK);
1015 	frame_val |= (val & MI_COM_DATA_MASK);
1016 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1017 
1018 	tw32_f(MAC_MI_COM, frame_val);
1019 
1020 	loops = PHY_BUSY_LOOPS;
1021 	while (loops != 0) {
1022 		udelay(10);
1023 		frame_val = tr32(MAC_MI_COM);
1024 		if ((frame_val & MI_COM_BUSY) == 0) {
1025 			udelay(5);
1026 			frame_val = tr32(MAC_MI_COM);
1027 			break;
1028 		}
1029 		loops -= 1;
1030 	}
1031 
1032 	ret = -EBUSY;
1033 	if (loops != 0)
1034 		ret = 0;
1035 
1036 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1037 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1038 		udelay(80);
1039 	}
1040 
1041 	return ret;
1042 }
1043 
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046 	int err;
1047 
1048 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049 	if (err)
1050 		goto done;
1051 
1052 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053 	if (err)
1054 		goto done;
1055 
1056 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058 	if (err)
1059 		goto done;
1060 
1061 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062 
1063 done:
1064 	return err;
1065 }
1066 
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069 	int err;
1070 
1071 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072 	if (err)
1073 		goto done;
1074 
1075 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076 	if (err)
1077 		goto done;
1078 
1079 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081 	if (err)
1082 		goto done;
1083 
1084 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085 
1086 done:
1087 	return err;
1088 }
1089 
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092 	int err;
1093 
1094 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095 	if (!err)
1096 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097 
1098 	return err;
1099 }
1100 
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103 	int err;
1104 
1105 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106 	if (!err)
1107 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108 
1109 	return err;
1110 }
1111 
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114 	int err;
1115 
1116 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1119 	if (!err)
1120 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121 
1122 	return err;
1123 }
1124 
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128 		set |= MII_TG3_AUXCTL_MISC_WREN;
1129 
1130 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132 
1133 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1134 	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1135 			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1136 			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1137 
1138 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1139 	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140 			     MII_TG3_AUXCTL_ACTL_TX_6DB);
1141 
1142 static int tg3_bmcr_reset(struct tg3 *tp)
1143 {
1144 	u32 phy_control;
1145 	int limit, err;
1146 
1147 	/* OK, reset it, and poll the BMCR_RESET bit until it
1148 	 * clears or we time out.
1149 	 */
1150 	phy_control = BMCR_RESET;
1151 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1152 	if (err != 0)
1153 		return -EBUSY;
1154 
1155 	limit = 5000;
1156 	while (limit--) {
1157 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1158 		if (err != 0)
1159 			return -EBUSY;
1160 
1161 		if ((phy_control & BMCR_RESET) == 0) {
1162 			udelay(40);
1163 			break;
1164 		}
1165 		udelay(10);
1166 	}
1167 	if (limit < 0)
1168 		return -EBUSY;
1169 
1170 	return 0;
1171 }
1172 
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175 	struct tg3 *tp = bp->priv;
1176 	u32 val;
1177 
1178 	spin_lock_bh(&tp->lock);
1179 
1180 	if (tg3_readphy(tp, reg, &val))
1181 		val = -EIO;
1182 
1183 	spin_unlock_bh(&tp->lock);
1184 
1185 	return val;
1186 }
1187 
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190 	struct tg3 *tp = bp->priv;
1191 	u32 ret = 0;
1192 
1193 	spin_lock_bh(&tp->lock);
1194 
1195 	if (tg3_writephy(tp, reg, val))
1196 		ret = -EIO;
1197 
1198 	spin_unlock_bh(&tp->lock);
1199 
1200 	return ret;
1201 }
1202 
1203 static int tg3_mdio_reset(struct mii_bus *bp)
1204 {
1205 	return 0;
1206 }
1207 
1208 static void tg3_mdio_config_5785(struct tg3 *tp)
1209 {
1210 	u32 val;
1211 	struct phy_device *phydev;
1212 
1213 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1214 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1215 	case PHY_ID_BCM50610:
1216 	case PHY_ID_BCM50610M:
1217 		val = MAC_PHYCFG2_50610_LED_MODES;
1218 		break;
1219 	case PHY_ID_BCMAC131:
1220 		val = MAC_PHYCFG2_AC131_LED_MODES;
1221 		break;
1222 	case PHY_ID_RTL8211C:
1223 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1224 		break;
1225 	case PHY_ID_RTL8201E:
1226 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1227 		break;
1228 	default:
1229 		return;
1230 	}
1231 
1232 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1233 		tw32(MAC_PHYCFG2, val);
1234 
1235 		val = tr32(MAC_PHYCFG1);
1236 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1237 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1238 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1239 		tw32(MAC_PHYCFG1, val);
1240 
1241 		return;
1242 	}
1243 
1244 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1245 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1246 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1247 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1248 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1249 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1250 		       MAC_PHYCFG2_INBAND_ENABLE;
1251 
1252 	tw32(MAC_PHYCFG2, val);
1253 
1254 	val = tr32(MAC_PHYCFG1);
1255 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1256 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1257 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1258 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1259 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1260 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1261 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1262 	}
1263 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1264 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1265 	tw32(MAC_PHYCFG1, val);
1266 
1267 	val = tr32(MAC_EXT_RGMII_MODE);
1268 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1269 		 MAC_RGMII_MODE_RX_QUALITY |
1270 		 MAC_RGMII_MODE_RX_ACTIVITY |
1271 		 MAC_RGMII_MODE_RX_ENG_DET |
1272 		 MAC_RGMII_MODE_TX_ENABLE |
1273 		 MAC_RGMII_MODE_TX_LOWPWR |
1274 		 MAC_RGMII_MODE_TX_RESET);
1275 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1276 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1277 			val |= MAC_RGMII_MODE_RX_INT_B |
1278 			       MAC_RGMII_MODE_RX_QUALITY |
1279 			       MAC_RGMII_MODE_RX_ACTIVITY |
1280 			       MAC_RGMII_MODE_RX_ENG_DET;
1281 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1282 			val |= MAC_RGMII_MODE_TX_ENABLE |
1283 			       MAC_RGMII_MODE_TX_LOWPWR |
1284 			       MAC_RGMII_MODE_TX_RESET;
1285 	}
1286 	tw32(MAC_EXT_RGMII_MODE, val);
1287 }
1288 
1289 static void tg3_mdio_start(struct tg3 *tp)
1290 {
1291 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1292 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1293 	udelay(80);
1294 
1295 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1296 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297 		tg3_mdio_config_5785(tp);
1298 }
1299 
1300 static int tg3_mdio_init(struct tg3 *tp)
1301 {
1302 	int i;
1303 	u32 reg;
1304 	struct phy_device *phydev;
1305 
1306 	if (tg3_flag(tp, 5717_PLUS)) {
1307 		u32 is_serdes;
1308 
1309 		tp->phy_addr = tp->pci_fn + 1;
1310 
1311 		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1312 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1313 		else
1314 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1315 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1316 		if (is_serdes)
1317 			tp->phy_addr += 7;
1318 	} else
1319 		tp->phy_addr = TG3_PHY_MII_ADDR;
1320 
1321 	tg3_mdio_start(tp);
1322 
1323 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1324 		return 0;
1325 
1326 	tp->mdio_bus = mdiobus_alloc();
1327 	if (tp->mdio_bus == NULL)
1328 		return -ENOMEM;
1329 
1330 	tp->mdio_bus->name     = "tg3 mdio bus";
1331 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1332 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1333 	tp->mdio_bus->priv     = tp;
1334 	tp->mdio_bus->parent   = &tp->pdev->dev;
1335 	tp->mdio_bus->read     = &tg3_mdio_read;
1336 	tp->mdio_bus->write    = &tg3_mdio_write;
1337 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1338 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1339 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1340 
1341 	for (i = 0; i < PHY_MAX_ADDR; i++)
1342 		tp->mdio_bus->irq[i] = PHY_POLL;
1343 
1344 	/* The bus registration will look for all the PHYs on the mdio bus.
1345 	 * Unfortunately, it does not ensure the PHY is powered up before
1346 	 * accessing the PHY ID registers.  A chip reset is the
1347 	 * quickest way to bring the device back to an operational state..
1348 	 */
1349 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1350 		tg3_bmcr_reset(tp);
1351 
1352 	i = mdiobus_register(tp->mdio_bus);
1353 	if (i) {
1354 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355 		mdiobus_free(tp->mdio_bus);
1356 		return i;
1357 	}
1358 
1359 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1360 
1361 	if (!phydev || !phydev->drv) {
1362 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363 		mdiobus_unregister(tp->mdio_bus);
1364 		mdiobus_free(tp->mdio_bus);
1365 		return -ENODEV;
1366 	}
1367 
1368 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369 	case PHY_ID_BCM57780:
1370 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1371 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1372 		break;
1373 	case PHY_ID_BCM50610:
1374 	case PHY_ID_BCM50610M:
1375 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376 				     PHY_BRCM_RX_REFCLK_UNUSED |
1377 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1385 		/* fallthru */
1386 	case PHY_ID_RTL8211C:
1387 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1388 		break;
1389 	case PHY_ID_RTL8201E:
1390 	case PHY_ID_BCMAC131:
1391 		phydev->interface = PHY_INTERFACE_MODE_MII;
1392 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1394 		break;
1395 	}
1396 
1397 	tg3_flag_set(tp, MDIOBUS_INITED);
1398 
1399 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400 		tg3_mdio_config_5785(tp);
1401 
1402 	return 0;
1403 }
1404 
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1408 		tg3_flag_clear(tp, MDIOBUS_INITED);
1409 		mdiobus_unregister(tp->mdio_bus);
1410 		mdiobus_free(tp->mdio_bus);
1411 	}
1412 }
1413 
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417 	u32 val;
1418 
1419 	val = tr32(GRC_RX_CPU_EVENT);
1420 	val |= GRC_RX_CPU_DRIVER_EVENT;
1421 	tw32_f(GRC_RX_CPU_EVENT, val);
1422 
1423 	tp->last_event_jiffies = jiffies;
1424 }
1425 
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427 
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431 	int i;
1432 	unsigned int delay_cnt;
1433 	long time_remain;
1434 
1435 	/* If enough time has passed, no wait is necessary. */
1436 	time_remain = (long)(tp->last_event_jiffies + 1 +
1437 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438 		      (long)jiffies;
1439 	if (time_remain < 0)
1440 		return;
1441 
1442 	/* Check if we can shorten the wait time. */
1443 	delay_cnt = jiffies_to_usecs(time_remain);
1444 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446 	delay_cnt = (delay_cnt >> 3) + 1;
1447 
1448 	for (i = 0; i < delay_cnt; i++) {
1449 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450 			break;
1451 		udelay(8);
1452 	}
1453 }
1454 
1455 /* tp->lock is held. */
1456 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1457 {
1458 	u32 reg, val;
1459 
1460 	val = 0;
1461 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1462 		val = reg << 16;
1463 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1464 		val |= (reg & 0xffff);
1465 	*data++ = val;
1466 
1467 	val = 0;
1468 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1469 		val = reg << 16;
1470 	if (!tg3_readphy(tp, MII_LPA, &reg))
1471 		val |= (reg & 0xffff);
1472 	*data++ = val;
1473 
1474 	val = 0;
1475 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1476 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1477 			val = reg << 16;
1478 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1479 			val |= (reg & 0xffff);
1480 	}
1481 	*data++ = val;
1482 
1483 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1484 		val = reg << 16;
1485 	else
1486 		val = 0;
1487 	*data++ = val;
1488 }
1489 
1490 /* tp->lock is held. */
1491 static void tg3_ump_link_report(struct tg3 *tp)
1492 {
1493 	u32 data[4];
1494 
1495 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1496 		return;
1497 
1498 	tg3_phy_gather_ump_data(tp, data);
1499 
1500 	tg3_wait_for_event_ack(tp);
1501 
1502 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1503 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1504 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1505 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1506 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1507 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1508 
1509 	tg3_generate_fw_event(tp);
1510 }
1511 
1512 /* tp->lock is held. */
1513 static void tg3_stop_fw(struct tg3 *tp)
1514 {
1515 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1516 		/* Wait for RX cpu to ACK the previous event. */
1517 		tg3_wait_for_event_ack(tp);
1518 
1519 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1520 
1521 		tg3_generate_fw_event(tp);
1522 
1523 		/* Wait for RX cpu to ACK this event. */
1524 		tg3_wait_for_event_ack(tp);
1525 	}
1526 }
1527 
1528 /* tp->lock is held. */
1529 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1530 {
1531 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1532 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1533 
1534 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1535 		switch (kind) {
1536 		case RESET_KIND_INIT:
1537 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538 				      DRV_STATE_START);
1539 			break;
1540 
1541 		case RESET_KIND_SHUTDOWN:
1542 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543 				      DRV_STATE_UNLOAD);
1544 			break;
1545 
1546 		case RESET_KIND_SUSPEND:
1547 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548 				      DRV_STATE_SUSPEND);
1549 			break;
1550 
1551 		default:
1552 			break;
1553 		}
1554 	}
1555 
1556 	if (kind == RESET_KIND_INIT ||
1557 	    kind == RESET_KIND_SUSPEND)
1558 		tg3_ape_driver_state_change(tp, kind);
1559 }
1560 
1561 /* tp->lock is held. */
1562 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1563 {
1564 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1565 		switch (kind) {
1566 		case RESET_KIND_INIT:
1567 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1568 				      DRV_STATE_START_DONE);
1569 			break;
1570 
1571 		case RESET_KIND_SHUTDOWN:
1572 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573 				      DRV_STATE_UNLOAD_DONE);
1574 			break;
1575 
1576 		default:
1577 			break;
1578 		}
1579 	}
1580 
1581 	if (kind == RESET_KIND_SHUTDOWN)
1582 		tg3_ape_driver_state_change(tp, kind);
1583 }
1584 
1585 /* tp->lock is held. */
1586 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1587 {
1588 	if (tg3_flag(tp, ENABLE_ASF)) {
1589 		switch (kind) {
1590 		case RESET_KIND_INIT:
1591 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592 				      DRV_STATE_START);
1593 			break;
1594 
1595 		case RESET_KIND_SHUTDOWN:
1596 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597 				      DRV_STATE_UNLOAD);
1598 			break;
1599 
1600 		case RESET_KIND_SUSPEND:
1601 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602 				      DRV_STATE_SUSPEND);
1603 			break;
1604 
1605 		default:
1606 			break;
1607 		}
1608 	}
1609 }
1610 
1611 static int tg3_poll_fw(struct tg3 *tp)
1612 {
1613 	int i;
1614 	u32 val;
1615 
1616 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1617 		/* Wait up to 20ms for init done. */
1618 		for (i = 0; i < 200; i++) {
1619 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1620 				return 0;
1621 			udelay(100);
1622 		}
1623 		return -ENODEV;
1624 	}
1625 
1626 	/* Wait for firmware initialization to complete. */
1627 	for (i = 0; i < 100000; i++) {
1628 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1629 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1630 			break;
1631 		udelay(10);
1632 	}
1633 
1634 	/* Chip might not be fitted with firmware.  Some Sun onboard
1635 	 * parts are configured like that.  So don't signal the timeout
1636 	 * of the above loop as an error, but do report the lack of
1637 	 * running firmware once.
1638 	 */
1639 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1640 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1641 
1642 		netdev_info(tp->dev, "No firmware running\n");
1643 	}
1644 
1645 	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1646 		/* The 57765 A0 needs a little more
1647 		 * time to do some important work.
1648 		 */
1649 		mdelay(10);
1650 	}
1651 
1652 	return 0;
1653 }
1654 
1655 static void tg3_link_report(struct tg3 *tp)
1656 {
1657 	if (!netif_carrier_ok(tp->dev)) {
1658 		netif_info(tp, link, tp->dev, "Link is down\n");
1659 		tg3_ump_link_report(tp);
1660 	} else if (netif_msg_link(tp)) {
1661 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1662 			    (tp->link_config.active_speed == SPEED_1000 ?
1663 			     1000 :
1664 			     (tp->link_config.active_speed == SPEED_100 ?
1665 			      100 : 10)),
1666 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1667 			     "full" : "half"));
1668 
1669 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1670 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1671 			    "on" : "off",
1672 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1673 			    "on" : "off");
1674 
1675 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1676 			netdev_info(tp->dev, "EEE is %s\n",
1677 				    tp->setlpicnt ? "enabled" : "disabled");
1678 
1679 		tg3_ump_link_report(tp);
1680 	}
1681 }
1682 
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685 	u16 miireg;
1686 
1687 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688 		miireg = ADVERTISE_1000XPAUSE;
1689 	else if (flow_ctrl & FLOW_CTRL_TX)
1690 		miireg = ADVERTISE_1000XPSE_ASYM;
1691 	else if (flow_ctrl & FLOW_CTRL_RX)
1692 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693 	else
1694 		miireg = 0;
1695 
1696 	return miireg;
1697 }
1698 
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701 	u8 cap = 0;
1702 
1703 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706 		if (lcladv & ADVERTISE_1000XPAUSE)
1707 			cap = FLOW_CTRL_RX;
1708 		if (rmtadv & ADVERTISE_1000XPAUSE)
1709 			cap = FLOW_CTRL_TX;
1710 	}
1711 
1712 	return cap;
1713 }
1714 
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717 	u8 autoneg;
1718 	u8 flowctrl = 0;
1719 	u32 old_rx_mode = tp->rx_mode;
1720 	u32 old_tx_mode = tp->tx_mode;
1721 
1722 	if (tg3_flag(tp, USE_PHYLIB))
1723 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724 	else
1725 		autoneg = tp->link_config.autoneg;
1726 
1727 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730 		else
1731 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732 	} else
1733 		flowctrl = tp->link_config.flowctrl;
1734 
1735 	tp->link_config.active_flowctrl = flowctrl;
1736 
1737 	if (flowctrl & FLOW_CTRL_RX)
1738 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739 	else
1740 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741 
1742 	if (old_rx_mode != tp->rx_mode)
1743 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1744 
1745 	if (flowctrl & FLOW_CTRL_TX)
1746 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747 	else
1748 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749 
1750 	if (old_tx_mode != tp->tx_mode)
1751 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753 
1754 static void tg3_adjust_link(struct net_device *dev)
1755 {
1756 	u8 oldflowctrl, linkmesg = 0;
1757 	u32 mac_mode, lcl_adv, rmt_adv;
1758 	struct tg3 *tp = netdev_priv(dev);
1759 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1760 
1761 	spin_lock_bh(&tp->lock);
1762 
1763 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1764 				    MAC_MODE_HALF_DUPLEX);
1765 
1766 	oldflowctrl = tp->link_config.active_flowctrl;
1767 
1768 	if (phydev->link) {
1769 		lcl_adv = 0;
1770 		rmt_adv = 0;
1771 
1772 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1773 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1774 		else if (phydev->speed == SPEED_1000 ||
1775 			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1776 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1777 		else
1778 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1779 
1780 		if (phydev->duplex == DUPLEX_HALF)
1781 			mac_mode |= MAC_MODE_HALF_DUPLEX;
1782 		else {
1783 			lcl_adv = mii_advertise_flowctrl(
1784 				  tp->link_config.flowctrl);
1785 
1786 			if (phydev->pause)
1787 				rmt_adv = LPA_PAUSE_CAP;
1788 			if (phydev->asym_pause)
1789 				rmt_adv |= LPA_PAUSE_ASYM;
1790 		}
1791 
1792 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1793 	} else
1794 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1795 
1796 	if (mac_mode != tp->mac_mode) {
1797 		tp->mac_mode = mac_mode;
1798 		tw32_f(MAC_MODE, tp->mac_mode);
1799 		udelay(40);
1800 	}
1801 
1802 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1803 		if (phydev->speed == SPEED_10)
1804 			tw32(MAC_MI_STAT,
1805 			     MAC_MI_STAT_10MBPS_MODE |
1806 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1807 		else
1808 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809 	}
1810 
1811 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1812 		tw32(MAC_TX_LENGTHS,
1813 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1814 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1815 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1816 	else
1817 		tw32(MAC_TX_LENGTHS,
1818 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1820 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821 
1822 	if (phydev->link != tp->old_link ||
1823 	    phydev->speed != tp->link_config.active_speed ||
1824 	    phydev->duplex != tp->link_config.active_duplex ||
1825 	    oldflowctrl != tp->link_config.active_flowctrl)
1826 		linkmesg = 1;
1827 
1828 	tp->old_link = phydev->link;
1829 	tp->link_config.active_speed = phydev->speed;
1830 	tp->link_config.active_duplex = phydev->duplex;
1831 
1832 	spin_unlock_bh(&tp->lock);
1833 
1834 	if (linkmesg)
1835 		tg3_link_report(tp);
1836 }
1837 
1838 static int tg3_phy_init(struct tg3 *tp)
1839 {
1840 	struct phy_device *phydev;
1841 
1842 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1843 		return 0;
1844 
1845 	/* Bring the PHY back to a known state. */
1846 	tg3_bmcr_reset(tp);
1847 
1848 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1849 
1850 	/* Attach the MAC to the PHY. */
1851 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1852 			     phydev->dev_flags, phydev->interface);
1853 	if (IS_ERR(phydev)) {
1854 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1855 		return PTR_ERR(phydev);
1856 	}
1857 
1858 	/* Mask with MAC supported features. */
1859 	switch (phydev->interface) {
1860 	case PHY_INTERFACE_MODE_GMII:
1861 	case PHY_INTERFACE_MODE_RGMII:
1862 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1863 			phydev->supported &= (PHY_GBIT_FEATURES |
1864 					      SUPPORTED_Pause |
1865 					      SUPPORTED_Asym_Pause);
1866 			break;
1867 		}
1868 		/* fallthru */
1869 	case PHY_INTERFACE_MODE_MII:
1870 		phydev->supported &= (PHY_BASIC_FEATURES |
1871 				      SUPPORTED_Pause |
1872 				      SUPPORTED_Asym_Pause);
1873 		break;
1874 	default:
1875 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1876 		return -EINVAL;
1877 	}
1878 
1879 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1880 
1881 	phydev->advertising = phydev->supported;
1882 
1883 	return 0;
1884 }
1885 
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888 	struct phy_device *phydev;
1889 
1890 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891 		return;
1892 
1893 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894 
1895 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897 		phydev->speed = tp->link_config.speed;
1898 		phydev->duplex = tp->link_config.duplex;
1899 		phydev->autoneg = tp->link_config.autoneg;
1900 		phydev->advertising = tp->link_config.advertising;
1901 	}
1902 
1903 	phy_start(phydev);
1904 
1905 	phy_start_aneg(phydev);
1906 }
1907 
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911 		return;
1912 
1913 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915 
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921 	}
1922 }
1923 
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926 	int err;
1927 	u32 val;
1928 
1929 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930 		return 0;
1931 
1932 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933 		/* Cannot do read-modify-write on 5401 */
1934 		err = tg3_phy_auxctl_write(tp,
1935 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937 					   0x4c20);
1938 		goto done;
1939 	}
1940 
1941 	err = tg3_phy_auxctl_read(tp,
1942 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943 	if (err)
1944 		return err;
1945 
1946 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947 	err = tg3_phy_auxctl_write(tp,
1948 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949 
1950 done:
1951 	return err;
1952 }
1953 
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956 	u32 phytest;
1957 
1958 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959 		u32 phy;
1960 
1961 		tg3_writephy(tp, MII_TG3_FET_TEST,
1962 			     phytest | MII_TG3_FET_SHADOW_EN);
1963 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964 			if (enable)
1965 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966 			else
1967 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969 		}
1970 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971 	}
1972 }
1973 
1974 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975 {
1976 	u32 reg;
1977 
1978 	if (!tg3_flag(tp, 5705_PLUS) ||
1979 	    (tg3_flag(tp, 5717_PLUS) &&
1980 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1981 		return;
1982 
1983 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1984 		tg3_phy_fet_toggle_apd(tp, enable);
1985 		return;
1986 	}
1987 
1988 	reg = MII_TG3_MISC_SHDW_WREN |
1989 	      MII_TG3_MISC_SHDW_SCR5_SEL |
1990 	      MII_TG3_MISC_SHDW_SCR5_LPED |
1991 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
1993 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
1994 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996 
1997 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998 
1999 
2000 	reg = MII_TG3_MISC_SHDW_WREN |
2001 	      MII_TG3_MISC_SHDW_APD_SEL |
2002 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003 	if (enable)
2004 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005 
2006 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007 }
2008 
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010 {
2011 	u32 phy;
2012 
2013 	if (!tg3_flag(tp, 5705_PLUS) ||
2014 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015 		return;
2016 
2017 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018 		u32 ephy;
2019 
2020 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022 
2023 			tg3_writephy(tp, MII_TG3_FET_TEST,
2024 				     ephy | MII_TG3_FET_SHADOW_EN);
2025 			if (!tg3_readphy(tp, reg, &phy)) {
2026 				if (enable)
2027 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028 				else
2029 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030 				tg3_writephy(tp, reg, phy);
2031 			}
2032 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2033 		}
2034 	} else {
2035 		int ret;
2036 
2037 		ret = tg3_phy_auxctl_read(tp,
2038 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039 		if (!ret) {
2040 			if (enable)
2041 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042 			else
2043 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044 			tg3_phy_auxctl_write(tp,
2045 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2046 		}
2047 	}
2048 }
2049 
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052 	int ret;
2053 	u32 val;
2054 
2055 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056 		return;
2057 
2058 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059 	if (!ret)
2060 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063 
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2065 {
2066 	u32 otp, phy;
2067 
2068 	if (!tp->phy_otp)
2069 		return;
2070 
2071 	otp = tp->phy_otp;
2072 
2073 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074 		return;
2075 
2076 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079 
2080 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083 
2084 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087 
2088 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090 
2091 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093 
2094 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097 
2098 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 }
2100 
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102 {
2103 	u32 val;
2104 
2105 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106 		return;
2107 
2108 	tp->setlpicnt = 0;
2109 
2110 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111 	    current_link_up == 1 &&
2112 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2113 	    (tp->link_config.active_speed == SPEED_100 ||
2114 	     tp->link_config.active_speed == SPEED_1000)) {
2115 		u32 eeectl;
2116 
2117 		if (tp->link_config.active_speed == SPEED_1000)
2118 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119 		else
2120 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121 
2122 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123 
2124 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125 				  TG3_CL45_D7_EEERES_STAT, &val);
2126 
2127 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129 			tp->setlpicnt = 2;
2130 	}
2131 
2132 	if (!tp->setlpicnt) {
2133 		if (current_link_up == 1 &&
2134 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137 		}
2138 
2139 		val = tr32(TG3_CPMU_EEE_MODE);
2140 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141 	}
2142 }
2143 
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146 	u32 val;
2147 
2148 	if (tp->link_config.active_speed == SPEED_1000 &&
2149 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151 	     tg3_flag(tp, 57765_CLASS)) &&
2152 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153 		val = MII_TG3_DSP_TAP26_ALNOKO |
2154 		      MII_TG3_DSP_TAP26_RMRXSTO;
2155 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157 	}
2158 
2159 	val = tr32(TG3_CPMU_EEE_MODE);
2160 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162 
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165 	int limit = 100;
2166 
2167 	while (limit--) {
2168 		u32 tmp32;
2169 
2170 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171 			if ((tmp32 & 0x1000) == 0)
2172 				break;
2173 		}
2174 	}
2175 	if (limit < 0)
2176 		return -EBUSY;
2177 
2178 	return 0;
2179 }
2180 
2181 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2182 {
2183 	static const u32 test_pat[4][6] = {
2184 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2188 	};
2189 	int chan;
2190 
2191 	for (chan = 0; chan < 4; chan++) {
2192 		int i;
2193 
2194 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195 			     (chan * 0x2000) | 0x0200);
2196 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2197 
2198 		for (i = 0; i < 6; i++)
2199 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200 				     test_pat[chan][i]);
2201 
2202 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2203 		if (tg3_wait_macro_done(tp)) {
2204 			*resetp = 1;
2205 			return -EBUSY;
2206 		}
2207 
2208 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209 			     (chan * 0x2000) | 0x0200);
2210 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2211 		if (tg3_wait_macro_done(tp)) {
2212 			*resetp = 1;
2213 			return -EBUSY;
2214 		}
2215 
2216 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2217 		if (tg3_wait_macro_done(tp)) {
2218 			*resetp = 1;
2219 			return -EBUSY;
2220 		}
2221 
2222 		for (i = 0; i < 6; i += 2) {
2223 			u32 low, high;
2224 
2225 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227 			    tg3_wait_macro_done(tp)) {
2228 				*resetp = 1;
2229 				return -EBUSY;
2230 			}
2231 			low &= 0x7fff;
2232 			high &= 0x000f;
2233 			if (low != test_pat[chan][i] ||
2234 			    high != test_pat[chan][i+1]) {
2235 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2238 
2239 				return -EBUSY;
2240 			}
2241 		}
2242 	}
2243 
2244 	return 0;
2245 }
2246 
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249 	int chan;
2250 
2251 	for (chan = 0; chan < 4; chan++) {
2252 		int i;
2253 
2254 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255 			     (chan * 0x2000) | 0x0200);
2256 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257 		for (i = 0; i < 6; i++)
2258 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260 		if (tg3_wait_macro_done(tp))
2261 			return -EBUSY;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268 {
2269 	u32 reg32, phy9_orig;
2270 	int retries, do_phy_reset, err;
2271 
2272 	retries = 10;
2273 	do_phy_reset = 1;
2274 	do {
2275 		if (do_phy_reset) {
2276 			err = tg3_bmcr_reset(tp);
2277 			if (err)
2278 				return err;
2279 			do_phy_reset = 0;
2280 		}
2281 
2282 		/* Disable transmitter and interrupt.  */
2283 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284 			continue;
2285 
2286 		reg32 |= 0x3000;
2287 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288 
2289 		/* Set full-duplex, 1000 mbps.  */
2290 		tg3_writephy(tp, MII_BMCR,
2291 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2292 
2293 		/* Set to master mode.  */
2294 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295 			continue;
2296 
2297 		tg3_writephy(tp, MII_CTRL1000,
2298 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2299 
2300 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301 		if (err)
2302 			return err;
2303 
2304 		/* Block the PHY control access.  */
2305 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2306 
2307 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308 		if (!err)
2309 			break;
2310 	} while (--retries);
2311 
2312 	err = tg3_phy_reset_chanpat(tp);
2313 	if (err)
2314 		return err;
2315 
2316 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2317 
2318 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2320 
2321 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2322 
2323 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2324 
2325 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326 		reg32 &= ~0x3000;
2327 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328 	} else if (!err)
2329 		err = -EBUSY;
2330 
2331 	return err;
2332 }
2333 
2334 /* This will reset the tigon3 PHY if there is no valid
2335  * link unless the FORCE argument is non-zero.
2336  */
2337 static int tg3_phy_reset(struct tg3 *tp)
2338 {
2339 	u32 val, cpmuctrl;
2340 	int err;
2341 
2342 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343 		val = tr32(GRC_MISC_CFG);
2344 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345 		udelay(40);
2346 	}
2347 	err  = tg3_readphy(tp, MII_BMSR, &val);
2348 	err |= tg3_readphy(tp, MII_BMSR, &val);
2349 	if (err != 0)
2350 		return -EBUSY;
2351 
2352 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353 		netif_carrier_off(tp->dev);
2354 		tg3_link_report(tp);
2355 	}
2356 
2357 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360 		err = tg3_phy_reset_5703_4_5(tp);
2361 		if (err)
2362 			return err;
2363 		goto out;
2364 	}
2365 
2366 	cpmuctrl = 0;
2367 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2370 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371 			tw32(TG3_CPMU_CTRL,
2372 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373 	}
2374 
2375 	err = tg3_bmcr_reset(tp);
2376 	if (err)
2377 		return err;
2378 
2379 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2382 
2383 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2384 	}
2385 
2386 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2391 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392 			udelay(40);
2393 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394 		}
2395 	}
2396 
2397 	if (tg3_flag(tp, 5717_PLUS) &&
2398 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399 		return 0;
2400 
2401 	tg3_phy_apply_otp(tp);
2402 
2403 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404 		tg3_phy_toggle_apd(tp, true);
2405 	else
2406 		tg3_phy_toggle_apd(tp, false);
2407 
2408 out:
2409 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2413 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2414 	}
2415 
2416 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419 	}
2420 
2421 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2424 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2425 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427 		}
2428 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433 				tg3_writephy(tp, MII_TG3_TEST1,
2434 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2435 			} else
2436 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437 
2438 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439 		}
2440 	}
2441 
2442 	/* Set Extended packet length bit (bit 14) on all chips that */
2443 	/* support jumbo frames */
2444 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445 		/* Cannot do read-modify-write on 5401 */
2446 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448 		/* Set bit 14 with read-modify-write to preserve other bits */
2449 		err = tg3_phy_auxctl_read(tp,
2450 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451 		if (!err)
2452 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2454 	}
2455 
2456 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2457 	 * jumbo frames transmission.
2458 	 */
2459 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2463 	}
2464 
2465 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466 		/* adjust output voltage */
2467 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2468 	}
2469 
2470 	tg3_phy_toggle_automdix(tp, 1);
2471 	tg3_phy_set_wirespeed(tp);
2472 	return 0;
2473 }
2474 
2475 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2476 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2477 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2478 					  TG3_GPIO_MSG_NEED_VAUX)
2479 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2484 
2485 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2490 
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493 	u32 status, shift;
2494 
2495 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498 	else
2499 		status = tr32(TG3_CPMU_DRV_STATUS);
2500 
2501 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2503 	status |= (newstat << shift);
2504 
2505 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508 	else
2509 		tw32(TG3_CPMU_DRV_STATUS, status);
2510 
2511 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
2513 
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516 	if (!tg3_flag(tp, IS_NIC))
2517 		return 0;
2518 
2519 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523 			return -EIO;
2524 
2525 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526 
2527 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2529 
2530 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531 	} else {
2532 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541 	u32 grc_local_ctrl;
2542 
2543 	if (!tg3_flag(tp, IS_NIC) ||
2544 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546 		return;
2547 
2548 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549 
2550 	tw32_wait_f(GRC_LOCAL_CTRL,
2551 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2553 
2554 	tw32_wait_f(GRC_LOCAL_CTRL,
2555 		    grc_local_ctrl,
2556 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2557 
2558 	tw32_wait_f(GRC_LOCAL_CTRL,
2559 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562 
2563 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2564 {
2565 	if (!tg3_flag(tp, IS_NIC))
2566 		return;
2567 
2568 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571 			    (GRC_LCLCTRL_GPIO_OE0 |
2572 			     GRC_LCLCTRL_GPIO_OE1 |
2573 			     GRC_LCLCTRL_GPIO_OE2 |
2574 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2575 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2576 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2577 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581 				     GRC_LCLCTRL_GPIO_OE1 |
2582 				     GRC_LCLCTRL_GPIO_OE2 |
2583 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2584 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2585 				     tp->grc_local_ctrl;
2586 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2588 
2589 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2592 
2593 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 	} else {
2597 		u32 no_gpio2;
2598 		u32 grc_local_ctrl = 0;
2599 
2600 		/* Workaround to prevent overdrawing Amps. */
2601 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604 				    grc_local_ctrl,
2605 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2606 		}
2607 
2608 		/* On 5753 and variants, GPIO2 cannot be used. */
2609 		no_gpio2 = tp->nic_sram_data_cfg &
2610 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2611 
2612 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613 				  GRC_LCLCTRL_GPIO_OE1 |
2614 				  GRC_LCLCTRL_GPIO_OE2 |
2615 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2616 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2617 		if (no_gpio2) {
2618 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2620 		}
2621 		tw32_wait_f(GRC_LOCAL_CTRL,
2622 			    tp->grc_local_ctrl | grc_local_ctrl,
2623 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2624 
2625 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2626 
2627 		tw32_wait_f(GRC_LOCAL_CTRL,
2628 			    tp->grc_local_ctrl | grc_local_ctrl,
2629 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2630 
2631 		if (!no_gpio2) {
2632 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633 			tw32_wait_f(GRC_LOCAL_CTRL,
2634 				    tp->grc_local_ctrl | grc_local_ctrl,
2635 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2636 		}
2637 	}
2638 }
2639 
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2641 {
2642 	u32 msg = 0;
2643 
2644 	/* Serialize power state transitions */
2645 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646 		return;
2647 
2648 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649 		msg = TG3_GPIO_MSG_NEED_VAUX;
2650 
2651 	msg = tg3_set_function_status(tp, msg);
2652 
2653 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654 		goto done;
2655 
2656 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657 		tg3_pwrsrc_switch_to_vaux(tp);
2658 	else
2659 		tg3_pwrsrc_die_with_vmain(tp);
2660 
2661 done:
2662 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2663 }
2664 
2665 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2666 {
2667 	bool need_vaux = false;
2668 
2669 	/* The GPIOs do something completely different on 57765. */
2670 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2671 		return;
2672 
2673 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2676 		tg3_frob_aux_power_5717(tp, include_wol ?
2677 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2678 		return;
2679 	}
2680 
2681 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2682 		struct net_device *dev_peer;
2683 
2684 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2685 
2686 		/* remove_one() may have been run on the peer. */
2687 		if (dev_peer) {
2688 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2689 
2690 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2691 				return;
2692 
2693 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2694 			    tg3_flag(tp_peer, ENABLE_ASF))
2695 				need_vaux = true;
2696 		}
2697 	}
2698 
2699 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700 	    tg3_flag(tp, ENABLE_ASF))
2701 		need_vaux = true;
2702 
2703 	if (need_vaux)
2704 		tg3_pwrsrc_switch_to_vaux(tp);
2705 	else
2706 		tg3_pwrsrc_die_with_vmain(tp);
2707 }
2708 
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712 		return 1;
2713 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714 		if (speed != SPEED_10)
2715 			return 1;
2716 	} else if (speed == SPEED_10)
2717 		return 1;
2718 
2719 	return 0;
2720 }
2721 
2722 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2723 {
2724 	u32 val;
2725 
2726 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2727 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2730 
2731 			sg_dig_ctrl |=
2732 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2735 		}
2736 		return;
2737 	}
2738 
2739 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2740 		tg3_bmcr_reset(tp);
2741 		val = tr32(GRC_MISC_CFG);
2742 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743 		udelay(40);
2744 		return;
2745 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2746 		u32 phytest;
2747 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748 			u32 phy;
2749 
2750 			tg3_writephy(tp, MII_ADVERTISE, 0);
2751 			tg3_writephy(tp, MII_BMCR,
2752 				     BMCR_ANENABLE | BMCR_ANRESTART);
2753 
2754 			tg3_writephy(tp, MII_TG3_FET_TEST,
2755 				     phytest | MII_TG3_FET_SHADOW_EN);
2756 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758 				tg3_writephy(tp,
2759 					     MII_TG3_FET_SHDW_AUXMODE4,
2760 					     phy);
2761 			}
2762 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2763 		}
2764 		return;
2765 	} else if (do_low_power) {
2766 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2768 
2769 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2772 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2773 	}
2774 
2775 	/* The PHY should not be powered down on some chips because
2776 	 * of bugs.
2777 	 */
2778 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2782 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2783 	     !tp->pci_fn))
2784 		return;
2785 
2786 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2787 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2788 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2789 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2790 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2791 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2792 	}
2793 
2794 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2795 }
2796 
2797 /* tp->lock is held. */
2798 static int tg3_nvram_lock(struct tg3 *tp)
2799 {
2800 	if (tg3_flag(tp, NVRAM)) {
2801 		int i;
2802 
2803 		if (tp->nvram_lock_cnt == 0) {
2804 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2805 			for (i = 0; i < 8000; i++) {
2806 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2807 					break;
2808 				udelay(20);
2809 			}
2810 			if (i == 8000) {
2811 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2812 				return -ENODEV;
2813 			}
2814 		}
2815 		tp->nvram_lock_cnt++;
2816 	}
2817 	return 0;
2818 }
2819 
2820 /* tp->lock is held. */
2821 static void tg3_nvram_unlock(struct tg3 *tp)
2822 {
2823 	if (tg3_flag(tp, NVRAM)) {
2824 		if (tp->nvram_lock_cnt > 0)
2825 			tp->nvram_lock_cnt--;
2826 		if (tp->nvram_lock_cnt == 0)
2827 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2828 	}
2829 }
2830 
2831 /* tp->lock is held. */
2832 static void tg3_enable_nvram_access(struct tg3 *tp)
2833 {
2834 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2835 		u32 nvaccess = tr32(NVRAM_ACCESS);
2836 
2837 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2838 	}
2839 }
2840 
2841 /* tp->lock is held. */
2842 static void tg3_disable_nvram_access(struct tg3 *tp)
2843 {
2844 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2845 		u32 nvaccess = tr32(NVRAM_ACCESS);
2846 
2847 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2848 	}
2849 }
2850 
2851 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2852 					u32 offset, u32 *val)
2853 {
2854 	u32 tmp;
2855 	int i;
2856 
2857 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2858 		return -EINVAL;
2859 
2860 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2861 					EEPROM_ADDR_DEVID_MASK |
2862 					EEPROM_ADDR_READ);
2863 	tw32(GRC_EEPROM_ADDR,
2864 	     tmp |
2865 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2866 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2867 	      EEPROM_ADDR_ADDR_MASK) |
2868 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2869 
2870 	for (i = 0; i < 1000; i++) {
2871 		tmp = tr32(GRC_EEPROM_ADDR);
2872 
2873 		if (tmp & EEPROM_ADDR_COMPLETE)
2874 			break;
2875 		msleep(1);
2876 	}
2877 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2878 		return -EBUSY;
2879 
2880 	tmp = tr32(GRC_EEPROM_DATA);
2881 
2882 	/*
2883 	 * The data will always be opposite the native endian
2884 	 * format.  Perform a blind byteswap to compensate.
2885 	 */
2886 	*val = swab32(tmp);
2887 
2888 	return 0;
2889 }
2890 
2891 #define NVRAM_CMD_TIMEOUT 10000
2892 
2893 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2894 {
2895 	int i;
2896 
2897 	tw32(NVRAM_CMD, nvram_cmd);
2898 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2899 		udelay(10);
2900 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2901 			udelay(10);
2902 			break;
2903 		}
2904 	}
2905 
2906 	if (i == NVRAM_CMD_TIMEOUT)
2907 		return -EBUSY;
2908 
2909 	return 0;
2910 }
2911 
2912 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2913 {
2914 	if (tg3_flag(tp, NVRAM) &&
2915 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2916 	    tg3_flag(tp, FLASH) &&
2917 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2918 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2919 
2920 		addr = ((addr / tp->nvram_pagesize) <<
2921 			ATMEL_AT45DB0X1B_PAGE_POS) +
2922 		       (addr % tp->nvram_pagesize);
2923 
2924 	return addr;
2925 }
2926 
2927 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2928 {
2929 	if (tg3_flag(tp, NVRAM) &&
2930 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2931 	    tg3_flag(tp, FLASH) &&
2932 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2933 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2934 
2935 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2936 			tp->nvram_pagesize) +
2937 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2938 
2939 	return addr;
2940 }
2941 
2942 /* NOTE: Data read in from NVRAM is byteswapped according to
2943  * the byteswapping settings for all other register accesses.
2944  * tg3 devices are BE devices, so on a BE machine, the data
2945  * returned will be exactly as it is seen in NVRAM.  On a LE
2946  * machine, the 32-bit value will be byteswapped.
2947  */
2948 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2949 {
2950 	int ret;
2951 
2952 	if (!tg3_flag(tp, NVRAM))
2953 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2954 
2955 	offset = tg3_nvram_phys_addr(tp, offset);
2956 
2957 	if (offset > NVRAM_ADDR_MSK)
2958 		return -EINVAL;
2959 
2960 	ret = tg3_nvram_lock(tp);
2961 	if (ret)
2962 		return ret;
2963 
2964 	tg3_enable_nvram_access(tp);
2965 
2966 	tw32(NVRAM_ADDR, offset);
2967 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2968 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2969 
2970 	if (ret == 0)
2971 		*val = tr32(NVRAM_RDDATA);
2972 
2973 	tg3_disable_nvram_access(tp);
2974 
2975 	tg3_nvram_unlock(tp);
2976 
2977 	return ret;
2978 }
2979 
2980 /* Ensures NVRAM data is in bytestream format. */
2981 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2982 {
2983 	u32 v;
2984 	int res = tg3_nvram_read(tp, offset, &v);
2985 	if (!res)
2986 		*val = cpu_to_be32(v);
2987 	return res;
2988 }
2989 
2990 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2991 				    u32 offset, u32 len, u8 *buf)
2992 {
2993 	int i, j, rc = 0;
2994 	u32 val;
2995 
2996 	for (i = 0; i < len; i += 4) {
2997 		u32 addr;
2998 		__be32 data;
2999 
3000 		addr = offset + i;
3001 
3002 		memcpy(&data, buf + i, 4);
3003 
3004 		/*
3005 		 * The SEEPROM interface expects the data to always be opposite
3006 		 * the native endian format.  We accomplish this by reversing
3007 		 * all the operations that would have been performed on the
3008 		 * data from a call to tg3_nvram_read_be32().
3009 		 */
3010 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3011 
3012 		val = tr32(GRC_EEPROM_ADDR);
3013 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3014 
3015 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3016 			EEPROM_ADDR_READ);
3017 		tw32(GRC_EEPROM_ADDR, val |
3018 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3019 			(addr & EEPROM_ADDR_ADDR_MASK) |
3020 			EEPROM_ADDR_START |
3021 			EEPROM_ADDR_WRITE);
3022 
3023 		for (j = 0; j < 1000; j++) {
3024 			val = tr32(GRC_EEPROM_ADDR);
3025 
3026 			if (val & EEPROM_ADDR_COMPLETE)
3027 				break;
3028 			msleep(1);
3029 		}
3030 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3031 			rc = -EBUSY;
3032 			break;
3033 		}
3034 	}
3035 
3036 	return rc;
3037 }
3038 
3039 /* offset and length are dword aligned */
3040 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3041 		u8 *buf)
3042 {
3043 	int ret = 0;
3044 	u32 pagesize = tp->nvram_pagesize;
3045 	u32 pagemask = pagesize - 1;
3046 	u32 nvram_cmd;
3047 	u8 *tmp;
3048 
3049 	tmp = kmalloc(pagesize, GFP_KERNEL);
3050 	if (tmp == NULL)
3051 		return -ENOMEM;
3052 
3053 	while (len) {
3054 		int j;
3055 		u32 phy_addr, page_off, size;
3056 
3057 		phy_addr = offset & ~pagemask;
3058 
3059 		for (j = 0; j < pagesize; j += 4) {
3060 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3061 						  (__be32 *) (tmp + j));
3062 			if (ret)
3063 				break;
3064 		}
3065 		if (ret)
3066 			break;
3067 
3068 		page_off = offset & pagemask;
3069 		size = pagesize;
3070 		if (len < size)
3071 			size = len;
3072 
3073 		len -= size;
3074 
3075 		memcpy(tmp + page_off, buf, size);
3076 
3077 		offset = offset + (pagesize - page_off);
3078 
3079 		tg3_enable_nvram_access(tp);
3080 
3081 		/*
3082 		 * Before we can erase the flash page, we need
3083 		 * to issue a special "write enable" command.
3084 		 */
3085 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3086 
3087 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3088 			break;
3089 
3090 		/* Erase the target page */
3091 		tw32(NVRAM_ADDR, phy_addr);
3092 
3093 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3094 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3095 
3096 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3097 			break;
3098 
3099 		/* Issue another write enable to start the write. */
3100 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3101 
3102 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3103 			break;
3104 
3105 		for (j = 0; j < pagesize; j += 4) {
3106 			__be32 data;
3107 
3108 			data = *((__be32 *) (tmp + j));
3109 
3110 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3111 
3112 			tw32(NVRAM_ADDR, phy_addr + j);
3113 
3114 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3115 				NVRAM_CMD_WR;
3116 
3117 			if (j == 0)
3118 				nvram_cmd |= NVRAM_CMD_FIRST;
3119 			else if (j == (pagesize - 4))
3120 				nvram_cmd |= NVRAM_CMD_LAST;
3121 
3122 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3123 			if (ret)
3124 				break;
3125 		}
3126 		if (ret)
3127 			break;
3128 	}
3129 
3130 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3131 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3132 
3133 	kfree(tmp);
3134 
3135 	return ret;
3136 }
3137 
3138 /* offset and length are dword aligned */
3139 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3140 		u8 *buf)
3141 {
3142 	int i, ret = 0;
3143 
3144 	for (i = 0; i < len; i += 4, offset += 4) {
3145 		u32 page_off, phy_addr, nvram_cmd;
3146 		__be32 data;
3147 
3148 		memcpy(&data, buf + i, 4);
3149 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3150 
3151 		page_off = offset % tp->nvram_pagesize;
3152 
3153 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3154 
3155 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3156 
3157 		if (page_off == 0 || i == 0)
3158 			nvram_cmd |= NVRAM_CMD_FIRST;
3159 		if (page_off == (tp->nvram_pagesize - 4))
3160 			nvram_cmd |= NVRAM_CMD_LAST;
3161 
3162 		if (i == (len - 4))
3163 			nvram_cmd |= NVRAM_CMD_LAST;
3164 
3165 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3166 		    !tg3_flag(tp, FLASH) ||
3167 		    !tg3_flag(tp, 57765_PLUS))
3168 			tw32(NVRAM_ADDR, phy_addr);
3169 
3170 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3171 		    !tg3_flag(tp, 5755_PLUS) &&
3172 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3173 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3174 			u32 cmd;
3175 
3176 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3177 			ret = tg3_nvram_exec_cmd(tp, cmd);
3178 			if (ret)
3179 				break;
3180 		}
3181 		if (!tg3_flag(tp, FLASH)) {
3182 			/* We always do complete word writes to eeprom. */
3183 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3184 		}
3185 
3186 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3187 		if (ret)
3188 			break;
3189 	}
3190 	return ret;
3191 }
3192 
3193 /* offset and length are dword aligned */
3194 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3195 {
3196 	int ret;
3197 
3198 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3199 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3200 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3201 		udelay(40);
3202 	}
3203 
3204 	if (!tg3_flag(tp, NVRAM)) {
3205 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3206 	} else {
3207 		u32 grc_mode;
3208 
3209 		ret = tg3_nvram_lock(tp);
3210 		if (ret)
3211 			return ret;
3212 
3213 		tg3_enable_nvram_access(tp);
3214 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3215 			tw32(NVRAM_WRITE1, 0x406);
3216 
3217 		grc_mode = tr32(GRC_MODE);
3218 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3219 
3220 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3221 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3222 				buf);
3223 		} else {
3224 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3225 				buf);
3226 		}
3227 
3228 		grc_mode = tr32(GRC_MODE);
3229 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3230 
3231 		tg3_disable_nvram_access(tp);
3232 		tg3_nvram_unlock(tp);
3233 	}
3234 
3235 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3236 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3237 		udelay(40);
3238 	}
3239 
3240 	return ret;
3241 }
3242 
3243 #define RX_CPU_SCRATCH_BASE	0x30000
3244 #define RX_CPU_SCRATCH_SIZE	0x04000
3245 #define TX_CPU_SCRATCH_BASE	0x34000
3246 #define TX_CPU_SCRATCH_SIZE	0x04000
3247 
3248 /* tp->lock is held. */
3249 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3250 {
3251 	int i;
3252 
3253 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3254 
3255 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3256 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3257 
3258 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3259 		return 0;
3260 	}
3261 	if (offset == RX_CPU_BASE) {
3262 		for (i = 0; i < 10000; i++) {
3263 			tw32(offset + CPU_STATE, 0xffffffff);
3264 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3265 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3266 				break;
3267 		}
3268 
3269 		tw32(offset + CPU_STATE, 0xffffffff);
3270 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3271 		udelay(10);
3272 	} else {
3273 		for (i = 0; i < 10000; i++) {
3274 			tw32(offset + CPU_STATE, 0xffffffff);
3275 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3276 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3277 				break;
3278 		}
3279 	}
3280 
3281 	if (i >= 10000) {
3282 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3283 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3284 		return -ENODEV;
3285 	}
3286 
3287 	/* Clear firmware's nvram arbitration. */
3288 	if (tg3_flag(tp, NVRAM))
3289 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3290 	return 0;
3291 }
3292 
3293 struct fw_info {
3294 	unsigned int fw_base;
3295 	unsigned int fw_len;
3296 	const __be32 *fw_data;
3297 };
3298 
3299 /* tp->lock is held. */
3300 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3301 				 u32 cpu_scratch_base, int cpu_scratch_size,
3302 				 struct fw_info *info)
3303 {
3304 	int err, lock_err, i;
3305 	void (*write_op)(struct tg3 *, u32, u32);
3306 
3307 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3308 		netdev_err(tp->dev,
3309 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3310 			   __func__);
3311 		return -EINVAL;
3312 	}
3313 
3314 	if (tg3_flag(tp, 5705_PLUS))
3315 		write_op = tg3_write_mem;
3316 	else
3317 		write_op = tg3_write_indirect_reg32;
3318 
3319 	/* It is possible that bootcode is still loading at this point.
3320 	 * Get the nvram lock first before halting the cpu.
3321 	 */
3322 	lock_err = tg3_nvram_lock(tp);
3323 	err = tg3_halt_cpu(tp, cpu_base);
3324 	if (!lock_err)
3325 		tg3_nvram_unlock(tp);
3326 	if (err)
3327 		goto out;
3328 
3329 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3330 		write_op(tp, cpu_scratch_base + i, 0);
3331 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3332 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3333 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3334 		write_op(tp, (cpu_scratch_base +
3335 			      (info->fw_base & 0xffff) +
3336 			      (i * sizeof(u32))),
3337 			      be32_to_cpu(info->fw_data[i]));
3338 
3339 	err = 0;
3340 
3341 out:
3342 	return err;
3343 }
3344 
3345 /* tp->lock is held. */
3346 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3347 {
3348 	struct fw_info info;
3349 	const __be32 *fw_data;
3350 	int err, i;
3351 
3352 	fw_data = (void *)tp->fw->data;
3353 
3354 	/* Firmware blob starts with version numbers, followed by
3355 	   start address and length. We are setting complete length.
3356 	   length = end_address_of_bss - start_address_of_text.
3357 	   Remainder is the blob to be loaded contiguously
3358 	   from start address. */
3359 
3360 	info.fw_base = be32_to_cpu(fw_data[1]);
3361 	info.fw_len = tp->fw->size - 12;
3362 	info.fw_data = &fw_data[3];
3363 
3364 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3365 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3366 				    &info);
3367 	if (err)
3368 		return err;
3369 
3370 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3371 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3372 				    &info);
3373 	if (err)
3374 		return err;
3375 
3376 	/* Now startup only the RX cpu. */
3377 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3378 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3379 
3380 	for (i = 0; i < 5; i++) {
3381 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3382 			break;
3383 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3384 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3385 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3386 		udelay(1000);
3387 	}
3388 	if (i >= 5) {
3389 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3390 			   "should be %08x\n", __func__,
3391 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3392 		return -ENODEV;
3393 	}
3394 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3395 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3396 
3397 	return 0;
3398 }
3399 
3400 /* tp->lock is held. */
3401 static int tg3_load_tso_firmware(struct tg3 *tp)
3402 {
3403 	struct fw_info info;
3404 	const __be32 *fw_data;
3405 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3406 	int err, i;
3407 
3408 	if (tg3_flag(tp, HW_TSO_1) ||
3409 	    tg3_flag(tp, HW_TSO_2) ||
3410 	    tg3_flag(tp, HW_TSO_3))
3411 		return 0;
3412 
3413 	fw_data = (void *)tp->fw->data;
3414 
3415 	/* Firmware blob starts with version numbers, followed by
3416 	   start address and length. We are setting complete length.
3417 	   length = end_address_of_bss - start_address_of_text.
3418 	   Remainder is the blob to be loaded contiguously
3419 	   from start address. */
3420 
3421 	info.fw_base = be32_to_cpu(fw_data[1]);
3422 	cpu_scratch_size = tp->fw_len;
3423 	info.fw_len = tp->fw->size - 12;
3424 	info.fw_data = &fw_data[3];
3425 
3426 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3427 		cpu_base = RX_CPU_BASE;
3428 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3429 	} else {
3430 		cpu_base = TX_CPU_BASE;
3431 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3432 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3433 	}
3434 
3435 	err = tg3_load_firmware_cpu(tp, cpu_base,
3436 				    cpu_scratch_base, cpu_scratch_size,
3437 				    &info);
3438 	if (err)
3439 		return err;
3440 
3441 	/* Now startup the cpu. */
3442 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3443 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3444 
3445 	for (i = 0; i < 5; i++) {
3446 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3447 			break;
3448 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3449 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3450 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3451 		udelay(1000);
3452 	}
3453 	if (i >= 5) {
3454 		netdev_err(tp->dev,
3455 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3456 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3457 		return -ENODEV;
3458 	}
3459 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3460 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3461 	return 0;
3462 }
3463 
3464 
3465 /* tp->lock is held. */
3466 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3467 {
3468 	u32 addr_high, addr_low;
3469 	int i;
3470 
3471 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3472 		     tp->dev->dev_addr[1]);
3473 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3474 		    (tp->dev->dev_addr[3] << 16) |
3475 		    (tp->dev->dev_addr[4] <<  8) |
3476 		    (tp->dev->dev_addr[5] <<  0));
3477 	for (i = 0; i < 4; i++) {
3478 		if (i == 1 && skip_mac_1)
3479 			continue;
3480 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3481 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3482 	}
3483 
3484 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3485 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3486 		for (i = 0; i < 12; i++) {
3487 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3488 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3489 		}
3490 	}
3491 
3492 	addr_high = (tp->dev->dev_addr[0] +
3493 		     tp->dev->dev_addr[1] +
3494 		     tp->dev->dev_addr[2] +
3495 		     tp->dev->dev_addr[3] +
3496 		     tp->dev->dev_addr[4] +
3497 		     tp->dev->dev_addr[5]) &
3498 		TX_BACKOFF_SEED_MASK;
3499 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3500 }
3501 
3502 static void tg3_enable_register_access(struct tg3 *tp)
3503 {
3504 	/*
3505 	 * Make sure register accesses (indirect or otherwise) will function
3506 	 * correctly.
3507 	 */
3508 	pci_write_config_dword(tp->pdev,
3509 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3510 }
3511 
3512 static int tg3_power_up(struct tg3 *tp)
3513 {
3514 	int err;
3515 
3516 	tg3_enable_register_access(tp);
3517 
3518 	err = pci_set_power_state(tp->pdev, PCI_D0);
3519 	if (!err) {
3520 		/* Switch out of Vaux if it is a NIC */
3521 		tg3_pwrsrc_switch_to_vmain(tp);
3522 	} else {
3523 		netdev_err(tp->dev, "Transition to D0 failed\n");
3524 	}
3525 
3526 	return err;
3527 }
3528 
3529 static int tg3_setup_phy(struct tg3 *, int);
3530 
3531 static int tg3_power_down_prepare(struct tg3 *tp)
3532 {
3533 	u32 misc_host_ctrl;
3534 	bool device_should_wake, do_low_power;
3535 
3536 	tg3_enable_register_access(tp);
3537 
3538 	/* Restore the CLKREQ setting. */
3539 	if (tg3_flag(tp, CLKREQ_BUG)) {
3540 		u16 lnkctl;
3541 
3542 		pci_read_config_word(tp->pdev,
3543 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3544 				     &lnkctl);
3545 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3546 		pci_write_config_word(tp->pdev,
3547 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3548 				      lnkctl);
3549 	}
3550 
3551 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3552 	tw32(TG3PCI_MISC_HOST_CTRL,
3553 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3554 
3555 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3556 			     tg3_flag(tp, WOL_ENABLE);
3557 
3558 	if (tg3_flag(tp, USE_PHYLIB)) {
3559 		do_low_power = false;
3560 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3561 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3562 			struct phy_device *phydev;
3563 			u32 phyid, advertising;
3564 
3565 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3566 
3567 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3568 
3569 			tp->link_config.speed = phydev->speed;
3570 			tp->link_config.duplex = phydev->duplex;
3571 			tp->link_config.autoneg = phydev->autoneg;
3572 			tp->link_config.advertising = phydev->advertising;
3573 
3574 			advertising = ADVERTISED_TP |
3575 				      ADVERTISED_Pause |
3576 				      ADVERTISED_Autoneg |
3577 				      ADVERTISED_10baseT_Half;
3578 
3579 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3580 				if (tg3_flag(tp, WOL_SPEED_100MB))
3581 					advertising |=
3582 						ADVERTISED_100baseT_Half |
3583 						ADVERTISED_100baseT_Full |
3584 						ADVERTISED_10baseT_Full;
3585 				else
3586 					advertising |= ADVERTISED_10baseT_Full;
3587 			}
3588 
3589 			phydev->advertising = advertising;
3590 
3591 			phy_start_aneg(phydev);
3592 
3593 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3594 			if (phyid != PHY_ID_BCMAC131) {
3595 				phyid &= PHY_BCM_OUI_MASK;
3596 				if (phyid == PHY_BCM_OUI_1 ||
3597 				    phyid == PHY_BCM_OUI_2 ||
3598 				    phyid == PHY_BCM_OUI_3)
3599 					do_low_power = true;
3600 			}
3601 		}
3602 	} else {
3603 		do_low_power = true;
3604 
3605 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3606 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3607 
3608 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3609 			tg3_setup_phy(tp, 0);
3610 	}
3611 
3612 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3613 		u32 val;
3614 
3615 		val = tr32(GRC_VCPU_EXT_CTRL);
3616 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3617 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3618 		int i;
3619 		u32 val;
3620 
3621 		for (i = 0; i < 200; i++) {
3622 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3623 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3624 				break;
3625 			msleep(1);
3626 		}
3627 	}
3628 	if (tg3_flag(tp, WOL_CAP))
3629 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3630 						     WOL_DRV_STATE_SHUTDOWN |
3631 						     WOL_DRV_WOL |
3632 						     WOL_SET_MAGIC_PKT);
3633 
3634 	if (device_should_wake) {
3635 		u32 mac_mode;
3636 
3637 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3638 			if (do_low_power &&
3639 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3640 				tg3_phy_auxctl_write(tp,
3641 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3642 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3643 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3644 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3645 				udelay(40);
3646 			}
3647 
3648 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3649 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3650 			else
3651 				mac_mode = MAC_MODE_PORT_MODE_MII;
3652 
3653 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3654 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3655 			    ASIC_REV_5700) {
3656 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3657 					     SPEED_100 : SPEED_10;
3658 				if (tg3_5700_link_polarity(tp, speed))
3659 					mac_mode |= MAC_MODE_LINK_POLARITY;
3660 				else
3661 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3662 			}
3663 		} else {
3664 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3665 		}
3666 
3667 		if (!tg3_flag(tp, 5750_PLUS))
3668 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3669 
3670 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3671 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3672 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3673 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3674 
3675 		if (tg3_flag(tp, ENABLE_APE))
3676 			mac_mode |= MAC_MODE_APE_TX_EN |
3677 				    MAC_MODE_APE_RX_EN |
3678 				    MAC_MODE_TDE_ENABLE;
3679 
3680 		tw32_f(MAC_MODE, mac_mode);
3681 		udelay(100);
3682 
3683 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3684 		udelay(10);
3685 	}
3686 
3687 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3688 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3689 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3690 		u32 base_val;
3691 
3692 		base_val = tp->pci_clock_ctrl;
3693 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3694 			     CLOCK_CTRL_TXCLK_DISABLE);
3695 
3696 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3697 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3698 	} else if (tg3_flag(tp, 5780_CLASS) ||
3699 		   tg3_flag(tp, CPMU_PRESENT) ||
3700 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3701 		/* do nothing */
3702 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3703 		u32 newbits1, newbits2;
3704 
3705 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3706 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3707 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3708 				    CLOCK_CTRL_TXCLK_DISABLE |
3709 				    CLOCK_CTRL_ALTCLK);
3710 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3711 		} else if (tg3_flag(tp, 5705_PLUS)) {
3712 			newbits1 = CLOCK_CTRL_625_CORE;
3713 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3714 		} else {
3715 			newbits1 = CLOCK_CTRL_ALTCLK;
3716 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717 		}
3718 
3719 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3720 			    40);
3721 
3722 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3723 			    40);
3724 
3725 		if (!tg3_flag(tp, 5705_PLUS)) {
3726 			u32 newbits3;
3727 
3728 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3729 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3730 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3731 					    CLOCK_CTRL_TXCLK_DISABLE |
3732 					    CLOCK_CTRL_44MHZ_CORE);
3733 			} else {
3734 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3735 			}
3736 
3737 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3738 				    tp->pci_clock_ctrl | newbits3, 40);
3739 		}
3740 	}
3741 
3742 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3743 		tg3_power_down_phy(tp, do_low_power);
3744 
3745 	tg3_frob_aux_power(tp, true);
3746 
3747 	/* Workaround for unstable PLL clock */
3748 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3749 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3750 		u32 val = tr32(0x7d00);
3751 
3752 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3753 		tw32(0x7d00, val);
3754 		if (!tg3_flag(tp, ENABLE_ASF)) {
3755 			int err;
3756 
3757 			err = tg3_nvram_lock(tp);
3758 			tg3_halt_cpu(tp, RX_CPU_BASE);
3759 			if (!err)
3760 				tg3_nvram_unlock(tp);
3761 		}
3762 	}
3763 
3764 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3765 
3766 	return 0;
3767 }
3768 
3769 static void tg3_power_down(struct tg3 *tp)
3770 {
3771 	tg3_power_down_prepare(tp);
3772 
3773 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3774 	pci_set_power_state(tp->pdev, PCI_D3hot);
3775 }
3776 
3777 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3778 {
3779 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3780 	case MII_TG3_AUX_STAT_10HALF:
3781 		*speed = SPEED_10;
3782 		*duplex = DUPLEX_HALF;
3783 		break;
3784 
3785 	case MII_TG3_AUX_STAT_10FULL:
3786 		*speed = SPEED_10;
3787 		*duplex = DUPLEX_FULL;
3788 		break;
3789 
3790 	case MII_TG3_AUX_STAT_100HALF:
3791 		*speed = SPEED_100;
3792 		*duplex = DUPLEX_HALF;
3793 		break;
3794 
3795 	case MII_TG3_AUX_STAT_100FULL:
3796 		*speed = SPEED_100;
3797 		*duplex = DUPLEX_FULL;
3798 		break;
3799 
3800 	case MII_TG3_AUX_STAT_1000HALF:
3801 		*speed = SPEED_1000;
3802 		*duplex = DUPLEX_HALF;
3803 		break;
3804 
3805 	case MII_TG3_AUX_STAT_1000FULL:
3806 		*speed = SPEED_1000;
3807 		*duplex = DUPLEX_FULL;
3808 		break;
3809 
3810 	default:
3811 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3812 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3813 				 SPEED_10;
3814 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3815 				  DUPLEX_HALF;
3816 			break;
3817 		}
3818 		*speed = SPEED_UNKNOWN;
3819 		*duplex = DUPLEX_UNKNOWN;
3820 		break;
3821 	}
3822 }
3823 
3824 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3825 {
3826 	int err = 0;
3827 	u32 val, new_adv;
3828 
3829 	new_adv = ADVERTISE_CSMA;
3830 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3831 	new_adv |= mii_advertise_flowctrl(flowctrl);
3832 
3833 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3834 	if (err)
3835 		goto done;
3836 
3837 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3838 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3839 
3840 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3841 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3842 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3843 
3844 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3845 		if (err)
3846 			goto done;
3847 	}
3848 
3849 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3850 		goto done;
3851 
3852 	tw32(TG3_CPMU_EEE_MODE,
3853 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3854 
3855 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3856 	if (!err) {
3857 		u32 err2;
3858 
3859 		val = 0;
3860 		/* Advertise 100-BaseTX EEE ability */
3861 		if (advertise & ADVERTISED_100baseT_Full)
3862 			val |= MDIO_AN_EEE_ADV_100TX;
3863 		/* Advertise 1000-BaseT EEE ability */
3864 		if (advertise & ADVERTISED_1000baseT_Full)
3865 			val |= MDIO_AN_EEE_ADV_1000T;
3866 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3867 		if (err)
3868 			val = 0;
3869 
3870 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3871 		case ASIC_REV_5717:
3872 		case ASIC_REV_57765:
3873 		case ASIC_REV_57766:
3874 		case ASIC_REV_5719:
3875 			/* If we advertised any eee advertisements above... */
3876 			if (val)
3877 				val = MII_TG3_DSP_TAP26_ALNOKO |
3878 				      MII_TG3_DSP_TAP26_RMRXSTO |
3879 				      MII_TG3_DSP_TAP26_OPCSINPT;
3880 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3881 			/* Fall through */
3882 		case ASIC_REV_5720:
3883 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3884 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3885 						 MII_TG3_DSP_CH34TP2_HIBW01);
3886 		}
3887 
3888 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3889 		if (!err)
3890 			err = err2;
3891 	}
3892 
3893 done:
3894 	return err;
3895 }
3896 
3897 static void tg3_phy_copper_begin(struct tg3 *tp)
3898 {
3899 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3900 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3901 		u32 adv, fc;
3902 
3903 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3904 			adv = ADVERTISED_10baseT_Half |
3905 			      ADVERTISED_10baseT_Full;
3906 			if (tg3_flag(tp, WOL_SPEED_100MB))
3907 				adv |= ADVERTISED_100baseT_Half |
3908 				       ADVERTISED_100baseT_Full;
3909 
3910 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3911 		} else {
3912 			adv = tp->link_config.advertising;
3913 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3914 				adv &= ~(ADVERTISED_1000baseT_Half |
3915 					 ADVERTISED_1000baseT_Full);
3916 
3917 			fc = tp->link_config.flowctrl;
3918 		}
3919 
3920 		tg3_phy_autoneg_cfg(tp, adv, fc);
3921 
3922 		tg3_writephy(tp, MII_BMCR,
3923 			     BMCR_ANENABLE | BMCR_ANRESTART);
3924 	} else {
3925 		int i;
3926 		u32 bmcr, orig_bmcr;
3927 
3928 		tp->link_config.active_speed = tp->link_config.speed;
3929 		tp->link_config.active_duplex = tp->link_config.duplex;
3930 
3931 		bmcr = 0;
3932 		switch (tp->link_config.speed) {
3933 		default:
3934 		case SPEED_10:
3935 			break;
3936 
3937 		case SPEED_100:
3938 			bmcr |= BMCR_SPEED100;
3939 			break;
3940 
3941 		case SPEED_1000:
3942 			bmcr |= BMCR_SPEED1000;
3943 			break;
3944 		}
3945 
3946 		if (tp->link_config.duplex == DUPLEX_FULL)
3947 			bmcr |= BMCR_FULLDPLX;
3948 
3949 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3950 		    (bmcr != orig_bmcr)) {
3951 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3952 			for (i = 0; i < 1500; i++) {
3953 				u32 tmp;
3954 
3955 				udelay(10);
3956 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3957 				    tg3_readphy(tp, MII_BMSR, &tmp))
3958 					continue;
3959 				if (!(tmp & BMSR_LSTATUS)) {
3960 					udelay(40);
3961 					break;
3962 				}
3963 			}
3964 			tg3_writephy(tp, MII_BMCR, bmcr);
3965 			udelay(40);
3966 		}
3967 	}
3968 }
3969 
3970 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3971 {
3972 	int err;
3973 
3974 	/* Turn off tap power management. */
3975 	/* Set Extended packet length bit */
3976 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3977 
3978 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3979 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3980 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3981 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3982 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3983 
3984 	udelay(40);
3985 
3986 	return err;
3987 }
3988 
3989 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3990 {
3991 	u32 advmsk, tgtadv, advertising;
3992 
3993 	advertising = tp->link_config.advertising;
3994 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3995 
3996 	advmsk = ADVERTISE_ALL;
3997 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3998 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3999 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4000 	}
4001 
4002 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4003 		return false;
4004 
4005 	if ((*lcladv & advmsk) != tgtadv)
4006 		return false;
4007 
4008 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4009 		u32 tg3_ctrl;
4010 
4011 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4012 
4013 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4014 			return false;
4015 
4016 		if (tgtadv &&
4017 		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4018 		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4019 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4020 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4021 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4022 		} else {
4023 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4024 		}
4025 
4026 		if (tg3_ctrl != tgtadv)
4027 			return false;
4028 	}
4029 
4030 	return true;
4031 }
4032 
4033 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4034 {
4035 	u32 lpeth = 0;
4036 
4037 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4038 		u32 val;
4039 
4040 		if (tg3_readphy(tp, MII_STAT1000, &val))
4041 			return false;
4042 
4043 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4044 	}
4045 
4046 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4047 		return false;
4048 
4049 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4050 	tp->link_config.rmt_adv = lpeth;
4051 
4052 	return true;
4053 }
4054 
4055 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4056 {
4057 	int current_link_up;
4058 	u32 bmsr, val;
4059 	u32 lcl_adv, rmt_adv;
4060 	u16 current_speed;
4061 	u8 current_duplex;
4062 	int i, err;
4063 
4064 	tw32(MAC_EVENT, 0);
4065 
4066 	tw32_f(MAC_STATUS,
4067 	     (MAC_STATUS_SYNC_CHANGED |
4068 	      MAC_STATUS_CFG_CHANGED |
4069 	      MAC_STATUS_MI_COMPLETION |
4070 	      MAC_STATUS_LNKSTATE_CHANGED));
4071 	udelay(40);
4072 
4073 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4074 		tw32_f(MAC_MI_MODE,
4075 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4076 		udelay(80);
4077 	}
4078 
4079 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4080 
4081 	/* Some third-party PHYs need to be reset on link going
4082 	 * down.
4083 	 */
4084 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4085 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4086 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4087 	    netif_carrier_ok(tp->dev)) {
4088 		tg3_readphy(tp, MII_BMSR, &bmsr);
4089 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4090 		    !(bmsr & BMSR_LSTATUS))
4091 			force_reset = 1;
4092 	}
4093 	if (force_reset)
4094 		tg3_phy_reset(tp);
4095 
4096 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4097 		tg3_readphy(tp, MII_BMSR, &bmsr);
4098 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4099 		    !tg3_flag(tp, INIT_COMPLETE))
4100 			bmsr = 0;
4101 
4102 		if (!(bmsr & BMSR_LSTATUS)) {
4103 			err = tg3_init_5401phy_dsp(tp);
4104 			if (err)
4105 				return err;
4106 
4107 			tg3_readphy(tp, MII_BMSR, &bmsr);
4108 			for (i = 0; i < 1000; i++) {
4109 				udelay(10);
4110 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4111 				    (bmsr & BMSR_LSTATUS)) {
4112 					udelay(40);
4113 					break;
4114 				}
4115 			}
4116 
4117 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4118 			    TG3_PHY_REV_BCM5401_B0 &&
4119 			    !(bmsr & BMSR_LSTATUS) &&
4120 			    tp->link_config.active_speed == SPEED_1000) {
4121 				err = tg3_phy_reset(tp);
4122 				if (!err)
4123 					err = tg3_init_5401phy_dsp(tp);
4124 				if (err)
4125 					return err;
4126 			}
4127 		}
4128 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4129 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4130 		/* 5701 {A0,B0} CRC bug workaround */
4131 		tg3_writephy(tp, 0x15, 0x0a75);
4132 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4133 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4134 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4135 	}
4136 
4137 	/* Clear pending interrupts... */
4138 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4139 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4140 
4141 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4142 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4143 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4144 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4145 
4146 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4147 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4148 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4149 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4150 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4151 		else
4152 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4153 	}
4154 
4155 	current_link_up = 0;
4156 	current_speed = SPEED_UNKNOWN;
4157 	current_duplex = DUPLEX_UNKNOWN;
4158 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4159 	tp->link_config.rmt_adv = 0;
4160 
4161 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4162 		err = tg3_phy_auxctl_read(tp,
4163 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4164 					  &val);
4165 		if (!err && !(val & (1 << 10))) {
4166 			tg3_phy_auxctl_write(tp,
4167 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4168 					     val | (1 << 10));
4169 			goto relink;
4170 		}
4171 	}
4172 
4173 	bmsr = 0;
4174 	for (i = 0; i < 100; i++) {
4175 		tg3_readphy(tp, MII_BMSR, &bmsr);
4176 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4177 		    (bmsr & BMSR_LSTATUS))
4178 			break;
4179 		udelay(40);
4180 	}
4181 
4182 	if (bmsr & BMSR_LSTATUS) {
4183 		u32 aux_stat, bmcr;
4184 
4185 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4186 		for (i = 0; i < 2000; i++) {
4187 			udelay(10);
4188 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4189 			    aux_stat)
4190 				break;
4191 		}
4192 
4193 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4194 					     &current_speed,
4195 					     &current_duplex);
4196 
4197 		bmcr = 0;
4198 		for (i = 0; i < 200; i++) {
4199 			tg3_readphy(tp, MII_BMCR, &bmcr);
4200 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4201 				continue;
4202 			if (bmcr && bmcr != 0x7fff)
4203 				break;
4204 			udelay(10);
4205 		}
4206 
4207 		lcl_adv = 0;
4208 		rmt_adv = 0;
4209 
4210 		tp->link_config.active_speed = current_speed;
4211 		tp->link_config.active_duplex = current_duplex;
4212 
4213 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4214 			if ((bmcr & BMCR_ANENABLE) &&
4215 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4216 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4217 				current_link_up = 1;
4218 		} else {
4219 			if (!(bmcr & BMCR_ANENABLE) &&
4220 			    tp->link_config.speed == current_speed &&
4221 			    tp->link_config.duplex == current_duplex &&
4222 			    tp->link_config.flowctrl ==
4223 			    tp->link_config.active_flowctrl) {
4224 				current_link_up = 1;
4225 			}
4226 		}
4227 
4228 		if (current_link_up == 1 &&
4229 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4230 			u32 reg, bit;
4231 
4232 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4233 				reg = MII_TG3_FET_GEN_STAT;
4234 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4235 			} else {
4236 				reg = MII_TG3_EXT_STAT;
4237 				bit = MII_TG3_EXT_STAT_MDIX;
4238 			}
4239 
4240 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4241 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4242 
4243 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4244 		}
4245 	}
4246 
4247 relink:
4248 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4249 		tg3_phy_copper_begin(tp);
4250 
4251 		tg3_readphy(tp, MII_BMSR, &bmsr);
4252 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4253 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4254 			current_link_up = 1;
4255 	}
4256 
4257 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4258 	if (current_link_up == 1) {
4259 		if (tp->link_config.active_speed == SPEED_100 ||
4260 		    tp->link_config.active_speed == SPEED_10)
4261 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4262 		else
4263 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4264 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4265 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4266 	else
4267 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4268 
4269 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4270 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4271 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4272 
4273 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4274 		if (current_link_up == 1 &&
4275 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4276 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4277 		else
4278 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4279 	}
4280 
4281 	/* ??? Without this setting Netgear GA302T PHY does not
4282 	 * ??? send/receive packets...
4283 	 */
4284 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4285 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4286 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4287 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4288 		udelay(80);
4289 	}
4290 
4291 	tw32_f(MAC_MODE, tp->mac_mode);
4292 	udelay(40);
4293 
4294 	tg3_phy_eee_adjust(tp, current_link_up);
4295 
4296 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4297 		/* Polled via timer. */
4298 		tw32_f(MAC_EVENT, 0);
4299 	} else {
4300 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4301 	}
4302 	udelay(40);
4303 
4304 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4305 	    current_link_up == 1 &&
4306 	    tp->link_config.active_speed == SPEED_1000 &&
4307 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4308 		udelay(120);
4309 		tw32_f(MAC_STATUS,
4310 		     (MAC_STATUS_SYNC_CHANGED |
4311 		      MAC_STATUS_CFG_CHANGED));
4312 		udelay(40);
4313 		tg3_write_mem(tp,
4314 			      NIC_SRAM_FIRMWARE_MBOX,
4315 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4316 	}
4317 
4318 	/* Prevent send BD corruption. */
4319 	if (tg3_flag(tp, CLKREQ_BUG)) {
4320 		u16 oldlnkctl, newlnkctl;
4321 
4322 		pci_read_config_word(tp->pdev,
4323 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4324 				     &oldlnkctl);
4325 		if (tp->link_config.active_speed == SPEED_100 ||
4326 		    tp->link_config.active_speed == SPEED_10)
4327 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4328 		else
4329 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4330 		if (newlnkctl != oldlnkctl)
4331 			pci_write_config_word(tp->pdev,
4332 					      pci_pcie_cap(tp->pdev) +
4333 					      PCI_EXP_LNKCTL, newlnkctl);
4334 	}
4335 
4336 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4337 		if (current_link_up)
4338 			netif_carrier_on(tp->dev);
4339 		else
4340 			netif_carrier_off(tp->dev);
4341 		tg3_link_report(tp);
4342 	}
4343 
4344 	return 0;
4345 }
4346 
4347 struct tg3_fiber_aneginfo {
4348 	int state;
4349 #define ANEG_STATE_UNKNOWN		0
4350 #define ANEG_STATE_AN_ENABLE		1
4351 #define ANEG_STATE_RESTART_INIT		2
4352 #define ANEG_STATE_RESTART		3
4353 #define ANEG_STATE_DISABLE_LINK_OK	4
4354 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4355 #define ANEG_STATE_ABILITY_DETECT	6
4356 #define ANEG_STATE_ACK_DETECT_INIT	7
4357 #define ANEG_STATE_ACK_DETECT		8
4358 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4359 #define ANEG_STATE_COMPLETE_ACK		10
4360 #define ANEG_STATE_IDLE_DETECT_INIT	11
4361 #define ANEG_STATE_IDLE_DETECT		12
4362 #define ANEG_STATE_LINK_OK		13
4363 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4364 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4365 
4366 	u32 flags;
4367 #define MR_AN_ENABLE		0x00000001
4368 #define MR_RESTART_AN		0x00000002
4369 #define MR_AN_COMPLETE		0x00000004
4370 #define MR_PAGE_RX		0x00000008
4371 #define MR_NP_LOADED		0x00000010
4372 #define MR_TOGGLE_TX		0x00000020
4373 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4374 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4375 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4376 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4377 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4378 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4379 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4380 #define MR_TOGGLE_RX		0x00002000
4381 #define MR_NP_RX		0x00004000
4382 
4383 #define MR_LINK_OK		0x80000000
4384 
4385 	unsigned long link_time, cur_time;
4386 
4387 	u32 ability_match_cfg;
4388 	int ability_match_count;
4389 
4390 	char ability_match, idle_match, ack_match;
4391 
4392 	u32 txconfig, rxconfig;
4393 #define ANEG_CFG_NP		0x00000080
4394 #define ANEG_CFG_ACK		0x00000040
4395 #define ANEG_CFG_RF2		0x00000020
4396 #define ANEG_CFG_RF1		0x00000010
4397 #define ANEG_CFG_PS2		0x00000001
4398 #define ANEG_CFG_PS1		0x00008000
4399 #define ANEG_CFG_HD		0x00004000
4400 #define ANEG_CFG_FD		0x00002000
4401 #define ANEG_CFG_INVAL		0x00001f06
4402 
4403 };
4404 #define ANEG_OK		0
4405 #define ANEG_DONE	1
4406 #define ANEG_TIMER_ENAB	2
4407 #define ANEG_FAILED	-1
4408 
4409 #define ANEG_STATE_SETTLE_TIME	10000
4410 
4411 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4412 				   struct tg3_fiber_aneginfo *ap)
4413 {
4414 	u16 flowctrl;
4415 	unsigned long delta;
4416 	u32 rx_cfg_reg;
4417 	int ret;
4418 
4419 	if (ap->state == ANEG_STATE_UNKNOWN) {
4420 		ap->rxconfig = 0;
4421 		ap->link_time = 0;
4422 		ap->cur_time = 0;
4423 		ap->ability_match_cfg = 0;
4424 		ap->ability_match_count = 0;
4425 		ap->ability_match = 0;
4426 		ap->idle_match = 0;
4427 		ap->ack_match = 0;
4428 	}
4429 	ap->cur_time++;
4430 
4431 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4432 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4433 
4434 		if (rx_cfg_reg != ap->ability_match_cfg) {
4435 			ap->ability_match_cfg = rx_cfg_reg;
4436 			ap->ability_match = 0;
4437 			ap->ability_match_count = 0;
4438 		} else {
4439 			if (++ap->ability_match_count > 1) {
4440 				ap->ability_match = 1;
4441 				ap->ability_match_cfg = rx_cfg_reg;
4442 			}
4443 		}
4444 		if (rx_cfg_reg & ANEG_CFG_ACK)
4445 			ap->ack_match = 1;
4446 		else
4447 			ap->ack_match = 0;
4448 
4449 		ap->idle_match = 0;
4450 	} else {
4451 		ap->idle_match = 1;
4452 		ap->ability_match_cfg = 0;
4453 		ap->ability_match_count = 0;
4454 		ap->ability_match = 0;
4455 		ap->ack_match = 0;
4456 
4457 		rx_cfg_reg = 0;
4458 	}
4459 
4460 	ap->rxconfig = rx_cfg_reg;
4461 	ret = ANEG_OK;
4462 
4463 	switch (ap->state) {
4464 	case ANEG_STATE_UNKNOWN:
4465 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4466 			ap->state = ANEG_STATE_AN_ENABLE;
4467 
4468 		/* fallthru */
4469 	case ANEG_STATE_AN_ENABLE:
4470 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4471 		if (ap->flags & MR_AN_ENABLE) {
4472 			ap->link_time = 0;
4473 			ap->cur_time = 0;
4474 			ap->ability_match_cfg = 0;
4475 			ap->ability_match_count = 0;
4476 			ap->ability_match = 0;
4477 			ap->idle_match = 0;
4478 			ap->ack_match = 0;
4479 
4480 			ap->state = ANEG_STATE_RESTART_INIT;
4481 		} else {
4482 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4483 		}
4484 		break;
4485 
4486 	case ANEG_STATE_RESTART_INIT:
4487 		ap->link_time = ap->cur_time;
4488 		ap->flags &= ~(MR_NP_LOADED);
4489 		ap->txconfig = 0;
4490 		tw32(MAC_TX_AUTO_NEG, 0);
4491 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4492 		tw32_f(MAC_MODE, tp->mac_mode);
4493 		udelay(40);
4494 
4495 		ret = ANEG_TIMER_ENAB;
4496 		ap->state = ANEG_STATE_RESTART;
4497 
4498 		/* fallthru */
4499 	case ANEG_STATE_RESTART:
4500 		delta = ap->cur_time - ap->link_time;
4501 		if (delta > ANEG_STATE_SETTLE_TIME)
4502 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4503 		else
4504 			ret = ANEG_TIMER_ENAB;
4505 		break;
4506 
4507 	case ANEG_STATE_DISABLE_LINK_OK:
4508 		ret = ANEG_DONE;
4509 		break;
4510 
4511 	case ANEG_STATE_ABILITY_DETECT_INIT:
4512 		ap->flags &= ~(MR_TOGGLE_TX);
4513 		ap->txconfig = ANEG_CFG_FD;
4514 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4515 		if (flowctrl & ADVERTISE_1000XPAUSE)
4516 			ap->txconfig |= ANEG_CFG_PS1;
4517 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4518 			ap->txconfig |= ANEG_CFG_PS2;
4519 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4520 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4521 		tw32_f(MAC_MODE, tp->mac_mode);
4522 		udelay(40);
4523 
4524 		ap->state = ANEG_STATE_ABILITY_DETECT;
4525 		break;
4526 
4527 	case ANEG_STATE_ABILITY_DETECT:
4528 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4529 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4530 		break;
4531 
4532 	case ANEG_STATE_ACK_DETECT_INIT:
4533 		ap->txconfig |= ANEG_CFG_ACK;
4534 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4535 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4536 		tw32_f(MAC_MODE, tp->mac_mode);
4537 		udelay(40);
4538 
4539 		ap->state = ANEG_STATE_ACK_DETECT;
4540 
4541 		/* fallthru */
4542 	case ANEG_STATE_ACK_DETECT:
4543 		if (ap->ack_match != 0) {
4544 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4545 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4546 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4547 			} else {
4548 				ap->state = ANEG_STATE_AN_ENABLE;
4549 			}
4550 		} else if (ap->ability_match != 0 &&
4551 			   ap->rxconfig == 0) {
4552 			ap->state = ANEG_STATE_AN_ENABLE;
4553 		}
4554 		break;
4555 
4556 	case ANEG_STATE_COMPLETE_ACK_INIT:
4557 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4558 			ret = ANEG_FAILED;
4559 			break;
4560 		}
4561 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4562 			       MR_LP_ADV_HALF_DUPLEX |
4563 			       MR_LP_ADV_SYM_PAUSE |
4564 			       MR_LP_ADV_ASYM_PAUSE |
4565 			       MR_LP_ADV_REMOTE_FAULT1 |
4566 			       MR_LP_ADV_REMOTE_FAULT2 |
4567 			       MR_LP_ADV_NEXT_PAGE |
4568 			       MR_TOGGLE_RX |
4569 			       MR_NP_RX);
4570 		if (ap->rxconfig & ANEG_CFG_FD)
4571 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4572 		if (ap->rxconfig & ANEG_CFG_HD)
4573 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4574 		if (ap->rxconfig & ANEG_CFG_PS1)
4575 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4576 		if (ap->rxconfig & ANEG_CFG_PS2)
4577 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4578 		if (ap->rxconfig & ANEG_CFG_RF1)
4579 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4580 		if (ap->rxconfig & ANEG_CFG_RF2)
4581 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4582 		if (ap->rxconfig & ANEG_CFG_NP)
4583 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4584 
4585 		ap->link_time = ap->cur_time;
4586 
4587 		ap->flags ^= (MR_TOGGLE_TX);
4588 		if (ap->rxconfig & 0x0008)
4589 			ap->flags |= MR_TOGGLE_RX;
4590 		if (ap->rxconfig & ANEG_CFG_NP)
4591 			ap->flags |= MR_NP_RX;
4592 		ap->flags |= MR_PAGE_RX;
4593 
4594 		ap->state = ANEG_STATE_COMPLETE_ACK;
4595 		ret = ANEG_TIMER_ENAB;
4596 		break;
4597 
4598 	case ANEG_STATE_COMPLETE_ACK:
4599 		if (ap->ability_match != 0 &&
4600 		    ap->rxconfig == 0) {
4601 			ap->state = ANEG_STATE_AN_ENABLE;
4602 			break;
4603 		}
4604 		delta = ap->cur_time - ap->link_time;
4605 		if (delta > ANEG_STATE_SETTLE_TIME) {
4606 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4607 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4608 			} else {
4609 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4610 				    !(ap->flags & MR_NP_RX)) {
4611 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4612 				} else {
4613 					ret = ANEG_FAILED;
4614 				}
4615 			}
4616 		}
4617 		break;
4618 
4619 	case ANEG_STATE_IDLE_DETECT_INIT:
4620 		ap->link_time = ap->cur_time;
4621 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4622 		tw32_f(MAC_MODE, tp->mac_mode);
4623 		udelay(40);
4624 
4625 		ap->state = ANEG_STATE_IDLE_DETECT;
4626 		ret = ANEG_TIMER_ENAB;
4627 		break;
4628 
4629 	case ANEG_STATE_IDLE_DETECT:
4630 		if (ap->ability_match != 0 &&
4631 		    ap->rxconfig == 0) {
4632 			ap->state = ANEG_STATE_AN_ENABLE;
4633 			break;
4634 		}
4635 		delta = ap->cur_time - ap->link_time;
4636 		if (delta > ANEG_STATE_SETTLE_TIME) {
4637 			/* XXX another gem from the Broadcom driver :( */
4638 			ap->state = ANEG_STATE_LINK_OK;
4639 		}
4640 		break;
4641 
4642 	case ANEG_STATE_LINK_OK:
4643 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4644 		ret = ANEG_DONE;
4645 		break;
4646 
4647 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4648 		/* ??? unimplemented */
4649 		break;
4650 
4651 	case ANEG_STATE_NEXT_PAGE_WAIT:
4652 		/* ??? unimplemented */
4653 		break;
4654 
4655 	default:
4656 		ret = ANEG_FAILED;
4657 		break;
4658 	}
4659 
4660 	return ret;
4661 }
4662 
4663 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4664 {
4665 	int res = 0;
4666 	struct tg3_fiber_aneginfo aninfo;
4667 	int status = ANEG_FAILED;
4668 	unsigned int tick;
4669 	u32 tmp;
4670 
4671 	tw32_f(MAC_TX_AUTO_NEG, 0);
4672 
4673 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4674 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4675 	udelay(40);
4676 
4677 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4678 	udelay(40);
4679 
4680 	memset(&aninfo, 0, sizeof(aninfo));
4681 	aninfo.flags |= MR_AN_ENABLE;
4682 	aninfo.state = ANEG_STATE_UNKNOWN;
4683 	aninfo.cur_time = 0;
4684 	tick = 0;
4685 	while (++tick < 195000) {
4686 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4687 		if (status == ANEG_DONE || status == ANEG_FAILED)
4688 			break;
4689 
4690 		udelay(1);
4691 	}
4692 
4693 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4694 	tw32_f(MAC_MODE, tp->mac_mode);
4695 	udelay(40);
4696 
4697 	*txflags = aninfo.txconfig;
4698 	*rxflags = aninfo.flags;
4699 
4700 	if (status == ANEG_DONE &&
4701 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4702 			     MR_LP_ADV_FULL_DUPLEX)))
4703 		res = 1;
4704 
4705 	return res;
4706 }
4707 
4708 static void tg3_init_bcm8002(struct tg3 *tp)
4709 {
4710 	u32 mac_status = tr32(MAC_STATUS);
4711 	int i;
4712 
4713 	/* Reset when initting first time or we have a link. */
4714 	if (tg3_flag(tp, INIT_COMPLETE) &&
4715 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4716 		return;
4717 
4718 	/* Set PLL lock range. */
4719 	tg3_writephy(tp, 0x16, 0x8007);
4720 
4721 	/* SW reset */
4722 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4723 
4724 	/* Wait for reset to complete. */
4725 	/* XXX schedule_timeout() ... */
4726 	for (i = 0; i < 500; i++)
4727 		udelay(10);
4728 
4729 	/* Config mode; select PMA/Ch 1 regs. */
4730 	tg3_writephy(tp, 0x10, 0x8411);
4731 
4732 	/* Enable auto-lock and comdet, select txclk for tx. */
4733 	tg3_writephy(tp, 0x11, 0x0a10);
4734 
4735 	tg3_writephy(tp, 0x18, 0x00a0);
4736 	tg3_writephy(tp, 0x16, 0x41ff);
4737 
4738 	/* Assert and deassert POR. */
4739 	tg3_writephy(tp, 0x13, 0x0400);
4740 	udelay(40);
4741 	tg3_writephy(tp, 0x13, 0x0000);
4742 
4743 	tg3_writephy(tp, 0x11, 0x0a50);
4744 	udelay(40);
4745 	tg3_writephy(tp, 0x11, 0x0a10);
4746 
4747 	/* Wait for signal to stabilize */
4748 	/* XXX schedule_timeout() ... */
4749 	for (i = 0; i < 15000; i++)
4750 		udelay(10);
4751 
4752 	/* Deselect the channel register so we can read the PHYID
4753 	 * later.
4754 	 */
4755 	tg3_writephy(tp, 0x10, 0x8011);
4756 }
4757 
4758 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4759 {
4760 	u16 flowctrl;
4761 	u32 sg_dig_ctrl, sg_dig_status;
4762 	u32 serdes_cfg, expected_sg_dig_ctrl;
4763 	int workaround, port_a;
4764 	int current_link_up;
4765 
4766 	serdes_cfg = 0;
4767 	expected_sg_dig_ctrl = 0;
4768 	workaround = 0;
4769 	port_a = 1;
4770 	current_link_up = 0;
4771 
4772 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4773 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4774 		workaround = 1;
4775 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4776 			port_a = 0;
4777 
4778 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4779 		/* preserve bits 20-23 for voltage regulator */
4780 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4781 	}
4782 
4783 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4784 
4785 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4786 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4787 			if (workaround) {
4788 				u32 val = serdes_cfg;
4789 
4790 				if (port_a)
4791 					val |= 0xc010000;
4792 				else
4793 					val |= 0x4010000;
4794 				tw32_f(MAC_SERDES_CFG, val);
4795 			}
4796 
4797 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4798 		}
4799 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4800 			tg3_setup_flow_control(tp, 0, 0);
4801 			current_link_up = 1;
4802 		}
4803 		goto out;
4804 	}
4805 
4806 	/* Want auto-negotiation.  */
4807 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4808 
4809 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4810 	if (flowctrl & ADVERTISE_1000XPAUSE)
4811 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4812 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4813 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4814 
4815 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4816 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4817 		    tp->serdes_counter &&
4818 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4819 				    MAC_STATUS_RCVD_CFG)) ==
4820 		     MAC_STATUS_PCS_SYNCED)) {
4821 			tp->serdes_counter--;
4822 			current_link_up = 1;
4823 			goto out;
4824 		}
4825 restart_autoneg:
4826 		if (workaround)
4827 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4828 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4829 		udelay(5);
4830 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4831 
4832 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4833 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4834 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4835 				 MAC_STATUS_SIGNAL_DET)) {
4836 		sg_dig_status = tr32(SG_DIG_STATUS);
4837 		mac_status = tr32(MAC_STATUS);
4838 
4839 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4840 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4841 			u32 local_adv = 0, remote_adv = 0;
4842 
4843 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4844 				local_adv |= ADVERTISE_1000XPAUSE;
4845 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4846 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4847 
4848 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4849 				remote_adv |= LPA_1000XPAUSE;
4850 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4851 				remote_adv |= LPA_1000XPAUSE_ASYM;
4852 
4853 			tp->link_config.rmt_adv =
4854 					   mii_adv_to_ethtool_adv_x(remote_adv);
4855 
4856 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4857 			current_link_up = 1;
4858 			tp->serdes_counter = 0;
4859 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4860 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4861 			if (tp->serdes_counter)
4862 				tp->serdes_counter--;
4863 			else {
4864 				if (workaround) {
4865 					u32 val = serdes_cfg;
4866 
4867 					if (port_a)
4868 						val |= 0xc010000;
4869 					else
4870 						val |= 0x4010000;
4871 
4872 					tw32_f(MAC_SERDES_CFG, val);
4873 				}
4874 
4875 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4876 				udelay(40);
4877 
4878 				/* Link parallel detection - link is up */
4879 				/* only if we have PCS_SYNC and not */
4880 				/* receiving config code words */
4881 				mac_status = tr32(MAC_STATUS);
4882 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4883 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4884 					tg3_setup_flow_control(tp, 0, 0);
4885 					current_link_up = 1;
4886 					tp->phy_flags |=
4887 						TG3_PHYFLG_PARALLEL_DETECT;
4888 					tp->serdes_counter =
4889 						SERDES_PARALLEL_DET_TIMEOUT;
4890 				} else
4891 					goto restart_autoneg;
4892 			}
4893 		}
4894 	} else {
4895 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4896 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4897 	}
4898 
4899 out:
4900 	return current_link_up;
4901 }
4902 
4903 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4904 {
4905 	int current_link_up = 0;
4906 
4907 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4908 		goto out;
4909 
4910 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4911 		u32 txflags, rxflags;
4912 		int i;
4913 
4914 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4915 			u32 local_adv = 0, remote_adv = 0;
4916 
4917 			if (txflags & ANEG_CFG_PS1)
4918 				local_adv |= ADVERTISE_1000XPAUSE;
4919 			if (txflags & ANEG_CFG_PS2)
4920 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4921 
4922 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4923 				remote_adv |= LPA_1000XPAUSE;
4924 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4925 				remote_adv |= LPA_1000XPAUSE_ASYM;
4926 
4927 			tp->link_config.rmt_adv =
4928 					   mii_adv_to_ethtool_adv_x(remote_adv);
4929 
4930 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4931 
4932 			current_link_up = 1;
4933 		}
4934 		for (i = 0; i < 30; i++) {
4935 			udelay(20);
4936 			tw32_f(MAC_STATUS,
4937 			       (MAC_STATUS_SYNC_CHANGED |
4938 				MAC_STATUS_CFG_CHANGED));
4939 			udelay(40);
4940 			if ((tr32(MAC_STATUS) &
4941 			     (MAC_STATUS_SYNC_CHANGED |
4942 			      MAC_STATUS_CFG_CHANGED)) == 0)
4943 				break;
4944 		}
4945 
4946 		mac_status = tr32(MAC_STATUS);
4947 		if (current_link_up == 0 &&
4948 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4949 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4950 			current_link_up = 1;
4951 	} else {
4952 		tg3_setup_flow_control(tp, 0, 0);
4953 
4954 		/* Forcing 1000FD link up. */
4955 		current_link_up = 1;
4956 
4957 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4958 		udelay(40);
4959 
4960 		tw32_f(MAC_MODE, tp->mac_mode);
4961 		udelay(40);
4962 	}
4963 
4964 out:
4965 	return current_link_up;
4966 }
4967 
4968 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4969 {
4970 	u32 orig_pause_cfg;
4971 	u16 orig_active_speed;
4972 	u8 orig_active_duplex;
4973 	u32 mac_status;
4974 	int current_link_up;
4975 	int i;
4976 
4977 	orig_pause_cfg = tp->link_config.active_flowctrl;
4978 	orig_active_speed = tp->link_config.active_speed;
4979 	orig_active_duplex = tp->link_config.active_duplex;
4980 
4981 	if (!tg3_flag(tp, HW_AUTONEG) &&
4982 	    netif_carrier_ok(tp->dev) &&
4983 	    tg3_flag(tp, INIT_COMPLETE)) {
4984 		mac_status = tr32(MAC_STATUS);
4985 		mac_status &= (MAC_STATUS_PCS_SYNCED |
4986 			       MAC_STATUS_SIGNAL_DET |
4987 			       MAC_STATUS_CFG_CHANGED |
4988 			       MAC_STATUS_RCVD_CFG);
4989 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4990 				   MAC_STATUS_SIGNAL_DET)) {
4991 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4992 					    MAC_STATUS_CFG_CHANGED));
4993 			return 0;
4994 		}
4995 	}
4996 
4997 	tw32_f(MAC_TX_AUTO_NEG, 0);
4998 
4999 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5000 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5001 	tw32_f(MAC_MODE, tp->mac_mode);
5002 	udelay(40);
5003 
5004 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5005 		tg3_init_bcm8002(tp);
5006 
5007 	/* Enable link change event even when serdes polling.  */
5008 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5009 	udelay(40);
5010 
5011 	current_link_up = 0;
5012 	tp->link_config.rmt_adv = 0;
5013 	mac_status = tr32(MAC_STATUS);
5014 
5015 	if (tg3_flag(tp, HW_AUTONEG))
5016 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5017 	else
5018 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5019 
5020 	tp->napi[0].hw_status->status =
5021 		(SD_STATUS_UPDATED |
5022 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5023 
5024 	for (i = 0; i < 100; i++) {
5025 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5026 				    MAC_STATUS_CFG_CHANGED));
5027 		udelay(5);
5028 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5029 					 MAC_STATUS_CFG_CHANGED |
5030 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5031 			break;
5032 	}
5033 
5034 	mac_status = tr32(MAC_STATUS);
5035 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5036 		current_link_up = 0;
5037 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5038 		    tp->serdes_counter == 0) {
5039 			tw32_f(MAC_MODE, (tp->mac_mode |
5040 					  MAC_MODE_SEND_CONFIGS));
5041 			udelay(1);
5042 			tw32_f(MAC_MODE, tp->mac_mode);
5043 		}
5044 	}
5045 
5046 	if (current_link_up == 1) {
5047 		tp->link_config.active_speed = SPEED_1000;
5048 		tp->link_config.active_duplex = DUPLEX_FULL;
5049 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5050 				    LED_CTRL_LNKLED_OVERRIDE |
5051 				    LED_CTRL_1000MBPS_ON));
5052 	} else {
5053 		tp->link_config.active_speed = SPEED_UNKNOWN;
5054 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5055 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5056 				    LED_CTRL_LNKLED_OVERRIDE |
5057 				    LED_CTRL_TRAFFIC_OVERRIDE));
5058 	}
5059 
5060 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5061 		if (current_link_up)
5062 			netif_carrier_on(tp->dev);
5063 		else
5064 			netif_carrier_off(tp->dev);
5065 		tg3_link_report(tp);
5066 	} else {
5067 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5068 		if (orig_pause_cfg != now_pause_cfg ||
5069 		    orig_active_speed != tp->link_config.active_speed ||
5070 		    orig_active_duplex != tp->link_config.active_duplex)
5071 			tg3_link_report(tp);
5072 	}
5073 
5074 	return 0;
5075 }
5076 
5077 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5078 {
5079 	int current_link_up, err = 0;
5080 	u32 bmsr, bmcr;
5081 	u16 current_speed;
5082 	u8 current_duplex;
5083 	u32 local_adv, remote_adv;
5084 
5085 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5086 	tw32_f(MAC_MODE, tp->mac_mode);
5087 	udelay(40);
5088 
5089 	tw32(MAC_EVENT, 0);
5090 
5091 	tw32_f(MAC_STATUS,
5092 	     (MAC_STATUS_SYNC_CHANGED |
5093 	      MAC_STATUS_CFG_CHANGED |
5094 	      MAC_STATUS_MI_COMPLETION |
5095 	      MAC_STATUS_LNKSTATE_CHANGED));
5096 	udelay(40);
5097 
5098 	if (force_reset)
5099 		tg3_phy_reset(tp);
5100 
5101 	current_link_up = 0;
5102 	current_speed = SPEED_UNKNOWN;
5103 	current_duplex = DUPLEX_UNKNOWN;
5104 	tp->link_config.rmt_adv = 0;
5105 
5106 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5108 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5109 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5110 			bmsr |= BMSR_LSTATUS;
5111 		else
5112 			bmsr &= ~BMSR_LSTATUS;
5113 	}
5114 
5115 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5116 
5117 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5118 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5119 		/* do nothing, just check for link up at the end */
5120 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5121 		u32 adv, newadv;
5122 
5123 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5124 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5125 				 ADVERTISE_1000XPAUSE |
5126 				 ADVERTISE_1000XPSE_ASYM |
5127 				 ADVERTISE_SLCT);
5128 
5129 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5130 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5131 
5132 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5133 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5134 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5135 			tg3_writephy(tp, MII_BMCR, bmcr);
5136 
5137 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5138 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5139 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5140 
5141 			return err;
5142 		}
5143 	} else {
5144 		u32 new_bmcr;
5145 
5146 		bmcr &= ~BMCR_SPEED1000;
5147 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5148 
5149 		if (tp->link_config.duplex == DUPLEX_FULL)
5150 			new_bmcr |= BMCR_FULLDPLX;
5151 
5152 		if (new_bmcr != bmcr) {
5153 			/* BMCR_SPEED1000 is a reserved bit that needs
5154 			 * to be set on write.
5155 			 */
5156 			new_bmcr |= BMCR_SPEED1000;
5157 
5158 			/* Force a linkdown */
5159 			if (netif_carrier_ok(tp->dev)) {
5160 				u32 adv;
5161 
5162 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5163 				adv &= ~(ADVERTISE_1000XFULL |
5164 					 ADVERTISE_1000XHALF |
5165 					 ADVERTISE_SLCT);
5166 				tg3_writephy(tp, MII_ADVERTISE, adv);
5167 				tg3_writephy(tp, MII_BMCR, bmcr |
5168 							   BMCR_ANRESTART |
5169 							   BMCR_ANENABLE);
5170 				udelay(10);
5171 				netif_carrier_off(tp->dev);
5172 			}
5173 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5174 			bmcr = new_bmcr;
5175 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5177 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5178 			    ASIC_REV_5714) {
5179 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5180 					bmsr |= BMSR_LSTATUS;
5181 				else
5182 					bmsr &= ~BMSR_LSTATUS;
5183 			}
5184 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5185 		}
5186 	}
5187 
5188 	if (bmsr & BMSR_LSTATUS) {
5189 		current_speed = SPEED_1000;
5190 		current_link_up = 1;
5191 		if (bmcr & BMCR_FULLDPLX)
5192 			current_duplex = DUPLEX_FULL;
5193 		else
5194 			current_duplex = DUPLEX_HALF;
5195 
5196 		local_adv = 0;
5197 		remote_adv = 0;
5198 
5199 		if (bmcr & BMCR_ANENABLE) {
5200 			u32 common;
5201 
5202 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5203 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5204 			common = local_adv & remote_adv;
5205 			if (common & (ADVERTISE_1000XHALF |
5206 				      ADVERTISE_1000XFULL)) {
5207 				if (common & ADVERTISE_1000XFULL)
5208 					current_duplex = DUPLEX_FULL;
5209 				else
5210 					current_duplex = DUPLEX_HALF;
5211 
5212 				tp->link_config.rmt_adv =
5213 					   mii_adv_to_ethtool_adv_x(remote_adv);
5214 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5215 				/* Link is up via parallel detect */
5216 			} else {
5217 				current_link_up = 0;
5218 			}
5219 		}
5220 	}
5221 
5222 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5223 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5224 
5225 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5226 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5227 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5228 
5229 	tw32_f(MAC_MODE, tp->mac_mode);
5230 	udelay(40);
5231 
5232 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5233 
5234 	tp->link_config.active_speed = current_speed;
5235 	tp->link_config.active_duplex = current_duplex;
5236 
5237 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5238 		if (current_link_up)
5239 			netif_carrier_on(tp->dev);
5240 		else {
5241 			netif_carrier_off(tp->dev);
5242 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243 		}
5244 		tg3_link_report(tp);
5245 	}
5246 	return err;
5247 }
5248 
5249 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5250 {
5251 	if (tp->serdes_counter) {
5252 		/* Give autoneg time to complete. */
5253 		tp->serdes_counter--;
5254 		return;
5255 	}
5256 
5257 	if (!netif_carrier_ok(tp->dev) &&
5258 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5259 		u32 bmcr;
5260 
5261 		tg3_readphy(tp, MII_BMCR, &bmcr);
5262 		if (bmcr & BMCR_ANENABLE) {
5263 			u32 phy1, phy2;
5264 
5265 			/* Select shadow register 0x1f */
5266 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5267 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5268 
5269 			/* Select expansion interrupt status register */
5270 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5271 					 MII_TG3_DSP_EXP1_INT_STAT);
5272 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5273 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5274 
5275 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5276 				/* We have signal detect and not receiving
5277 				 * config code words, link is up by parallel
5278 				 * detection.
5279 				 */
5280 
5281 				bmcr &= ~BMCR_ANENABLE;
5282 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5283 				tg3_writephy(tp, MII_BMCR, bmcr);
5284 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5285 			}
5286 		}
5287 	} else if (netif_carrier_ok(tp->dev) &&
5288 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5289 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5290 		u32 phy2;
5291 
5292 		/* Select expansion interrupt status register */
5293 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5294 				 MII_TG3_DSP_EXP1_INT_STAT);
5295 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5296 		if (phy2 & 0x20) {
5297 			u32 bmcr;
5298 
5299 			/* Config code words received, turn on autoneg. */
5300 			tg3_readphy(tp, MII_BMCR, &bmcr);
5301 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5302 
5303 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5304 
5305 		}
5306 	}
5307 }
5308 
5309 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5310 {
5311 	u32 val;
5312 	int err;
5313 
5314 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5315 		err = tg3_setup_fiber_phy(tp, force_reset);
5316 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5317 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5318 	else
5319 		err = tg3_setup_copper_phy(tp, force_reset);
5320 
5321 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5322 		u32 scale;
5323 
5324 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5325 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5326 			scale = 65;
5327 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5328 			scale = 6;
5329 		else
5330 			scale = 12;
5331 
5332 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5333 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5334 		tw32(GRC_MISC_CFG, val);
5335 	}
5336 
5337 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5338 	      (6 << TX_LENGTHS_IPG_SHIFT);
5339 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5340 		val |= tr32(MAC_TX_LENGTHS) &
5341 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5342 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5343 
5344 	if (tp->link_config.active_speed == SPEED_1000 &&
5345 	    tp->link_config.active_duplex == DUPLEX_HALF)
5346 		tw32(MAC_TX_LENGTHS, val |
5347 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5348 	else
5349 		tw32(MAC_TX_LENGTHS, val |
5350 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5351 
5352 	if (!tg3_flag(tp, 5705_PLUS)) {
5353 		if (netif_carrier_ok(tp->dev)) {
5354 			tw32(HOSTCC_STAT_COAL_TICKS,
5355 			     tp->coal.stats_block_coalesce_usecs);
5356 		} else {
5357 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5358 		}
5359 	}
5360 
5361 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5362 		val = tr32(PCIE_PWR_MGMT_THRESH);
5363 		if (!netif_carrier_ok(tp->dev))
5364 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5365 			      tp->pwrmgmt_thresh;
5366 		else
5367 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5368 		tw32(PCIE_PWR_MGMT_THRESH, val);
5369 	}
5370 
5371 	return err;
5372 }
5373 
5374 static inline int tg3_irq_sync(struct tg3 *tp)
5375 {
5376 	return tp->irq_sync;
5377 }
5378 
5379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5380 {
5381 	int i;
5382 
5383 	dst = (u32 *)((u8 *)dst + off);
5384 	for (i = 0; i < len; i += sizeof(u32))
5385 		*dst++ = tr32(off + i);
5386 }
5387 
5388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5389 {
5390 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5391 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5392 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5393 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5394 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5395 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5396 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5397 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5398 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5399 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5400 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5401 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5402 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5403 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5404 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5405 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5406 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5407 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5408 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5409 
5410 	if (tg3_flag(tp, SUPPORT_MSIX))
5411 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5412 
5413 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5414 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5415 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5416 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5417 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5418 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5419 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5420 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5421 
5422 	if (!tg3_flag(tp, 5705_PLUS)) {
5423 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5424 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5425 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5426 	}
5427 
5428 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5429 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5430 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5431 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5432 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5433 
5434 	if (tg3_flag(tp, NVRAM))
5435 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5436 }
5437 
5438 static void tg3_dump_state(struct tg3 *tp)
5439 {
5440 	int i;
5441 	u32 *regs;
5442 
5443 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5444 	if (!regs) {
5445 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5446 		return;
5447 	}
5448 
5449 	if (tg3_flag(tp, PCI_EXPRESS)) {
5450 		/* Read up to but not including private PCI registers */
5451 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5452 			regs[i / sizeof(u32)] = tr32(i);
5453 	} else
5454 		tg3_dump_legacy_regs(tp, regs);
5455 
5456 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5457 		if (!regs[i + 0] && !regs[i + 1] &&
5458 		    !regs[i + 2] && !regs[i + 3])
5459 			continue;
5460 
5461 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5462 			   i * 4,
5463 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5464 	}
5465 
5466 	kfree(regs);
5467 
5468 	for (i = 0; i < tp->irq_cnt; i++) {
5469 		struct tg3_napi *tnapi = &tp->napi[i];
5470 
5471 		/* SW status block */
5472 		netdev_err(tp->dev,
5473 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5474 			   i,
5475 			   tnapi->hw_status->status,
5476 			   tnapi->hw_status->status_tag,
5477 			   tnapi->hw_status->rx_jumbo_consumer,
5478 			   tnapi->hw_status->rx_consumer,
5479 			   tnapi->hw_status->rx_mini_consumer,
5480 			   tnapi->hw_status->idx[0].rx_producer,
5481 			   tnapi->hw_status->idx[0].tx_consumer);
5482 
5483 		netdev_err(tp->dev,
5484 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5485 			   i,
5486 			   tnapi->last_tag, tnapi->last_irq_tag,
5487 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5488 			   tnapi->rx_rcb_ptr,
5489 			   tnapi->prodring.rx_std_prod_idx,
5490 			   tnapi->prodring.rx_std_cons_idx,
5491 			   tnapi->prodring.rx_jmb_prod_idx,
5492 			   tnapi->prodring.rx_jmb_cons_idx);
5493 	}
5494 }
5495 
5496 /* This is called whenever we suspect that the system chipset is re-
5497  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5498  * is bogus tx completions. We try to recover by setting the
5499  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5500  * in the workqueue.
5501  */
5502 static void tg3_tx_recover(struct tg3 *tp)
5503 {
5504 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5505 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5506 
5507 	netdev_warn(tp->dev,
5508 		    "The system may be re-ordering memory-mapped I/O "
5509 		    "cycles to the network device, attempting to recover. "
5510 		    "Please report the problem to the driver maintainer "
5511 		    "and include system chipset information.\n");
5512 
5513 	spin_lock(&tp->lock);
5514 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5515 	spin_unlock(&tp->lock);
5516 }
5517 
5518 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5519 {
5520 	/* Tell compiler to fetch tx indices from memory. */
5521 	barrier();
5522 	return tnapi->tx_pending -
5523 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5524 }
5525 
5526 /* Tigon3 never reports partial packet sends.  So we do not
5527  * need special logic to handle SKBs that have not had all
5528  * of their frags sent yet, like SunGEM does.
5529  */
5530 static void tg3_tx(struct tg3_napi *tnapi)
5531 {
5532 	struct tg3 *tp = tnapi->tp;
5533 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5534 	u32 sw_idx = tnapi->tx_cons;
5535 	struct netdev_queue *txq;
5536 	int index = tnapi - tp->napi;
5537 	unsigned int pkts_compl = 0, bytes_compl = 0;
5538 
5539 	if (tg3_flag(tp, ENABLE_TSS))
5540 		index--;
5541 
5542 	txq = netdev_get_tx_queue(tp->dev, index);
5543 
5544 	while (sw_idx != hw_idx) {
5545 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5546 		struct sk_buff *skb = ri->skb;
5547 		int i, tx_bug = 0;
5548 
5549 		if (unlikely(skb == NULL)) {
5550 			tg3_tx_recover(tp);
5551 			return;
5552 		}
5553 
5554 		pci_unmap_single(tp->pdev,
5555 				 dma_unmap_addr(ri, mapping),
5556 				 skb_headlen(skb),
5557 				 PCI_DMA_TODEVICE);
5558 
5559 		ri->skb = NULL;
5560 
5561 		while (ri->fragmented) {
5562 			ri->fragmented = false;
5563 			sw_idx = NEXT_TX(sw_idx);
5564 			ri = &tnapi->tx_buffers[sw_idx];
5565 		}
5566 
5567 		sw_idx = NEXT_TX(sw_idx);
5568 
5569 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5570 			ri = &tnapi->tx_buffers[sw_idx];
5571 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5572 				tx_bug = 1;
5573 
5574 			pci_unmap_page(tp->pdev,
5575 				       dma_unmap_addr(ri, mapping),
5576 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5577 				       PCI_DMA_TODEVICE);
5578 
5579 			while (ri->fragmented) {
5580 				ri->fragmented = false;
5581 				sw_idx = NEXT_TX(sw_idx);
5582 				ri = &tnapi->tx_buffers[sw_idx];
5583 			}
5584 
5585 			sw_idx = NEXT_TX(sw_idx);
5586 		}
5587 
5588 		pkts_compl++;
5589 		bytes_compl += skb->len;
5590 
5591 		dev_kfree_skb(skb);
5592 
5593 		if (unlikely(tx_bug)) {
5594 			tg3_tx_recover(tp);
5595 			return;
5596 		}
5597 	}
5598 
5599 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5600 
5601 	tnapi->tx_cons = sw_idx;
5602 
5603 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5604 	 * before checking for netif_queue_stopped().  Without the
5605 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5606 	 * will miss it and cause the queue to be stopped forever.
5607 	 */
5608 	smp_mb();
5609 
5610 	if (unlikely(netif_tx_queue_stopped(txq) &&
5611 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5612 		__netif_tx_lock(txq, smp_processor_id());
5613 		if (netif_tx_queue_stopped(txq) &&
5614 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5615 			netif_tx_wake_queue(txq);
5616 		__netif_tx_unlock(txq);
5617 	}
5618 }
5619 
5620 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5621 {
5622 	if (!ri->data)
5623 		return;
5624 
5625 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5626 			 map_sz, PCI_DMA_FROMDEVICE);
5627 	kfree(ri->data);
5628 	ri->data = NULL;
5629 }
5630 
5631 /* Returns size of skb allocated or < 0 on error.
5632  *
5633  * We only need to fill in the address because the other members
5634  * of the RX descriptor are invariant, see tg3_init_rings.
5635  *
5636  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5637  * posting buffers we only dirty the first cache line of the RX
5638  * descriptor (containing the address).  Whereas for the RX status
5639  * buffers the cpu only reads the last cacheline of the RX descriptor
5640  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5641  */
5642 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5643 			    u32 opaque_key, u32 dest_idx_unmasked)
5644 {
5645 	struct tg3_rx_buffer_desc *desc;
5646 	struct ring_info *map;
5647 	u8 *data;
5648 	dma_addr_t mapping;
5649 	int skb_size, data_size, dest_idx;
5650 
5651 	switch (opaque_key) {
5652 	case RXD_OPAQUE_RING_STD:
5653 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5654 		desc = &tpr->rx_std[dest_idx];
5655 		map = &tpr->rx_std_buffers[dest_idx];
5656 		data_size = tp->rx_pkt_map_sz;
5657 		break;
5658 
5659 	case RXD_OPAQUE_RING_JUMBO:
5660 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5661 		desc = &tpr->rx_jmb[dest_idx].std;
5662 		map = &tpr->rx_jmb_buffers[dest_idx];
5663 		data_size = TG3_RX_JMB_MAP_SZ;
5664 		break;
5665 
5666 	default:
5667 		return -EINVAL;
5668 	}
5669 
5670 	/* Do not overwrite any of the map or rp information
5671 	 * until we are sure we can commit to a new buffer.
5672 	 *
5673 	 * Callers depend upon this behavior and assume that
5674 	 * we leave everything unchanged if we fail.
5675 	 */
5676 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5677 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5678 	data = kmalloc(skb_size, GFP_ATOMIC);
5679 	if (!data)
5680 		return -ENOMEM;
5681 
5682 	mapping = pci_map_single(tp->pdev,
5683 				 data + TG3_RX_OFFSET(tp),
5684 				 data_size,
5685 				 PCI_DMA_FROMDEVICE);
5686 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5687 		kfree(data);
5688 		return -EIO;
5689 	}
5690 
5691 	map->data = data;
5692 	dma_unmap_addr_set(map, mapping, mapping);
5693 
5694 	desc->addr_hi = ((u64)mapping >> 32);
5695 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5696 
5697 	return data_size;
5698 }
5699 
5700 /* We only need to move over in the address because the other
5701  * members of the RX descriptor are invariant.  See notes above
5702  * tg3_alloc_rx_data for full details.
5703  */
5704 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5705 			   struct tg3_rx_prodring_set *dpr,
5706 			   u32 opaque_key, int src_idx,
5707 			   u32 dest_idx_unmasked)
5708 {
5709 	struct tg3 *tp = tnapi->tp;
5710 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5711 	struct ring_info *src_map, *dest_map;
5712 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5713 	int dest_idx;
5714 
5715 	switch (opaque_key) {
5716 	case RXD_OPAQUE_RING_STD:
5717 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5718 		dest_desc = &dpr->rx_std[dest_idx];
5719 		dest_map = &dpr->rx_std_buffers[dest_idx];
5720 		src_desc = &spr->rx_std[src_idx];
5721 		src_map = &spr->rx_std_buffers[src_idx];
5722 		break;
5723 
5724 	case RXD_OPAQUE_RING_JUMBO:
5725 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5726 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5727 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5728 		src_desc = &spr->rx_jmb[src_idx].std;
5729 		src_map = &spr->rx_jmb_buffers[src_idx];
5730 		break;
5731 
5732 	default:
5733 		return;
5734 	}
5735 
5736 	dest_map->data = src_map->data;
5737 	dma_unmap_addr_set(dest_map, mapping,
5738 			   dma_unmap_addr(src_map, mapping));
5739 	dest_desc->addr_hi = src_desc->addr_hi;
5740 	dest_desc->addr_lo = src_desc->addr_lo;
5741 
5742 	/* Ensure that the update to the skb happens after the physical
5743 	 * addresses have been transferred to the new BD location.
5744 	 */
5745 	smp_wmb();
5746 
5747 	src_map->data = NULL;
5748 }
5749 
5750 /* The RX ring scheme is composed of multiple rings which post fresh
5751  * buffers to the chip, and one special ring the chip uses to report
5752  * status back to the host.
5753  *
5754  * The special ring reports the status of received packets to the
5755  * host.  The chip does not write into the original descriptor the
5756  * RX buffer was obtained from.  The chip simply takes the original
5757  * descriptor as provided by the host, updates the status and length
5758  * field, then writes this into the next status ring entry.
5759  *
5760  * Each ring the host uses to post buffers to the chip is described
5761  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5762  * it is first placed into the on-chip ram.  When the packet's length
5763  * is known, it walks down the TG3_BDINFO entries to select the ring.
5764  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5765  * which is within the range of the new packet's length is chosen.
5766  *
5767  * The "separate ring for rx status" scheme may sound queer, but it makes
5768  * sense from a cache coherency perspective.  If only the host writes
5769  * to the buffer post rings, and only the chip writes to the rx status
5770  * rings, then cache lines never move beyond shared-modified state.
5771  * If both the host and chip were to write into the same ring, cache line
5772  * eviction could occur since both entities want it in an exclusive state.
5773  */
5774 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5775 {
5776 	struct tg3 *tp = tnapi->tp;
5777 	u32 work_mask, rx_std_posted = 0;
5778 	u32 std_prod_idx, jmb_prod_idx;
5779 	u32 sw_idx = tnapi->rx_rcb_ptr;
5780 	u16 hw_idx;
5781 	int received;
5782 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5783 
5784 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5785 	/*
5786 	 * We need to order the read of hw_idx and the read of
5787 	 * the opaque cookie.
5788 	 */
5789 	rmb();
5790 	work_mask = 0;
5791 	received = 0;
5792 	std_prod_idx = tpr->rx_std_prod_idx;
5793 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5794 	while (sw_idx != hw_idx && budget > 0) {
5795 		struct ring_info *ri;
5796 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5797 		unsigned int len;
5798 		struct sk_buff *skb;
5799 		dma_addr_t dma_addr;
5800 		u32 opaque_key, desc_idx, *post_ptr;
5801 		u8 *data;
5802 
5803 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5804 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5805 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5806 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5807 			dma_addr = dma_unmap_addr(ri, mapping);
5808 			data = ri->data;
5809 			post_ptr = &std_prod_idx;
5810 			rx_std_posted++;
5811 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5812 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5813 			dma_addr = dma_unmap_addr(ri, mapping);
5814 			data = ri->data;
5815 			post_ptr = &jmb_prod_idx;
5816 		} else
5817 			goto next_pkt_nopost;
5818 
5819 		work_mask |= opaque_key;
5820 
5821 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5822 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5823 		drop_it:
5824 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5825 				       desc_idx, *post_ptr);
5826 		drop_it_no_recycle:
5827 			/* Other statistics kept track of by card. */
5828 			tp->rx_dropped++;
5829 			goto next_pkt;
5830 		}
5831 
5832 		prefetch(data + TG3_RX_OFFSET(tp));
5833 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5834 		      ETH_FCS_LEN;
5835 
5836 		if (len > TG3_RX_COPY_THRESH(tp)) {
5837 			int skb_size;
5838 
5839 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5840 						    *post_ptr);
5841 			if (skb_size < 0)
5842 				goto drop_it;
5843 
5844 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5845 					 PCI_DMA_FROMDEVICE);
5846 
5847 			skb = build_skb(data);
5848 			if (!skb) {
5849 				kfree(data);
5850 				goto drop_it_no_recycle;
5851 			}
5852 			skb_reserve(skb, TG3_RX_OFFSET(tp));
5853 			/* Ensure that the update to the data happens
5854 			 * after the usage of the old DMA mapping.
5855 			 */
5856 			smp_wmb();
5857 
5858 			ri->data = NULL;
5859 
5860 		} else {
5861 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5862 				       desc_idx, *post_ptr);
5863 
5864 			skb = netdev_alloc_skb(tp->dev,
5865 					       len + TG3_RAW_IP_ALIGN);
5866 			if (skb == NULL)
5867 				goto drop_it_no_recycle;
5868 
5869 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
5870 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5871 			memcpy(skb->data,
5872 			       data + TG3_RX_OFFSET(tp),
5873 			       len);
5874 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5875 		}
5876 
5877 		skb_put(skb, len);
5878 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5879 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5880 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5881 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5882 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5883 		else
5884 			skb_checksum_none_assert(skb);
5885 
5886 		skb->protocol = eth_type_trans(skb, tp->dev);
5887 
5888 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5889 		    skb->protocol != htons(ETH_P_8021Q)) {
5890 			dev_kfree_skb(skb);
5891 			goto drop_it_no_recycle;
5892 		}
5893 
5894 		if (desc->type_flags & RXD_FLAG_VLAN &&
5895 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5896 			__vlan_hwaccel_put_tag(skb,
5897 					       desc->err_vlan & RXD_VLAN_MASK);
5898 
5899 		napi_gro_receive(&tnapi->napi, skb);
5900 
5901 		received++;
5902 		budget--;
5903 
5904 next_pkt:
5905 		(*post_ptr)++;
5906 
5907 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5908 			tpr->rx_std_prod_idx = std_prod_idx &
5909 					       tp->rx_std_ring_mask;
5910 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5911 				     tpr->rx_std_prod_idx);
5912 			work_mask &= ~RXD_OPAQUE_RING_STD;
5913 			rx_std_posted = 0;
5914 		}
5915 next_pkt_nopost:
5916 		sw_idx++;
5917 		sw_idx &= tp->rx_ret_ring_mask;
5918 
5919 		/* Refresh hw_idx to see if there is new work */
5920 		if (sw_idx == hw_idx) {
5921 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5922 			rmb();
5923 		}
5924 	}
5925 
5926 	/* ACK the status ring. */
5927 	tnapi->rx_rcb_ptr = sw_idx;
5928 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5929 
5930 	/* Refill RX ring(s). */
5931 	if (!tg3_flag(tp, ENABLE_RSS)) {
5932 		/* Sync BD data before updating mailbox */
5933 		wmb();
5934 
5935 		if (work_mask & RXD_OPAQUE_RING_STD) {
5936 			tpr->rx_std_prod_idx = std_prod_idx &
5937 					       tp->rx_std_ring_mask;
5938 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5939 				     tpr->rx_std_prod_idx);
5940 		}
5941 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5942 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5943 					       tp->rx_jmb_ring_mask;
5944 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5945 				     tpr->rx_jmb_prod_idx);
5946 		}
5947 		mmiowb();
5948 	} else if (work_mask) {
5949 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5950 		 * updated before the producer indices can be updated.
5951 		 */
5952 		smp_wmb();
5953 
5954 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5955 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5956 
5957 		if (tnapi != &tp->napi[1]) {
5958 			tp->rx_refill = true;
5959 			napi_schedule(&tp->napi[1].napi);
5960 		}
5961 	}
5962 
5963 	return received;
5964 }
5965 
5966 static void tg3_poll_link(struct tg3 *tp)
5967 {
5968 	/* handle link change and other phy events */
5969 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5970 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5971 
5972 		if (sblk->status & SD_STATUS_LINK_CHG) {
5973 			sblk->status = SD_STATUS_UPDATED |
5974 				       (sblk->status & ~SD_STATUS_LINK_CHG);
5975 			spin_lock(&tp->lock);
5976 			if (tg3_flag(tp, USE_PHYLIB)) {
5977 				tw32_f(MAC_STATUS,
5978 				     (MAC_STATUS_SYNC_CHANGED |
5979 				      MAC_STATUS_CFG_CHANGED |
5980 				      MAC_STATUS_MI_COMPLETION |
5981 				      MAC_STATUS_LNKSTATE_CHANGED));
5982 				udelay(40);
5983 			} else
5984 				tg3_setup_phy(tp, 0);
5985 			spin_unlock(&tp->lock);
5986 		}
5987 	}
5988 }
5989 
5990 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5991 				struct tg3_rx_prodring_set *dpr,
5992 				struct tg3_rx_prodring_set *spr)
5993 {
5994 	u32 si, di, cpycnt, src_prod_idx;
5995 	int i, err = 0;
5996 
5997 	while (1) {
5998 		src_prod_idx = spr->rx_std_prod_idx;
5999 
6000 		/* Make sure updates to the rx_std_buffers[] entries and the
6001 		 * standard producer index are seen in the correct order.
6002 		 */
6003 		smp_rmb();
6004 
6005 		if (spr->rx_std_cons_idx == src_prod_idx)
6006 			break;
6007 
6008 		if (spr->rx_std_cons_idx < src_prod_idx)
6009 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6010 		else
6011 			cpycnt = tp->rx_std_ring_mask + 1 -
6012 				 spr->rx_std_cons_idx;
6013 
6014 		cpycnt = min(cpycnt,
6015 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6016 
6017 		si = spr->rx_std_cons_idx;
6018 		di = dpr->rx_std_prod_idx;
6019 
6020 		for (i = di; i < di + cpycnt; i++) {
6021 			if (dpr->rx_std_buffers[i].data) {
6022 				cpycnt = i - di;
6023 				err = -ENOSPC;
6024 				break;
6025 			}
6026 		}
6027 
6028 		if (!cpycnt)
6029 			break;
6030 
6031 		/* Ensure that updates to the rx_std_buffers ring and the
6032 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6033 		 * ordered correctly WRT the skb check above.
6034 		 */
6035 		smp_rmb();
6036 
6037 		memcpy(&dpr->rx_std_buffers[di],
6038 		       &spr->rx_std_buffers[si],
6039 		       cpycnt * sizeof(struct ring_info));
6040 
6041 		for (i = 0; i < cpycnt; i++, di++, si++) {
6042 			struct tg3_rx_buffer_desc *sbd, *dbd;
6043 			sbd = &spr->rx_std[si];
6044 			dbd = &dpr->rx_std[di];
6045 			dbd->addr_hi = sbd->addr_hi;
6046 			dbd->addr_lo = sbd->addr_lo;
6047 		}
6048 
6049 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6050 				       tp->rx_std_ring_mask;
6051 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6052 				       tp->rx_std_ring_mask;
6053 	}
6054 
6055 	while (1) {
6056 		src_prod_idx = spr->rx_jmb_prod_idx;
6057 
6058 		/* Make sure updates to the rx_jmb_buffers[] entries and
6059 		 * the jumbo producer index are seen in the correct order.
6060 		 */
6061 		smp_rmb();
6062 
6063 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6064 			break;
6065 
6066 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6067 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6068 		else
6069 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6070 				 spr->rx_jmb_cons_idx;
6071 
6072 		cpycnt = min(cpycnt,
6073 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6074 
6075 		si = spr->rx_jmb_cons_idx;
6076 		di = dpr->rx_jmb_prod_idx;
6077 
6078 		for (i = di; i < di + cpycnt; i++) {
6079 			if (dpr->rx_jmb_buffers[i].data) {
6080 				cpycnt = i - di;
6081 				err = -ENOSPC;
6082 				break;
6083 			}
6084 		}
6085 
6086 		if (!cpycnt)
6087 			break;
6088 
6089 		/* Ensure that updates to the rx_jmb_buffers ring and the
6090 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6091 		 * ordered correctly WRT the skb check above.
6092 		 */
6093 		smp_rmb();
6094 
6095 		memcpy(&dpr->rx_jmb_buffers[di],
6096 		       &spr->rx_jmb_buffers[si],
6097 		       cpycnt * sizeof(struct ring_info));
6098 
6099 		for (i = 0; i < cpycnt; i++, di++, si++) {
6100 			struct tg3_rx_buffer_desc *sbd, *dbd;
6101 			sbd = &spr->rx_jmb[si].std;
6102 			dbd = &dpr->rx_jmb[di].std;
6103 			dbd->addr_hi = sbd->addr_hi;
6104 			dbd->addr_lo = sbd->addr_lo;
6105 		}
6106 
6107 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6108 				       tp->rx_jmb_ring_mask;
6109 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6110 				       tp->rx_jmb_ring_mask;
6111 	}
6112 
6113 	return err;
6114 }
6115 
6116 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6117 {
6118 	struct tg3 *tp = tnapi->tp;
6119 
6120 	/* run TX completion thread */
6121 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6122 		tg3_tx(tnapi);
6123 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6124 			return work_done;
6125 	}
6126 
6127 	/* run RX thread, within the bounds set by NAPI.
6128 	 * All RX "locking" is done by ensuring outside
6129 	 * code synchronizes with tg3->napi.poll()
6130 	 */
6131 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6132 		work_done += tg3_rx(tnapi, budget - work_done);
6133 
6134 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6135 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6136 		int i, err = 0;
6137 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6138 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6139 
6140 		tp->rx_refill = false;
6141 		for (i = 1; i < tp->irq_cnt; i++)
6142 			err |= tg3_rx_prodring_xfer(tp, dpr,
6143 						    &tp->napi[i].prodring);
6144 
6145 		wmb();
6146 
6147 		if (std_prod_idx != dpr->rx_std_prod_idx)
6148 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6149 				     dpr->rx_std_prod_idx);
6150 
6151 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6152 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6153 				     dpr->rx_jmb_prod_idx);
6154 
6155 		mmiowb();
6156 
6157 		if (err)
6158 			tw32_f(HOSTCC_MODE, tp->coal_now);
6159 	}
6160 
6161 	return work_done;
6162 }
6163 
6164 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6165 {
6166 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6167 		schedule_work(&tp->reset_task);
6168 }
6169 
6170 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6171 {
6172 	cancel_work_sync(&tp->reset_task);
6173 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6174 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6175 }
6176 
6177 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6178 {
6179 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6180 	struct tg3 *tp = tnapi->tp;
6181 	int work_done = 0;
6182 	struct tg3_hw_status *sblk = tnapi->hw_status;
6183 
6184 	while (1) {
6185 		work_done = tg3_poll_work(tnapi, work_done, budget);
6186 
6187 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6188 			goto tx_recovery;
6189 
6190 		if (unlikely(work_done >= budget))
6191 			break;
6192 
6193 		/* tp->last_tag is used in tg3_int_reenable() below
6194 		 * to tell the hw how much work has been processed,
6195 		 * so we must read it before checking for more work.
6196 		 */
6197 		tnapi->last_tag = sblk->status_tag;
6198 		tnapi->last_irq_tag = tnapi->last_tag;
6199 		rmb();
6200 
6201 		/* check for RX/TX work to do */
6202 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6203 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6204 
6205 			/* This test here is not race free, but will reduce
6206 			 * the number of interrupts by looping again.
6207 			 */
6208 			if (tnapi == &tp->napi[1] && tp->rx_refill)
6209 				continue;
6210 
6211 			napi_complete(napi);
6212 			/* Reenable interrupts. */
6213 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6214 
6215 			/* This test here is synchronized by napi_schedule()
6216 			 * and napi_complete() to close the race condition.
6217 			 */
6218 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6219 				tw32(HOSTCC_MODE, tp->coalesce_mode |
6220 						  HOSTCC_MODE_ENABLE |
6221 						  tnapi->coal_now);
6222 			}
6223 			mmiowb();
6224 			break;
6225 		}
6226 	}
6227 
6228 	return work_done;
6229 
6230 tx_recovery:
6231 	/* work_done is guaranteed to be less than budget. */
6232 	napi_complete(napi);
6233 	tg3_reset_task_schedule(tp);
6234 	return work_done;
6235 }
6236 
6237 static void tg3_process_error(struct tg3 *tp)
6238 {
6239 	u32 val;
6240 	bool real_error = false;
6241 
6242 	if (tg3_flag(tp, ERROR_PROCESSED))
6243 		return;
6244 
6245 	/* Check Flow Attention register */
6246 	val = tr32(HOSTCC_FLOW_ATTN);
6247 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6248 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6249 		real_error = true;
6250 	}
6251 
6252 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6253 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6254 		real_error = true;
6255 	}
6256 
6257 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6258 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6259 		real_error = true;
6260 	}
6261 
6262 	if (!real_error)
6263 		return;
6264 
6265 	tg3_dump_state(tp);
6266 
6267 	tg3_flag_set(tp, ERROR_PROCESSED);
6268 	tg3_reset_task_schedule(tp);
6269 }
6270 
6271 static int tg3_poll(struct napi_struct *napi, int budget)
6272 {
6273 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6274 	struct tg3 *tp = tnapi->tp;
6275 	int work_done = 0;
6276 	struct tg3_hw_status *sblk = tnapi->hw_status;
6277 
6278 	while (1) {
6279 		if (sblk->status & SD_STATUS_ERROR)
6280 			tg3_process_error(tp);
6281 
6282 		tg3_poll_link(tp);
6283 
6284 		work_done = tg3_poll_work(tnapi, work_done, budget);
6285 
6286 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6287 			goto tx_recovery;
6288 
6289 		if (unlikely(work_done >= budget))
6290 			break;
6291 
6292 		if (tg3_flag(tp, TAGGED_STATUS)) {
6293 			/* tp->last_tag is used in tg3_int_reenable() below
6294 			 * to tell the hw how much work has been processed,
6295 			 * so we must read it before checking for more work.
6296 			 */
6297 			tnapi->last_tag = sblk->status_tag;
6298 			tnapi->last_irq_tag = tnapi->last_tag;
6299 			rmb();
6300 		} else
6301 			sblk->status &= ~SD_STATUS_UPDATED;
6302 
6303 		if (likely(!tg3_has_work(tnapi))) {
6304 			napi_complete(napi);
6305 			tg3_int_reenable(tnapi);
6306 			break;
6307 		}
6308 	}
6309 
6310 	return work_done;
6311 
6312 tx_recovery:
6313 	/* work_done is guaranteed to be less than budget. */
6314 	napi_complete(napi);
6315 	tg3_reset_task_schedule(tp);
6316 	return work_done;
6317 }
6318 
6319 static void tg3_napi_disable(struct tg3 *tp)
6320 {
6321 	int i;
6322 
6323 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6324 		napi_disable(&tp->napi[i].napi);
6325 }
6326 
6327 static void tg3_napi_enable(struct tg3 *tp)
6328 {
6329 	int i;
6330 
6331 	for (i = 0; i < tp->irq_cnt; i++)
6332 		napi_enable(&tp->napi[i].napi);
6333 }
6334 
6335 static void tg3_napi_init(struct tg3 *tp)
6336 {
6337 	int i;
6338 
6339 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6340 	for (i = 1; i < tp->irq_cnt; i++)
6341 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6342 }
6343 
6344 static void tg3_napi_fini(struct tg3 *tp)
6345 {
6346 	int i;
6347 
6348 	for (i = 0; i < tp->irq_cnt; i++)
6349 		netif_napi_del(&tp->napi[i].napi);
6350 }
6351 
6352 static inline void tg3_netif_stop(struct tg3 *tp)
6353 {
6354 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6355 	tg3_napi_disable(tp);
6356 	netif_tx_disable(tp->dev);
6357 }
6358 
6359 static inline void tg3_netif_start(struct tg3 *tp)
6360 {
6361 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6362 	 * appropriate so long as all callers are assured to
6363 	 * have free tx slots (such as after tg3_init_hw)
6364 	 */
6365 	netif_tx_wake_all_queues(tp->dev);
6366 
6367 	tg3_napi_enable(tp);
6368 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6369 	tg3_enable_ints(tp);
6370 }
6371 
6372 static void tg3_irq_quiesce(struct tg3 *tp)
6373 {
6374 	int i;
6375 
6376 	BUG_ON(tp->irq_sync);
6377 
6378 	tp->irq_sync = 1;
6379 	smp_mb();
6380 
6381 	for (i = 0; i < tp->irq_cnt; i++)
6382 		synchronize_irq(tp->napi[i].irq_vec);
6383 }
6384 
6385 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6386  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6387  * with as well.  Most of the time, this is not necessary except when
6388  * shutting down the device.
6389  */
6390 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6391 {
6392 	spin_lock_bh(&tp->lock);
6393 	if (irq_sync)
6394 		tg3_irq_quiesce(tp);
6395 }
6396 
6397 static inline void tg3_full_unlock(struct tg3 *tp)
6398 {
6399 	spin_unlock_bh(&tp->lock);
6400 }
6401 
6402 /* One-shot MSI handler - Chip automatically disables interrupt
6403  * after sending MSI so driver doesn't have to do it.
6404  */
6405 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6406 {
6407 	struct tg3_napi *tnapi = dev_id;
6408 	struct tg3 *tp = tnapi->tp;
6409 
6410 	prefetch(tnapi->hw_status);
6411 	if (tnapi->rx_rcb)
6412 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6413 
6414 	if (likely(!tg3_irq_sync(tp)))
6415 		napi_schedule(&tnapi->napi);
6416 
6417 	return IRQ_HANDLED;
6418 }
6419 
6420 /* MSI ISR - No need to check for interrupt sharing and no need to
6421  * flush status block and interrupt mailbox. PCI ordering rules
6422  * guarantee that MSI will arrive after the status block.
6423  */
6424 static irqreturn_t tg3_msi(int irq, void *dev_id)
6425 {
6426 	struct tg3_napi *tnapi = dev_id;
6427 	struct tg3 *tp = tnapi->tp;
6428 
6429 	prefetch(tnapi->hw_status);
6430 	if (tnapi->rx_rcb)
6431 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6432 	/*
6433 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6434 	 * chip-internal interrupt pending events.
6435 	 * Writing non-zero to intr-mbox-0 additional tells the
6436 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6437 	 * event coalescing.
6438 	 */
6439 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6440 	if (likely(!tg3_irq_sync(tp)))
6441 		napi_schedule(&tnapi->napi);
6442 
6443 	return IRQ_RETVAL(1);
6444 }
6445 
6446 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6447 {
6448 	struct tg3_napi *tnapi = dev_id;
6449 	struct tg3 *tp = tnapi->tp;
6450 	struct tg3_hw_status *sblk = tnapi->hw_status;
6451 	unsigned int handled = 1;
6452 
6453 	/* In INTx mode, it is possible for the interrupt to arrive at
6454 	 * the CPU before the status block posted prior to the interrupt.
6455 	 * Reading the PCI State register will confirm whether the
6456 	 * interrupt is ours and will flush the status block.
6457 	 */
6458 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6459 		if (tg3_flag(tp, CHIP_RESETTING) ||
6460 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6461 			handled = 0;
6462 			goto out;
6463 		}
6464 	}
6465 
6466 	/*
6467 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6468 	 * chip-internal interrupt pending events.
6469 	 * Writing non-zero to intr-mbox-0 additional tells the
6470 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6471 	 * event coalescing.
6472 	 *
6473 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6474 	 * spurious interrupts.  The flush impacts performance but
6475 	 * excessive spurious interrupts can be worse in some cases.
6476 	 */
6477 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6478 	if (tg3_irq_sync(tp))
6479 		goto out;
6480 	sblk->status &= ~SD_STATUS_UPDATED;
6481 	if (likely(tg3_has_work(tnapi))) {
6482 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6483 		napi_schedule(&tnapi->napi);
6484 	} else {
6485 		/* No work, shared interrupt perhaps?  re-enable
6486 		 * interrupts, and flush that PCI write
6487 		 */
6488 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6489 			       0x00000000);
6490 	}
6491 out:
6492 	return IRQ_RETVAL(handled);
6493 }
6494 
6495 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6496 {
6497 	struct tg3_napi *tnapi = dev_id;
6498 	struct tg3 *tp = tnapi->tp;
6499 	struct tg3_hw_status *sblk = tnapi->hw_status;
6500 	unsigned int handled = 1;
6501 
6502 	/* In INTx mode, it is possible for the interrupt to arrive at
6503 	 * the CPU before the status block posted prior to the interrupt.
6504 	 * Reading the PCI State register will confirm whether the
6505 	 * interrupt is ours and will flush the status block.
6506 	 */
6507 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6508 		if (tg3_flag(tp, CHIP_RESETTING) ||
6509 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6510 			handled = 0;
6511 			goto out;
6512 		}
6513 	}
6514 
6515 	/*
6516 	 * writing any value to intr-mbox-0 clears PCI INTA# and
6517 	 * chip-internal interrupt pending events.
6518 	 * writing non-zero to intr-mbox-0 additional tells the
6519 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6520 	 * event coalescing.
6521 	 *
6522 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6523 	 * spurious interrupts.  The flush impacts performance but
6524 	 * excessive spurious interrupts can be worse in some cases.
6525 	 */
6526 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6527 
6528 	/*
6529 	 * In a shared interrupt configuration, sometimes other devices'
6530 	 * interrupts will scream.  We record the current status tag here
6531 	 * so that the above check can report that the screaming interrupts
6532 	 * are unhandled.  Eventually they will be silenced.
6533 	 */
6534 	tnapi->last_irq_tag = sblk->status_tag;
6535 
6536 	if (tg3_irq_sync(tp))
6537 		goto out;
6538 
6539 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6540 
6541 	napi_schedule(&tnapi->napi);
6542 
6543 out:
6544 	return IRQ_RETVAL(handled);
6545 }
6546 
6547 /* ISR for interrupt test */
6548 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6549 {
6550 	struct tg3_napi *tnapi = dev_id;
6551 	struct tg3 *tp = tnapi->tp;
6552 	struct tg3_hw_status *sblk = tnapi->hw_status;
6553 
6554 	if ((sblk->status & SD_STATUS_UPDATED) ||
6555 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6556 		tg3_disable_ints(tp);
6557 		return IRQ_RETVAL(1);
6558 	}
6559 	return IRQ_RETVAL(0);
6560 }
6561 
6562 #ifdef CONFIG_NET_POLL_CONTROLLER
6563 static void tg3_poll_controller(struct net_device *dev)
6564 {
6565 	int i;
6566 	struct tg3 *tp = netdev_priv(dev);
6567 
6568 	for (i = 0; i < tp->irq_cnt; i++)
6569 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6570 }
6571 #endif
6572 
6573 static void tg3_tx_timeout(struct net_device *dev)
6574 {
6575 	struct tg3 *tp = netdev_priv(dev);
6576 
6577 	if (netif_msg_tx_err(tp)) {
6578 		netdev_err(dev, "transmit timed out, resetting\n");
6579 		tg3_dump_state(tp);
6580 	}
6581 
6582 	tg3_reset_task_schedule(tp);
6583 }
6584 
6585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6587 {
6588 	u32 base = (u32) mapping & 0xffffffff;
6589 
6590 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6591 }
6592 
6593 /* Test for DMA addresses > 40-bit */
6594 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6595 					  int len)
6596 {
6597 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6598 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6599 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6600 	return 0;
6601 #else
6602 	return 0;
6603 #endif
6604 }
6605 
6606 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6607 				 dma_addr_t mapping, u32 len, u32 flags,
6608 				 u32 mss, u32 vlan)
6609 {
6610 	txbd->addr_hi = ((u64) mapping >> 32);
6611 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6612 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6613 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6614 }
6615 
6616 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6617 			    dma_addr_t map, u32 len, u32 flags,
6618 			    u32 mss, u32 vlan)
6619 {
6620 	struct tg3 *tp = tnapi->tp;
6621 	bool hwbug = false;
6622 
6623 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6624 		hwbug = true;
6625 
6626 	if (tg3_4g_overflow_test(map, len))
6627 		hwbug = true;
6628 
6629 	if (tg3_40bit_overflow_test(tp, map, len))
6630 		hwbug = true;
6631 
6632 	if (tp->dma_limit) {
6633 		u32 prvidx = *entry;
6634 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6635 		while (len > tp->dma_limit && *budget) {
6636 			u32 frag_len = tp->dma_limit;
6637 			len -= tp->dma_limit;
6638 
6639 			/* Avoid the 8byte DMA problem */
6640 			if (len <= 8) {
6641 				len += tp->dma_limit / 2;
6642 				frag_len = tp->dma_limit / 2;
6643 			}
6644 
6645 			tnapi->tx_buffers[*entry].fragmented = true;
6646 
6647 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6648 				      frag_len, tmp_flag, mss, vlan);
6649 			*budget -= 1;
6650 			prvidx = *entry;
6651 			*entry = NEXT_TX(*entry);
6652 
6653 			map += frag_len;
6654 		}
6655 
6656 		if (len) {
6657 			if (*budget) {
6658 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6659 					      len, flags, mss, vlan);
6660 				*budget -= 1;
6661 				*entry = NEXT_TX(*entry);
6662 			} else {
6663 				hwbug = true;
6664 				tnapi->tx_buffers[prvidx].fragmented = false;
6665 			}
6666 		}
6667 	} else {
6668 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6669 			      len, flags, mss, vlan);
6670 		*entry = NEXT_TX(*entry);
6671 	}
6672 
6673 	return hwbug;
6674 }
6675 
6676 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6677 {
6678 	int i;
6679 	struct sk_buff *skb;
6680 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6681 
6682 	skb = txb->skb;
6683 	txb->skb = NULL;
6684 
6685 	pci_unmap_single(tnapi->tp->pdev,
6686 			 dma_unmap_addr(txb, mapping),
6687 			 skb_headlen(skb),
6688 			 PCI_DMA_TODEVICE);
6689 
6690 	while (txb->fragmented) {
6691 		txb->fragmented = false;
6692 		entry = NEXT_TX(entry);
6693 		txb = &tnapi->tx_buffers[entry];
6694 	}
6695 
6696 	for (i = 0; i <= last; i++) {
6697 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6698 
6699 		entry = NEXT_TX(entry);
6700 		txb = &tnapi->tx_buffers[entry];
6701 
6702 		pci_unmap_page(tnapi->tp->pdev,
6703 			       dma_unmap_addr(txb, mapping),
6704 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6705 
6706 		while (txb->fragmented) {
6707 			txb->fragmented = false;
6708 			entry = NEXT_TX(entry);
6709 			txb = &tnapi->tx_buffers[entry];
6710 		}
6711 	}
6712 }
6713 
6714 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6715 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6716 				       struct sk_buff **pskb,
6717 				       u32 *entry, u32 *budget,
6718 				       u32 base_flags, u32 mss, u32 vlan)
6719 {
6720 	struct tg3 *tp = tnapi->tp;
6721 	struct sk_buff *new_skb, *skb = *pskb;
6722 	dma_addr_t new_addr = 0;
6723 	int ret = 0;
6724 
6725 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6726 		new_skb = skb_copy(skb, GFP_ATOMIC);
6727 	else {
6728 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6729 
6730 		new_skb = skb_copy_expand(skb,
6731 					  skb_headroom(skb) + more_headroom,
6732 					  skb_tailroom(skb), GFP_ATOMIC);
6733 	}
6734 
6735 	if (!new_skb) {
6736 		ret = -1;
6737 	} else {
6738 		/* New SKB is guaranteed to be linear. */
6739 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6740 					  PCI_DMA_TODEVICE);
6741 		/* Make sure the mapping succeeded */
6742 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6743 			dev_kfree_skb(new_skb);
6744 			ret = -1;
6745 		} else {
6746 			u32 save_entry = *entry;
6747 
6748 			base_flags |= TXD_FLAG_END;
6749 
6750 			tnapi->tx_buffers[*entry].skb = new_skb;
6751 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6752 					   mapping, new_addr);
6753 
6754 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6755 					    new_skb->len, base_flags,
6756 					    mss, vlan)) {
6757 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6758 				dev_kfree_skb(new_skb);
6759 				ret = -1;
6760 			}
6761 		}
6762 	}
6763 
6764 	dev_kfree_skb(skb);
6765 	*pskb = new_skb;
6766 	return ret;
6767 }
6768 
6769 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6770 
6771 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6772  * TSO header is greater than 80 bytes.
6773  */
6774 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6775 {
6776 	struct sk_buff *segs, *nskb;
6777 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6778 
6779 	/* Estimate the number of fragments in the worst case */
6780 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6781 		netif_stop_queue(tp->dev);
6782 
6783 		/* netif_tx_stop_queue() must be done before checking
6784 		 * checking tx index in tg3_tx_avail() below, because in
6785 		 * tg3_tx(), we update tx index before checking for
6786 		 * netif_tx_queue_stopped().
6787 		 */
6788 		smp_mb();
6789 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6790 			return NETDEV_TX_BUSY;
6791 
6792 		netif_wake_queue(tp->dev);
6793 	}
6794 
6795 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6796 	if (IS_ERR(segs))
6797 		goto tg3_tso_bug_end;
6798 
6799 	do {
6800 		nskb = segs;
6801 		segs = segs->next;
6802 		nskb->next = NULL;
6803 		tg3_start_xmit(nskb, tp->dev);
6804 	} while (segs);
6805 
6806 tg3_tso_bug_end:
6807 	dev_kfree_skb(skb);
6808 
6809 	return NETDEV_TX_OK;
6810 }
6811 
6812 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6813  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6814  */
6815 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6816 {
6817 	struct tg3 *tp = netdev_priv(dev);
6818 	u32 len, entry, base_flags, mss, vlan = 0;
6819 	u32 budget;
6820 	int i = -1, would_hit_hwbug;
6821 	dma_addr_t mapping;
6822 	struct tg3_napi *tnapi;
6823 	struct netdev_queue *txq;
6824 	unsigned int last;
6825 
6826 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6827 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6828 	if (tg3_flag(tp, ENABLE_TSS))
6829 		tnapi++;
6830 
6831 	budget = tg3_tx_avail(tnapi);
6832 
6833 	/* We are running in BH disabled context with netif_tx_lock
6834 	 * and TX reclaim runs via tp->napi.poll inside of a software
6835 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6836 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6837 	 */
6838 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6839 		if (!netif_tx_queue_stopped(txq)) {
6840 			netif_tx_stop_queue(txq);
6841 
6842 			/* This is a hard error, log it. */
6843 			netdev_err(dev,
6844 				   "BUG! Tx Ring full when queue awake!\n");
6845 		}
6846 		return NETDEV_TX_BUSY;
6847 	}
6848 
6849 	entry = tnapi->tx_prod;
6850 	base_flags = 0;
6851 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6852 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6853 
6854 	mss = skb_shinfo(skb)->gso_size;
6855 	if (mss) {
6856 		struct iphdr *iph;
6857 		u32 tcp_opt_len, hdr_len;
6858 
6859 		if (skb_header_cloned(skb) &&
6860 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6861 			goto drop;
6862 
6863 		iph = ip_hdr(skb);
6864 		tcp_opt_len = tcp_optlen(skb);
6865 
6866 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6867 
6868 		if (!skb_is_gso_v6(skb)) {
6869 			iph->check = 0;
6870 			iph->tot_len = htons(mss + hdr_len);
6871 		}
6872 
6873 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6874 		    tg3_flag(tp, TSO_BUG))
6875 			return tg3_tso_bug(tp, skb);
6876 
6877 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6878 			       TXD_FLAG_CPU_POST_DMA);
6879 
6880 		if (tg3_flag(tp, HW_TSO_1) ||
6881 		    tg3_flag(tp, HW_TSO_2) ||
6882 		    tg3_flag(tp, HW_TSO_3)) {
6883 			tcp_hdr(skb)->check = 0;
6884 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6885 		} else
6886 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6887 								 iph->daddr, 0,
6888 								 IPPROTO_TCP,
6889 								 0);
6890 
6891 		if (tg3_flag(tp, HW_TSO_3)) {
6892 			mss |= (hdr_len & 0xc) << 12;
6893 			if (hdr_len & 0x10)
6894 				base_flags |= 0x00000010;
6895 			base_flags |= (hdr_len & 0x3e0) << 5;
6896 		} else if (tg3_flag(tp, HW_TSO_2))
6897 			mss |= hdr_len << 9;
6898 		else if (tg3_flag(tp, HW_TSO_1) ||
6899 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6900 			if (tcp_opt_len || iph->ihl > 5) {
6901 				int tsflags;
6902 
6903 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6904 				mss |= (tsflags << 11);
6905 			}
6906 		} else {
6907 			if (tcp_opt_len || iph->ihl > 5) {
6908 				int tsflags;
6909 
6910 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6911 				base_flags |= tsflags << 12;
6912 			}
6913 		}
6914 	}
6915 
6916 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6917 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6918 		base_flags |= TXD_FLAG_JMB_PKT;
6919 
6920 	if (vlan_tx_tag_present(skb)) {
6921 		base_flags |= TXD_FLAG_VLAN;
6922 		vlan = vlan_tx_tag_get(skb);
6923 	}
6924 
6925 	len = skb_headlen(skb);
6926 
6927 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6928 	if (pci_dma_mapping_error(tp->pdev, mapping))
6929 		goto drop;
6930 
6931 
6932 	tnapi->tx_buffers[entry].skb = skb;
6933 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6934 
6935 	would_hit_hwbug = 0;
6936 
6937 	if (tg3_flag(tp, 5701_DMA_BUG))
6938 		would_hit_hwbug = 1;
6939 
6940 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6941 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6942 			    mss, vlan)) {
6943 		would_hit_hwbug = 1;
6944 	} else if (skb_shinfo(skb)->nr_frags > 0) {
6945 		u32 tmp_mss = mss;
6946 
6947 		if (!tg3_flag(tp, HW_TSO_1) &&
6948 		    !tg3_flag(tp, HW_TSO_2) &&
6949 		    !tg3_flag(tp, HW_TSO_3))
6950 			tmp_mss = 0;
6951 
6952 		/* Now loop through additional data
6953 		 * fragments, and queue them.
6954 		 */
6955 		last = skb_shinfo(skb)->nr_frags - 1;
6956 		for (i = 0; i <= last; i++) {
6957 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6958 
6959 			len = skb_frag_size(frag);
6960 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6961 						   len, DMA_TO_DEVICE);
6962 
6963 			tnapi->tx_buffers[entry].skb = NULL;
6964 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6965 					   mapping);
6966 			if (dma_mapping_error(&tp->pdev->dev, mapping))
6967 				goto dma_error;
6968 
6969 			if (!budget ||
6970 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6971 					    len, base_flags |
6972 					    ((i == last) ? TXD_FLAG_END : 0),
6973 					    tmp_mss, vlan)) {
6974 				would_hit_hwbug = 1;
6975 				break;
6976 			}
6977 		}
6978 	}
6979 
6980 	if (would_hit_hwbug) {
6981 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6982 
6983 		/* If the workaround fails due to memory/mapping
6984 		 * failure, silently drop this packet.
6985 		 */
6986 		entry = tnapi->tx_prod;
6987 		budget = tg3_tx_avail(tnapi);
6988 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6989 						base_flags, mss, vlan))
6990 			goto drop_nofree;
6991 	}
6992 
6993 	skb_tx_timestamp(skb);
6994 	netdev_tx_sent_queue(txq, skb->len);
6995 
6996 	/* Sync BD data before updating mailbox */
6997 	wmb();
6998 
6999 	/* Packets are ready, update Tx producer idx local and on card. */
7000 	tw32_tx_mbox(tnapi->prodmbox, entry);
7001 
7002 	tnapi->tx_prod = entry;
7003 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7004 		netif_tx_stop_queue(txq);
7005 
7006 		/* netif_tx_stop_queue() must be done before checking
7007 		 * checking tx index in tg3_tx_avail() below, because in
7008 		 * tg3_tx(), we update tx index before checking for
7009 		 * netif_tx_queue_stopped().
7010 		 */
7011 		smp_mb();
7012 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7013 			netif_tx_wake_queue(txq);
7014 	}
7015 
7016 	mmiowb();
7017 	return NETDEV_TX_OK;
7018 
7019 dma_error:
7020 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7021 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7022 drop:
7023 	dev_kfree_skb(skb);
7024 drop_nofree:
7025 	tp->tx_dropped++;
7026 	return NETDEV_TX_OK;
7027 }
7028 
7029 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7030 {
7031 	if (enable) {
7032 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7033 				  MAC_MODE_PORT_MODE_MASK);
7034 
7035 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7036 
7037 		if (!tg3_flag(tp, 5705_PLUS))
7038 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7039 
7040 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7041 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7042 		else
7043 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7044 	} else {
7045 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7046 
7047 		if (tg3_flag(tp, 5705_PLUS) ||
7048 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7049 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7050 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7051 	}
7052 
7053 	tw32(MAC_MODE, tp->mac_mode);
7054 	udelay(40);
7055 }
7056 
7057 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7058 {
7059 	u32 val, bmcr, mac_mode, ptest = 0;
7060 
7061 	tg3_phy_toggle_apd(tp, false);
7062 	tg3_phy_toggle_automdix(tp, 0);
7063 
7064 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7065 		return -EIO;
7066 
7067 	bmcr = BMCR_FULLDPLX;
7068 	switch (speed) {
7069 	case SPEED_10:
7070 		break;
7071 	case SPEED_100:
7072 		bmcr |= BMCR_SPEED100;
7073 		break;
7074 	case SPEED_1000:
7075 	default:
7076 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7077 			speed = SPEED_100;
7078 			bmcr |= BMCR_SPEED100;
7079 		} else {
7080 			speed = SPEED_1000;
7081 			bmcr |= BMCR_SPEED1000;
7082 		}
7083 	}
7084 
7085 	if (extlpbk) {
7086 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7087 			tg3_readphy(tp, MII_CTRL1000, &val);
7088 			val |= CTL1000_AS_MASTER |
7089 			       CTL1000_ENABLE_MASTER;
7090 			tg3_writephy(tp, MII_CTRL1000, val);
7091 		} else {
7092 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7093 				MII_TG3_FET_PTEST_TRIM_2;
7094 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7095 		}
7096 	} else
7097 		bmcr |= BMCR_LOOPBACK;
7098 
7099 	tg3_writephy(tp, MII_BMCR, bmcr);
7100 
7101 	/* The write needs to be flushed for the FETs */
7102 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7103 		tg3_readphy(tp, MII_BMCR, &bmcr);
7104 
7105 	udelay(40);
7106 
7107 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7108 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7109 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7110 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7111 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7112 
7113 		/* The write needs to be flushed for the AC131 */
7114 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7115 	}
7116 
7117 	/* Reset to prevent losing 1st rx packet intermittently */
7118 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7119 	    tg3_flag(tp, 5780_CLASS)) {
7120 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7121 		udelay(10);
7122 		tw32_f(MAC_RX_MODE, tp->rx_mode);
7123 	}
7124 
7125 	mac_mode = tp->mac_mode &
7126 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7127 	if (speed == SPEED_1000)
7128 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7129 	else
7130 		mac_mode |= MAC_MODE_PORT_MODE_MII;
7131 
7132 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7133 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7134 
7135 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7136 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7137 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7138 			mac_mode |= MAC_MODE_LINK_POLARITY;
7139 
7140 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7141 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7142 	}
7143 
7144 	tw32(MAC_MODE, mac_mode);
7145 	udelay(40);
7146 
7147 	return 0;
7148 }
7149 
7150 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7151 {
7152 	struct tg3 *tp = netdev_priv(dev);
7153 
7154 	if (features & NETIF_F_LOOPBACK) {
7155 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7156 			return;
7157 
7158 		spin_lock_bh(&tp->lock);
7159 		tg3_mac_loopback(tp, true);
7160 		netif_carrier_on(tp->dev);
7161 		spin_unlock_bh(&tp->lock);
7162 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7163 	} else {
7164 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7165 			return;
7166 
7167 		spin_lock_bh(&tp->lock);
7168 		tg3_mac_loopback(tp, false);
7169 		/* Force link status check */
7170 		tg3_setup_phy(tp, 1);
7171 		spin_unlock_bh(&tp->lock);
7172 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7173 	}
7174 }
7175 
7176 static netdev_features_t tg3_fix_features(struct net_device *dev,
7177 	netdev_features_t features)
7178 {
7179 	struct tg3 *tp = netdev_priv(dev);
7180 
7181 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7182 		features &= ~NETIF_F_ALL_TSO;
7183 
7184 	return features;
7185 }
7186 
7187 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7188 {
7189 	netdev_features_t changed = dev->features ^ features;
7190 
7191 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7192 		tg3_set_loopback(dev, features);
7193 
7194 	return 0;
7195 }
7196 
7197 static void tg3_rx_prodring_free(struct tg3 *tp,
7198 				 struct tg3_rx_prodring_set *tpr)
7199 {
7200 	int i;
7201 
7202 	if (tpr != &tp->napi[0].prodring) {
7203 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7204 		     i = (i + 1) & tp->rx_std_ring_mask)
7205 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7206 					tp->rx_pkt_map_sz);
7207 
7208 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7209 			for (i = tpr->rx_jmb_cons_idx;
7210 			     i != tpr->rx_jmb_prod_idx;
7211 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7212 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7213 						TG3_RX_JMB_MAP_SZ);
7214 			}
7215 		}
7216 
7217 		return;
7218 	}
7219 
7220 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7221 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7222 				tp->rx_pkt_map_sz);
7223 
7224 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7225 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7226 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7227 					TG3_RX_JMB_MAP_SZ);
7228 	}
7229 }
7230 
7231 /* Initialize rx rings for packet processing.
7232  *
7233  * The chip has been shut down and the driver detached from
7234  * the networking, so no interrupts or new tx packets will
7235  * end up in the driver.  tp->{tx,}lock are held and thus
7236  * we may not sleep.
7237  */
7238 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7239 				 struct tg3_rx_prodring_set *tpr)
7240 {
7241 	u32 i, rx_pkt_dma_sz;
7242 
7243 	tpr->rx_std_cons_idx = 0;
7244 	tpr->rx_std_prod_idx = 0;
7245 	tpr->rx_jmb_cons_idx = 0;
7246 	tpr->rx_jmb_prod_idx = 0;
7247 
7248 	if (tpr != &tp->napi[0].prodring) {
7249 		memset(&tpr->rx_std_buffers[0], 0,
7250 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7251 		if (tpr->rx_jmb_buffers)
7252 			memset(&tpr->rx_jmb_buffers[0], 0,
7253 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7254 		goto done;
7255 	}
7256 
7257 	/* Zero out all descriptors. */
7258 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7259 
7260 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7261 	if (tg3_flag(tp, 5780_CLASS) &&
7262 	    tp->dev->mtu > ETH_DATA_LEN)
7263 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7264 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7265 
7266 	/* Initialize invariants of the rings, we only set this
7267 	 * stuff once.  This works because the card does not
7268 	 * write into the rx buffer posting rings.
7269 	 */
7270 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7271 		struct tg3_rx_buffer_desc *rxd;
7272 
7273 		rxd = &tpr->rx_std[i];
7274 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7275 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7276 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7277 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7278 	}
7279 
7280 	/* Now allocate fresh SKBs for each rx ring. */
7281 	for (i = 0; i < tp->rx_pending; i++) {
7282 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7283 			netdev_warn(tp->dev,
7284 				    "Using a smaller RX standard ring. Only "
7285 				    "%d out of %d buffers were allocated "
7286 				    "successfully\n", i, tp->rx_pending);
7287 			if (i == 0)
7288 				goto initfail;
7289 			tp->rx_pending = i;
7290 			break;
7291 		}
7292 	}
7293 
7294 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7295 		goto done;
7296 
7297 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7298 
7299 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7300 		goto done;
7301 
7302 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7303 		struct tg3_rx_buffer_desc *rxd;
7304 
7305 		rxd = &tpr->rx_jmb[i].std;
7306 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7307 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7308 				  RXD_FLAG_JUMBO;
7309 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7310 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7311 	}
7312 
7313 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7314 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7315 			netdev_warn(tp->dev,
7316 				    "Using a smaller RX jumbo ring. Only %d "
7317 				    "out of %d buffers were allocated "
7318 				    "successfully\n", i, tp->rx_jumbo_pending);
7319 			if (i == 0)
7320 				goto initfail;
7321 			tp->rx_jumbo_pending = i;
7322 			break;
7323 		}
7324 	}
7325 
7326 done:
7327 	return 0;
7328 
7329 initfail:
7330 	tg3_rx_prodring_free(tp, tpr);
7331 	return -ENOMEM;
7332 }
7333 
7334 static void tg3_rx_prodring_fini(struct tg3 *tp,
7335 				 struct tg3_rx_prodring_set *tpr)
7336 {
7337 	kfree(tpr->rx_std_buffers);
7338 	tpr->rx_std_buffers = NULL;
7339 	kfree(tpr->rx_jmb_buffers);
7340 	tpr->rx_jmb_buffers = NULL;
7341 	if (tpr->rx_std) {
7342 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7343 				  tpr->rx_std, tpr->rx_std_mapping);
7344 		tpr->rx_std = NULL;
7345 	}
7346 	if (tpr->rx_jmb) {
7347 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7348 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7349 		tpr->rx_jmb = NULL;
7350 	}
7351 }
7352 
7353 static int tg3_rx_prodring_init(struct tg3 *tp,
7354 				struct tg3_rx_prodring_set *tpr)
7355 {
7356 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7357 				      GFP_KERNEL);
7358 	if (!tpr->rx_std_buffers)
7359 		return -ENOMEM;
7360 
7361 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7362 					 TG3_RX_STD_RING_BYTES(tp),
7363 					 &tpr->rx_std_mapping,
7364 					 GFP_KERNEL);
7365 	if (!tpr->rx_std)
7366 		goto err_out;
7367 
7368 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7369 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7370 					      GFP_KERNEL);
7371 		if (!tpr->rx_jmb_buffers)
7372 			goto err_out;
7373 
7374 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7375 						 TG3_RX_JMB_RING_BYTES(tp),
7376 						 &tpr->rx_jmb_mapping,
7377 						 GFP_KERNEL);
7378 		if (!tpr->rx_jmb)
7379 			goto err_out;
7380 	}
7381 
7382 	return 0;
7383 
7384 err_out:
7385 	tg3_rx_prodring_fini(tp, tpr);
7386 	return -ENOMEM;
7387 }
7388 
7389 /* Free up pending packets in all rx/tx rings.
7390  *
7391  * The chip has been shut down and the driver detached from
7392  * the networking, so no interrupts or new tx packets will
7393  * end up in the driver.  tp->{tx,}lock is not held and we are not
7394  * in an interrupt context and thus may sleep.
7395  */
7396 static void tg3_free_rings(struct tg3 *tp)
7397 {
7398 	int i, j;
7399 
7400 	for (j = 0; j < tp->irq_cnt; j++) {
7401 		struct tg3_napi *tnapi = &tp->napi[j];
7402 
7403 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7404 
7405 		if (!tnapi->tx_buffers)
7406 			continue;
7407 
7408 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7409 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7410 
7411 			if (!skb)
7412 				continue;
7413 
7414 			tg3_tx_skb_unmap(tnapi, i,
7415 					 skb_shinfo(skb)->nr_frags - 1);
7416 
7417 			dev_kfree_skb_any(skb);
7418 		}
7419 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7420 	}
7421 }
7422 
7423 /* Initialize tx/rx rings for packet processing.
7424  *
7425  * The chip has been shut down and the driver detached from
7426  * the networking, so no interrupts or new tx packets will
7427  * end up in the driver.  tp->{tx,}lock are held and thus
7428  * we may not sleep.
7429  */
7430 static int tg3_init_rings(struct tg3 *tp)
7431 {
7432 	int i;
7433 
7434 	/* Free up all the SKBs. */
7435 	tg3_free_rings(tp);
7436 
7437 	for (i = 0; i < tp->irq_cnt; i++) {
7438 		struct tg3_napi *tnapi = &tp->napi[i];
7439 
7440 		tnapi->last_tag = 0;
7441 		tnapi->last_irq_tag = 0;
7442 		tnapi->hw_status->status = 0;
7443 		tnapi->hw_status->status_tag = 0;
7444 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7445 
7446 		tnapi->tx_prod = 0;
7447 		tnapi->tx_cons = 0;
7448 		if (tnapi->tx_ring)
7449 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7450 
7451 		tnapi->rx_rcb_ptr = 0;
7452 		if (tnapi->rx_rcb)
7453 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7454 
7455 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7456 			tg3_free_rings(tp);
7457 			return -ENOMEM;
7458 		}
7459 	}
7460 
7461 	return 0;
7462 }
7463 
7464 /*
7465  * Must not be invoked with interrupt sources disabled and
7466  * the hardware shutdown down.
7467  */
7468 static void tg3_free_consistent(struct tg3 *tp)
7469 {
7470 	int i;
7471 
7472 	for (i = 0; i < tp->irq_cnt; i++) {
7473 		struct tg3_napi *tnapi = &tp->napi[i];
7474 
7475 		if (tnapi->tx_ring) {
7476 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7477 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7478 			tnapi->tx_ring = NULL;
7479 		}
7480 
7481 		kfree(tnapi->tx_buffers);
7482 		tnapi->tx_buffers = NULL;
7483 
7484 		if (tnapi->rx_rcb) {
7485 			dma_free_coherent(&tp->pdev->dev,
7486 					  TG3_RX_RCB_RING_BYTES(tp),
7487 					  tnapi->rx_rcb,
7488 					  tnapi->rx_rcb_mapping);
7489 			tnapi->rx_rcb = NULL;
7490 		}
7491 
7492 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7493 
7494 		if (tnapi->hw_status) {
7495 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7496 					  tnapi->hw_status,
7497 					  tnapi->status_mapping);
7498 			tnapi->hw_status = NULL;
7499 		}
7500 	}
7501 
7502 	if (tp->hw_stats) {
7503 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7504 				  tp->hw_stats, tp->stats_mapping);
7505 		tp->hw_stats = NULL;
7506 	}
7507 }
7508 
7509 /*
7510  * Must not be invoked with interrupt sources disabled and
7511  * the hardware shutdown down.  Can sleep.
7512  */
7513 static int tg3_alloc_consistent(struct tg3 *tp)
7514 {
7515 	int i;
7516 
7517 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7518 					  sizeof(struct tg3_hw_stats),
7519 					  &tp->stats_mapping,
7520 					  GFP_KERNEL);
7521 	if (!tp->hw_stats)
7522 		goto err_out;
7523 
7524 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7525 
7526 	for (i = 0; i < tp->irq_cnt; i++) {
7527 		struct tg3_napi *tnapi = &tp->napi[i];
7528 		struct tg3_hw_status *sblk;
7529 
7530 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7531 						      TG3_HW_STATUS_SIZE,
7532 						      &tnapi->status_mapping,
7533 						      GFP_KERNEL);
7534 		if (!tnapi->hw_status)
7535 			goto err_out;
7536 
7537 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7538 		sblk = tnapi->hw_status;
7539 
7540 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7541 			goto err_out;
7542 
7543 		/* If multivector TSS is enabled, vector 0 does not handle
7544 		 * tx interrupts.  Don't allocate any resources for it.
7545 		 */
7546 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7547 		    (i && tg3_flag(tp, ENABLE_TSS))) {
7548 			tnapi->tx_buffers = kzalloc(
7549 					       sizeof(struct tg3_tx_ring_info) *
7550 					       TG3_TX_RING_SIZE, GFP_KERNEL);
7551 			if (!tnapi->tx_buffers)
7552 				goto err_out;
7553 
7554 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7555 							    TG3_TX_RING_BYTES,
7556 							&tnapi->tx_desc_mapping,
7557 							    GFP_KERNEL);
7558 			if (!tnapi->tx_ring)
7559 				goto err_out;
7560 		}
7561 
7562 		/*
7563 		 * When RSS is enabled, the status block format changes
7564 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7565 		 * and "rx_mini_consumer" members get mapped to the
7566 		 * other three rx return ring producer indexes.
7567 		 */
7568 		switch (i) {
7569 		default:
7570 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7571 			break;
7572 		case 2:
7573 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7574 			break;
7575 		case 3:
7576 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7577 			break;
7578 		case 4:
7579 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7580 			break;
7581 		}
7582 
7583 		/*
7584 		 * If multivector RSS is enabled, vector 0 does not handle
7585 		 * rx or tx interrupts.  Don't allocate any resources for it.
7586 		 */
7587 		if (!i && tg3_flag(tp, ENABLE_RSS))
7588 			continue;
7589 
7590 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7591 						   TG3_RX_RCB_RING_BYTES(tp),
7592 						   &tnapi->rx_rcb_mapping,
7593 						   GFP_KERNEL);
7594 		if (!tnapi->rx_rcb)
7595 			goto err_out;
7596 
7597 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7598 	}
7599 
7600 	return 0;
7601 
7602 err_out:
7603 	tg3_free_consistent(tp);
7604 	return -ENOMEM;
7605 }
7606 
7607 #define MAX_WAIT_CNT 1000
7608 
7609 /* To stop a block, clear the enable bit and poll till it
7610  * clears.  tp->lock is held.
7611  */
7612 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7613 {
7614 	unsigned int i;
7615 	u32 val;
7616 
7617 	if (tg3_flag(tp, 5705_PLUS)) {
7618 		switch (ofs) {
7619 		case RCVLSC_MODE:
7620 		case DMAC_MODE:
7621 		case MBFREE_MODE:
7622 		case BUFMGR_MODE:
7623 		case MEMARB_MODE:
7624 			/* We can't enable/disable these bits of the
7625 			 * 5705/5750, just say success.
7626 			 */
7627 			return 0;
7628 
7629 		default:
7630 			break;
7631 		}
7632 	}
7633 
7634 	val = tr32(ofs);
7635 	val &= ~enable_bit;
7636 	tw32_f(ofs, val);
7637 
7638 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7639 		udelay(100);
7640 		val = tr32(ofs);
7641 		if ((val & enable_bit) == 0)
7642 			break;
7643 	}
7644 
7645 	if (i == MAX_WAIT_CNT && !silent) {
7646 		dev_err(&tp->pdev->dev,
7647 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7648 			ofs, enable_bit);
7649 		return -ENODEV;
7650 	}
7651 
7652 	return 0;
7653 }
7654 
7655 /* tp->lock is held. */
7656 static int tg3_abort_hw(struct tg3 *tp, int silent)
7657 {
7658 	int i, err;
7659 
7660 	tg3_disable_ints(tp);
7661 
7662 	tp->rx_mode &= ~RX_MODE_ENABLE;
7663 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7664 	udelay(10);
7665 
7666 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7667 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7668 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7669 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7670 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7671 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7672 
7673 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7674 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7675 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7676 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7677 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7678 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7679 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7680 
7681 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7682 	tw32_f(MAC_MODE, tp->mac_mode);
7683 	udelay(40);
7684 
7685 	tp->tx_mode &= ~TX_MODE_ENABLE;
7686 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7687 
7688 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7689 		udelay(100);
7690 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7691 			break;
7692 	}
7693 	if (i >= MAX_WAIT_CNT) {
7694 		dev_err(&tp->pdev->dev,
7695 			"%s timed out, TX_MODE_ENABLE will not clear "
7696 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7697 		err |= -ENODEV;
7698 	}
7699 
7700 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7701 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7702 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7703 
7704 	tw32(FTQ_RESET, 0xffffffff);
7705 	tw32(FTQ_RESET, 0x00000000);
7706 
7707 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7708 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7709 
7710 	for (i = 0; i < tp->irq_cnt; i++) {
7711 		struct tg3_napi *tnapi = &tp->napi[i];
7712 		if (tnapi->hw_status)
7713 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7714 	}
7715 
7716 	return err;
7717 }
7718 
7719 /* Save PCI command register before chip reset */
7720 static void tg3_save_pci_state(struct tg3 *tp)
7721 {
7722 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7723 }
7724 
7725 /* Restore PCI state after chip reset */
7726 static void tg3_restore_pci_state(struct tg3 *tp)
7727 {
7728 	u32 val;
7729 
7730 	/* Re-enable indirect register accesses. */
7731 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7732 			       tp->misc_host_ctrl);
7733 
7734 	/* Set MAX PCI retry to zero. */
7735 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7736 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7737 	    tg3_flag(tp, PCIX_MODE))
7738 		val |= PCISTATE_RETRY_SAME_DMA;
7739 	/* Allow reads and writes to the APE register and memory space. */
7740 	if (tg3_flag(tp, ENABLE_APE))
7741 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7742 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7743 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7744 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7745 
7746 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7747 
7748 	if (!tg3_flag(tp, PCI_EXPRESS)) {
7749 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7750 				      tp->pci_cacheline_sz);
7751 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7752 				      tp->pci_lat_timer);
7753 	}
7754 
7755 	/* Make sure PCI-X relaxed ordering bit is clear. */
7756 	if (tg3_flag(tp, PCIX_MODE)) {
7757 		u16 pcix_cmd;
7758 
7759 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7760 				     &pcix_cmd);
7761 		pcix_cmd &= ~PCI_X_CMD_ERO;
7762 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7763 				      pcix_cmd);
7764 	}
7765 
7766 	if (tg3_flag(tp, 5780_CLASS)) {
7767 
7768 		/* Chip reset on 5780 will reset MSI enable bit,
7769 		 * so need to restore it.
7770 		 */
7771 		if (tg3_flag(tp, USING_MSI)) {
7772 			u16 ctrl;
7773 
7774 			pci_read_config_word(tp->pdev,
7775 					     tp->msi_cap + PCI_MSI_FLAGS,
7776 					     &ctrl);
7777 			pci_write_config_word(tp->pdev,
7778 					      tp->msi_cap + PCI_MSI_FLAGS,
7779 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7780 			val = tr32(MSGINT_MODE);
7781 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7782 		}
7783 	}
7784 }
7785 
7786 /* tp->lock is held. */
7787 static int tg3_chip_reset(struct tg3 *tp)
7788 {
7789 	u32 val;
7790 	void (*write_op)(struct tg3 *, u32, u32);
7791 	int i, err;
7792 
7793 	tg3_nvram_lock(tp);
7794 
7795 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7796 
7797 	/* No matching tg3_nvram_unlock() after this because
7798 	 * chip reset below will undo the nvram lock.
7799 	 */
7800 	tp->nvram_lock_cnt = 0;
7801 
7802 	/* GRC_MISC_CFG core clock reset will clear the memory
7803 	 * enable bit in PCI register 4 and the MSI enable bit
7804 	 * on some chips, so we save relevant registers here.
7805 	 */
7806 	tg3_save_pci_state(tp);
7807 
7808 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7809 	    tg3_flag(tp, 5755_PLUS))
7810 		tw32(GRC_FASTBOOT_PC, 0);
7811 
7812 	/*
7813 	 * We must avoid the readl() that normally takes place.
7814 	 * It locks machines, causes machine checks, and other
7815 	 * fun things.  So, temporarily disable the 5701
7816 	 * hardware workaround, while we do the reset.
7817 	 */
7818 	write_op = tp->write32;
7819 	if (write_op == tg3_write_flush_reg32)
7820 		tp->write32 = tg3_write32;
7821 
7822 	/* Prevent the irq handler from reading or writing PCI registers
7823 	 * during chip reset when the memory enable bit in the PCI command
7824 	 * register may be cleared.  The chip does not generate interrupt
7825 	 * at this time, but the irq handler may still be called due to irq
7826 	 * sharing or irqpoll.
7827 	 */
7828 	tg3_flag_set(tp, CHIP_RESETTING);
7829 	for (i = 0; i < tp->irq_cnt; i++) {
7830 		struct tg3_napi *tnapi = &tp->napi[i];
7831 		if (tnapi->hw_status) {
7832 			tnapi->hw_status->status = 0;
7833 			tnapi->hw_status->status_tag = 0;
7834 		}
7835 		tnapi->last_tag = 0;
7836 		tnapi->last_irq_tag = 0;
7837 	}
7838 	smp_mb();
7839 
7840 	for (i = 0; i < tp->irq_cnt; i++)
7841 		synchronize_irq(tp->napi[i].irq_vec);
7842 
7843 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7844 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7845 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7846 	}
7847 
7848 	/* do the reset */
7849 	val = GRC_MISC_CFG_CORECLK_RESET;
7850 
7851 	if (tg3_flag(tp, PCI_EXPRESS)) {
7852 		/* Force PCIe 1.0a mode */
7853 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7854 		    !tg3_flag(tp, 57765_PLUS) &&
7855 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7856 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7857 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7858 
7859 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7860 			tw32(GRC_MISC_CFG, (1 << 29));
7861 			val |= (1 << 29);
7862 		}
7863 	}
7864 
7865 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7866 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7867 		tw32(GRC_VCPU_EXT_CTRL,
7868 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7869 	}
7870 
7871 	/* Manage gphy power for all CPMU absent PCIe devices. */
7872 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7873 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7874 
7875 	tw32(GRC_MISC_CFG, val);
7876 
7877 	/* restore 5701 hardware bug workaround write method */
7878 	tp->write32 = write_op;
7879 
7880 	/* Unfortunately, we have to delay before the PCI read back.
7881 	 * Some 575X chips even will not respond to a PCI cfg access
7882 	 * when the reset command is given to the chip.
7883 	 *
7884 	 * How do these hardware designers expect things to work
7885 	 * properly if the PCI write is posted for a long period
7886 	 * of time?  It is always necessary to have some method by
7887 	 * which a register read back can occur to push the write
7888 	 * out which does the reset.
7889 	 *
7890 	 * For most tg3 variants the trick below was working.
7891 	 * Ho hum...
7892 	 */
7893 	udelay(120);
7894 
7895 	/* Flush PCI posted writes.  The normal MMIO registers
7896 	 * are inaccessible at this time so this is the only
7897 	 * way to make this reliably (actually, this is no longer
7898 	 * the case, see above).  I tried to use indirect
7899 	 * register read/write but this upset some 5701 variants.
7900 	 */
7901 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7902 
7903 	udelay(120);
7904 
7905 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7906 		u16 val16;
7907 
7908 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7909 			int i;
7910 			u32 cfg_val;
7911 
7912 			/* Wait for link training to complete.  */
7913 			for (i = 0; i < 5000; i++)
7914 				udelay(100);
7915 
7916 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7917 			pci_write_config_dword(tp->pdev, 0xc4,
7918 					       cfg_val | (1 << 15));
7919 		}
7920 
7921 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7922 		pci_read_config_word(tp->pdev,
7923 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7924 				     &val16);
7925 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7926 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7927 		/*
7928 		 * Older PCIe devices only support the 128 byte
7929 		 * MPS setting.  Enforce the restriction.
7930 		 */
7931 		if (!tg3_flag(tp, CPMU_PRESENT))
7932 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7933 		pci_write_config_word(tp->pdev,
7934 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7935 				      val16);
7936 
7937 		/* Clear error status */
7938 		pci_write_config_word(tp->pdev,
7939 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7940 				      PCI_EXP_DEVSTA_CED |
7941 				      PCI_EXP_DEVSTA_NFED |
7942 				      PCI_EXP_DEVSTA_FED |
7943 				      PCI_EXP_DEVSTA_URD);
7944 	}
7945 
7946 	tg3_restore_pci_state(tp);
7947 
7948 	tg3_flag_clear(tp, CHIP_RESETTING);
7949 	tg3_flag_clear(tp, ERROR_PROCESSED);
7950 
7951 	val = 0;
7952 	if (tg3_flag(tp, 5780_CLASS))
7953 		val = tr32(MEMARB_MODE);
7954 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7955 
7956 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7957 		tg3_stop_fw(tp);
7958 		tw32(0x5000, 0x400);
7959 	}
7960 
7961 	tw32(GRC_MODE, tp->grc_mode);
7962 
7963 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7964 		val = tr32(0xc4);
7965 
7966 		tw32(0xc4, val | (1 << 15));
7967 	}
7968 
7969 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7970 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7971 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7972 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7973 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7974 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7975 	}
7976 
7977 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7978 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7979 		val = tp->mac_mode;
7980 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7981 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7982 		val = tp->mac_mode;
7983 	} else
7984 		val = 0;
7985 
7986 	tw32_f(MAC_MODE, val);
7987 	udelay(40);
7988 
7989 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7990 
7991 	err = tg3_poll_fw(tp);
7992 	if (err)
7993 		return err;
7994 
7995 	tg3_mdio_start(tp);
7996 
7997 	if (tg3_flag(tp, PCI_EXPRESS) &&
7998 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7999 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8000 	    !tg3_flag(tp, 57765_PLUS)) {
8001 		val = tr32(0x7c00);
8002 
8003 		tw32(0x7c00, val | (1 << 25));
8004 	}
8005 
8006 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8007 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8008 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8009 	}
8010 
8011 	/* Reprobe ASF enable state.  */
8012 	tg3_flag_clear(tp, ENABLE_ASF);
8013 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8014 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8015 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8016 		u32 nic_cfg;
8017 
8018 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8019 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8020 			tg3_flag_set(tp, ENABLE_ASF);
8021 			tp->last_event_jiffies = jiffies;
8022 			if (tg3_flag(tp, 5750_PLUS))
8023 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8024 		}
8025 	}
8026 
8027 	return 0;
8028 }
8029 
8030 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8031 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8032 
8033 /* tp->lock is held. */
8034 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8035 {
8036 	int err;
8037 
8038 	tg3_stop_fw(tp);
8039 
8040 	tg3_write_sig_pre_reset(tp, kind);
8041 
8042 	tg3_abort_hw(tp, silent);
8043 	err = tg3_chip_reset(tp);
8044 
8045 	__tg3_set_mac_addr(tp, 0);
8046 
8047 	tg3_write_sig_legacy(tp, kind);
8048 	tg3_write_sig_post_reset(tp, kind);
8049 
8050 	if (tp->hw_stats) {
8051 		/* Save the stats across chip resets... */
8052 		tg3_get_nstats(tp, &tp->net_stats_prev);
8053 		tg3_get_estats(tp, &tp->estats_prev);
8054 
8055 		/* And make sure the next sample is new data */
8056 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8057 	}
8058 
8059 	if (err)
8060 		return err;
8061 
8062 	return 0;
8063 }
8064 
8065 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8066 {
8067 	struct tg3 *tp = netdev_priv(dev);
8068 	struct sockaddr *addr = p;
8069 	int err = 0, skip_mac_1 = 0;
8070 
8071 	if (!is_valid_ether_addr(addr->sa_data))
8072 		return -EADDRNOTAVAIL;
8073 
8074 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8075 
8076 	if (!netif_running(dev))
8077 		return 0;
8078 
8079 	if (tg3_flag(tp, ENABLE_ASF)) {
8080 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8081 
8082 		addr0_high = tr32(MAC_ADDR_0_HIGH);
8083 		addr0_low = tr32(MAC_ADDR_0_LOW);
8084 		addr1_high = tr32(MAC_ADDR_1_HIGH);
8085 		addr1_low = tr32(MAC_ADDR_1_LOW);
8086 
8087 		/* Skip MAC addr 1 if ASF is using it. */
8088 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8089 		    !(addr1_high == 0 && addr1_low == 0))
8090 			skip_mac_1 = 1;
8091 	}
8092 	spin_lock_bh(&tp->lock);
8093 	__tg3_set_mac_addr(tp, skip_mac_1);
8094 	spin_unlock_bh(&tp->lock);
8095 
8096 	return err;
8097 }
8098 
8099 /* tp->lock is held. */
8100 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8101 			   dma_addr_t mapping, u32 maxlen_flags,
8102 			   u32 nic_addr)
8103 {
8104 	tg3_write_mem(tp,
8105 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8106 		      ((u64) mapping >> 32));
8107 	tg3_write_mem(tp,
8108 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8109 		      ((u64) mapping & 0xffffffff));
8110 	tg3_write_mem(tp,
8111 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8112 		       maxlen_flags);
8113 
8114 	if (!tg3_flag(tp, 5705_PLUS))
8115 		tg3_write_mem(tp,
8116 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8117 			      nic_addr);
8118 }
8119 
8120 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8121 {
8122 	int i;
8123 
8124 	if (!tg3_flag(tp, ENABLE_TSS)) {
8125 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8126 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8127 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8128 	} else {
8129 		tw32(HOSTCC_TXCOL_TICKS, 0);
8130 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8131 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8132 	}
8133 
8134 	if (!tg3_flag(tp, ENABLE_RSS)) {
8135 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8136 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8137 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8138 	} else {
8139 		tw32(HOSTCC_RXCOL_TICKS, 0);
8140 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8141 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8142 	}
8143 
8144 	if (!tg3_flag(tp, 5705_PLUS)) {
8145 		u32 val = ec->stats_block_coalesce_usecs;
8146 
8147 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8148 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8149 
8150 		if (!netif_carrier_ok(tp->dev))
8151 			val = 0;
8152 
8153 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8154 	}
8155 
8156 	for (i = 0; i < tp->irq_cnt - 1; i++) {
8157 		u32 reg;
8158 
8159 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8160 		tw32(reg, ec->rx_coalesce_usecs);
8161 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8162 		tw32(reg, ec->rx_max_coalesced_frames);
8163 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8164 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8165 
8166 		if (tg3_flag(tp, ENABLE_TSS)) {
8167 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8168 			tw32(reg, ec->tx_coalesce_usecs);
8169 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8170 			tw32(reg, ec->tx_max_coalesced_frames);
8171 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8172 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8173 		}
8174 	}
8175 
8176 	for (; i < tp->irq_max - 1; i++) {
8177 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8178 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8179 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8180 
8181 		if (tg3_flag(tp, ENABLE_TSS)) {
8182 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8183 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8184 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8185 		}
8186 	}
8187 }
8188 
8189 /* tp->lock is held. */
8190 static void tg3_rings_reset(struct tg3 *tp)
8191 {
8192 	int i;
8193 	u32 stblk, txrcb, rxrcb, limit;
8194 	struct tg3_napi *tnapi = &tp->napi[0];
8195 
8196 	/* Disable all transmit rings but the first. */
8197 	if (!tg3_flag(tp, 5705_PLUS))
8198 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8199 	else if (tg3_flag(tp, 5717_PLUS))
8200 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8201 	else if (tg3_flag(tp, 57765_CLASS))
8202 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8203 	else
8204 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8205 
8206 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8207 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8208 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8209 			      BDINFO_FLAGS_DISABLED);
8210 
8211 
8212 	/* Disable all receive return rings but the first. */
8213 	if (tg3_flag(tp, 5717_PLUS))
8214 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8215 	else if (!tg3_flag(tp, 5705_PLUS))
8216 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8217 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8218 		 tg3_flag(tp, 57765_CLASS))
8219 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8220 	else
8221 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8222 
8223 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8224 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8225 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8226 			      BDINFO_FLAGS_DISABLED);
8227 
8228 	/* Disable interrupts */
8229 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8230 	tp->napi[0].chk_msi_cnt = 0;
8231 	tp->napi[0].last_rx_cons = 0;
8232 	tp->napi[0].last_tx_cons = 0;
8233 
8234 	/* Zero mailbox registers. */
8235 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8236 		for (i = 1; i < tp->irq_max; i++) {
8237 			tp->napi[i].tx_prod = 0;
8238 			tp->napi[i].tx_cons = 0;
8239 			if (tg3_flag(tp, ENABLE_TSS))
8240 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8241 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8242 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8243 			tp->napi[i].chk_msi_cnt = 0;
8244 			tp->napi[i].last_rx_cons = 0;
8245 			tp->napi[i].last_tx_cons = 0;
8246 		}
8247 		if (!tg3_flag(tp, ENABLE_TSS))
8248 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8249 	} else {
8250 		tp->napi[0].tx_prod = 0;
8251 		tp->napi[0].tx_cons = 0;
8252 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8253 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8254 	}
8255 
8256 	/* Make sure the NIC-based send BD rings are disabled. */
8257 	if (!tg3_flag(tp, 5705_PLUS)) {
8258 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8259 		for (i = 0; i < 16; i++)
8260 			tw32_tx_mbox(mbox + i * 8, 0);
8261 	}
8262 
8263 	txrcb = NIC_SRAM_SEND_RCB;
8264 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8265 
8266 	/* Clear status block in ram. */
8267 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8268 
8269 	/* Set status block DMA address */
8270 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8271 	     ((u64) tnapi->status_mapping >> 32));
8272 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8273 	     ((u64) tnapi->status_mapping & 0xffffffff));
8274 
8275 	if (tnapi->tx_ring) {
8276 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8277 			       (TG3_TX_RING_SIZE <<
8278 				BDINFO_FLAGS_MAXLEN_SHIFT),
8279 			       NIC_SRAM_TX_BUFFER_DESC);
8280 		txrcb += TG3_BDINFO_SIZE;
8281 	}
8282 
8283 	if (tnapi->rx_rcb) {
8284 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8285 			       (tp->rx_ret_ring_mask + 1) <<
8286 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8287 		rxrcb += TG3_BDINFO_SIZE;
8288 	}
8289 
8290 	stblk = HOSTCC_STATBLCK_RING1;
8291 
8292 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8293 		u64 mapping = (u64)tnapi->status_mapping;
8294 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8295 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8296 
8297 		/* Clear status block in ram. */
8298 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8299 
8300 		if (tnapi->tx_ring) {
8301 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8302 				       (TG3_TX_RING_SIZE <<
8303 					BDINFO_FLAGS_MAXLEN_SHIFT),
8304 				       NIC_SRAM_TX_BUFFER_DESC);
8305 			txrcb += TG3_BDINFO_SIZE;
8306 		}
8307 
8308 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8309 			       ((tp->rx_ret_ring_mask + 1) <<
8310 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8311 
8312 		stblk += 8;
8313 		rxrcb += TG3_BDINFO_SIZE;
8314 	}
8315 }
8316 
8317 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8318 {
8319 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8320 
8321 	if (!tg3_flag(tp, 5750_PLUS) ||
8322 	    tg3_flag(tp, 5780_CLASS) ||
8323 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8324 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8325 	    tg3_flag(tp, 57765_PLUS))
8326 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8327 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8328 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8329 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8330 	else
8331 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8332 
8333 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8334 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8335 
8336 	val = min(nic_rep_thresh, host_rep_thresh);
8337 	tw32(RCVBDI_STD_THRESH, val);
8338 
8339 	if (tg3_flag(tp, 57765_PLUS))
8340 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8341 
8342 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8343 		return;
8344 
8345 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8346 
8347 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8348 
8349 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8350 	tw32(RCVBDI_JUMBO_THRESH, val);
8351 
8352 	if (tg3_flag(tp, 57765_PLUS))
8353 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8354 }
8355 
8356 static inline u32 calc_crc(unsigned char *buf, int len)
8357 {
8358 	u32 reg;
8359 	u32 tmp;
8360 	int j, k;
8361 
8362 	reg = 0xffffffff;
8363 
8364 	for (j = 0; j < len; j++) {
8365 		reg ^= buf[j];
8366 
8367 		for (k = 0; k < 8; k++) {
8368 			tmp = reg & 0x01;
8369 
8370 			reg >>= 1;
8371 
8372 			if (tmp)
8373 				reg ^= 0xedb88320;
8374 		}
8375 	}
8376 
8377 	return ~reg;
8378 }
8379 
8380 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8381 {
8382 	/* accept or reject all multicast frames */
8383 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8384 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8385 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8386 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8387 }
8388 
8389 static void __tg3_set_rx_mode(struct net_device *dev)
8390 {
8391 	struct tg3 *tp = netdev_priv(dev);
8392 	u32 rx_mode;
8393 
8394 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8395 				  RX_MODE_KEEP_VLAN_TAG);
8396 
8397 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8398 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8399 	 * flag clear.
8400 	 */
8401 	if (!tg3_flag(tp, ENABLE_ASF))
8402 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8403 #endif
8404 
8405 	if (dev->flags & IFF_PROMISC) {
8406 		/* Promiscuous mode. */
8407 		rx_mode |= RX_MODE_PROMISC;
8408 	} else if (dev->flags & IFF_ALLMULTI) {
8409 		/* Accept all multicast. */
8410 		tg3_set_multi(tp, 1);
8411 	} else if (netdev_mc_empty(dev)) {
8412 		/* Reject all multicast. */
8413 		tg3_set_multi(tp, 0);
8414 	} else {
8415 		/* Accept one or more multicast(s). */
8416 		struct netdev_hw_addr *ha;
8417 		u32 mc_filter[4] = { 0, };
8418 		u32 regidx;
8419 		u32 bit;
8420 		u32 crc;
8421 
8422 		netdev_for_each_mc_addr(ha, dev) {
8423 			crc = calc_crc(ha->addr, ETH_ALEN);
8424 			bit = ~crc & 0x7f;
8425 			regidx = (bit & 0x60) >> 5;
8426 			bit &= 0x1f;
8427 			mc_filter[regidx] |= (1 << bit);
8428 		}
8429 
8430 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8431 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8432 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8433 		tw32(MAC_HASH_REG_3, mc_filter[3]);
8434 	}
8435 
8436 	if (rx_mode != tp->rx_mode) {
8437 		tp->rx_mode = rx_mode;
8438 		tw32_f(MAC_RX_MODE, rx_mode);
8439 		udelay(10);
8440 	}
8441 }
8442 
8443 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8444 {
8445 	int i;
8446 
8447 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8448 		tp->rss_ind_tbl[i] =
8449 			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8450 }
8451 
8452 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8453 {
8454 	int i;
8455 
8456 	if (!tg3_flag(tp, SUPPORT_MSIX))
8457 		return;
8458 
8459 	if (tp->irq_cnt <= 2) {
8460 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8461 		return;
8462 	}
8463 
8464 	/* Validate table against current IRQ count */
8465 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8466 		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8467 			break;
8468 	}
8469 
8470 	if (i != TG3_RSS_INDIR_TBL_SIZE)
8471 		tg3_rss_init_dflt_indir_tbl(tp);
8472 }
8473 
8474 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8475 {
8476 	int i = 0;
8477 	u32 reg = MAC_RSS_INDIR_TBL_0;
8478 
8479 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8480 		u32 val = tp->rss_ind_tbl[i];
8481 		i++;
8482 		for (; i % 8; i++) {
8483 			val <<= 4;
8484 			val |= tp->rss_ind_tbl[i];
8485 		}
8486 		tw32(reg, val);
8487 		reg += 4;
8488 	}
8489 }
8490 
8491 /* tp->lock is held. */
8492 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8493 {
8494 	u32 val, rdmac_mode;
8495 	int i, err, limit;
8496 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8497 
8498 	tg3_disable_ints(tp);
8499 
8500 	tg3_stop_fw(tp);
8501 
8502 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8503 
8504 	if (tg3_flag(tp, INIT_COMPLETE))
8505 		tg3_abort_hw(tp, 1);
8506 
8507 	/* Enable MAC control of LPI */
8508 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8509 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8510 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8511 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8512 
8513 		tw32_f(TG3_CPMU_EEE_CTRL,
8514 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8515 
8516 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8517 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8518 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8519 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8520 
8521 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8522 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8523 
8524 		if (tg3_flag(tp, ENABLE_APE))
8525 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8526 
8527 		tw32_f(TG3_CPMU_EEE_MODE, val);
8528 
8529 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8530 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8531 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8532 
8533 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8534 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8535 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8536 	}
8537 
8538 	if (reset_phy)
8539 		tg3_phy_reset(tp);
8540 
8541 	err = tg3_chip_reset(tp);
8542 	if (err)
8543 		return err;
8544 
8545 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8546 
8547 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8548 		val = tr32(TG3_CPMU_CTRL);
8549 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8550 		tw32(TG3_CPMU_CTRL, val);
8551 
8552 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8553 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8554 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8555 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8556 
8557 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8558 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8559 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8560 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8561 
8562 		val = tr32(TG3_CPMU_HST_ACC);
8563 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8564 		val |= CPMU_HST_ACC_MACCLK_6_25;
8565 		tw32(TG3_CPMU_HST_ACC, val);
8566 	}
8567 
8568 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8569 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8570 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8571 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8572 		tw32(PCIE_PWR_MGMT_THRESH, val);
8573 
8574 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8575 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8576 
8577 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8578 
8579 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8580 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8581 	}
8582 
8583 	if (tg3_flag(tp, L1PLLPD_EN)) {
8584 		u32 grc_mode = tr32(GRC_MODE);
8585 
8586 		/* Access the lower 1K of PL PCIE block registers. */
8587 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8588 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8589 
8590 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8591 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8592 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8593 
8594 		tw32(GRC_MODE, grc_mode);
8595 	}
8596 
8597 	if (tg3_flag(tp, 57765_CLASS)) {
8598 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8599 			u32 grc_mode = tr32(GRC_MODE);
8600 
8601 			/* Access the lower 1K of PL PCIE block registers. */
8602 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8603 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8604 
8605 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8606 				   TG3_PCIE_PL_LO_PHYCTL5);
8607 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8608 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8609 
8610 			tw32(GRC_MODE, grc_mode);
8611 		}
8612 
8613 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8614 			u32 grc_mode = tr32(GRC_MODE);
8615 
8616 			/* Access the lower 1K of DL PCIE block registers. */
8617 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8618 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8619 
8620 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8621 				   TG3_PCIE_DL_LO_FTSMAX);
8622 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8623 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8624 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8625 
8626 			tw32(GRC_MODE, grc_mode);
8627 		}
8628 
8629 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8630 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8631 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8632 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8633 	}
8634 
8635 	/* This works around an issue with Athlon chipsets on
8636 	 * B3 tigon3 silicon.  This bit has no effect on any
8637 	 * other revision.  But do not set this on PCI Express
8638 	 * chips and don't even touch the clocks if the CPMU is present.
8639 	 */
8640 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8641 		if (!tg3_flag(tp, PCI_EXPRESS))
8642 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8643 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8644 	}
8645 
8646 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8647 	    tg3_flag(tp, PCIX_MODE)) {
8648 		val = tr32(TG3PCI_PCISTATE);
8649 		val |= PCISTATE_RETRY_SAME_DMA;
8650 		tw32(TG3PCI_PCISTATE, val);
8651 	}
8652 
8653 	if (tg3_flag(tp, ENABLE_APE)) {
8654 		/* Allow reads and writes to the
8655 		 * APE register and memory space.
8656 		 */
8657 		val = tr32(TG3PCI_PCISTATE);
8658 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8659 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8660 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8661 		tw32(TG3PCI_PCISTATE, val);
8662 	}
8663 
8664 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8665 		/* Enable some hw fixes.  */
8666 		val = tr32(TG3PCI_MSI_DATA);
8667 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8668 		tw32(TG3PCI_MSI_DATA, val);
8669 	}
8670 
8671 	/* Descriptor ring init may make accesses to the
8672 	 * NIC SRAM area to setup the TX descriptors, so we
8673 	 * can only do this after the hardware has been
8674 	 * successfully reset.
8675 	 */
8676 	err = tg3_init_rings(tp);
8677 	if (err)
8678 		return err;
8679 
8680 	if (tg3_flag(tp, 57765_PLUS)) {
8681 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8682 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8683 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8684 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8685 		if (!tg3_flag(tp, 57765_CLASS) &&
8686 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8687 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8688 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8689 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8690 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8691 		/* This value is determined during the probe time DMA
8692 		 * engine test, tg3_test_dma.
8693 		 */
8694 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8695 	}
8696 
8697 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8698 			  GRC_MODE_4X_NIC_SEND_RINGS |
8699 			  GRC_MODE_NO_TX_PHDR_CSUM |
8700 			  GRC_MODE_NO_RX_PHDR_CSUM);
8701 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8702 
8703 	/* Pseudo-header checksum is done by hardware logic and not
8704 	 * the offload processers, so make the chip do the pseudo-
8705 	 * header checksums on receive.  For transmit it is more
8706 	 * convenient to do the pseudo-header checksum in software
8707 	 * as Linux does that on transmit for us in all cases.
8708 	 */
8709 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8710 
8711 	tw32(GRC_MODE,
8712 	     tp->grc_mode |
8713 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8714 
8715 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
8716 	val = tr32(GRC_MISC_CFG);
8717 	val &= ~0xff;
8718 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8719 	tw32(GRC_MISC_CFG, val);
8720 
8721 	/* Initialize MBUF/DESC pool. */
8722 	if (tg3_flag(tp, 5750_PLUS)) {
8723 		/* Do nothing.  */
8724 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8725 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8726 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8727 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8728 		else
8729 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8730 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8731 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8732 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8733 		int fw_len;
8734 
8735 		fw_len = tp->fw_len;
8736 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8737 		tw32(BUFMGR_MB_POOL_ADDR,
8738 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8739 		tw32(BUFMGR_MB_POOL_SIZE,
8740 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8741 	}
8742 
8743 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8744 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8745 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8746 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8747 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8748 		tw32(BUFMGR_MB_HIGH_WATER,
8749 		     tp->bufmgr_config.mbuf_high_water);
8750 	} else {
8751 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8752 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8753 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8754 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8755 		tw32(BUFMGR_MB_HIGH_WATER,
8756 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8757 	}
8758 	tw32(BUFMGR_DMA_LOW_WATER,
8759 	     tp->bufmgr_config.dma_low_water);
8760 	tw32(BUFMGR_DMA_HIGH_WATER,
8761 	     tp->bufmgr_config.dma_high_water);
8762 
8763 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8764 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8765 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8766 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8767 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8768 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8769 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8770 	tw32(BUFMGR_MODE, val);
8771 	for (i = 0; i < 2000; i++) {
8772 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8773 			break;
8774 		udelay(10);
8775 	}
8776 	if (i >= 2000) {
8777 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8778 		return -ENODEV;
8779 	}
8780 
8781 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8782 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8783 
8784 	tg3_setup_rxbd_thresholds(tp);
8785 
8786 	/* Initialize TG3_BDINFO's at:
8787 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8788 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8789 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8790 	 *
8791 	 * like so:
8792 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8793 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8794 	 *                              ring attribute flags
8795 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8796 	 *
8797 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8798 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8799 	 *
8800 	 * The size of each ring is fixed in the firmware, but the location is
8801 	 * configurable.
8802 	 */
8803 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8804 	     ((u64) tpr->rx_std_mapping >> 32));
8805 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8806 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8807 	if (!tg3_flag(tp, 5717_PLUS))
8808 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8809 		     NIC_SRAM_RX_BUFFER_DESC);
8810 
8811 	/* Disable the mini ring */
8812 	if (!tg3_flag(tp, 5705_PLUS))
8813 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8814 		     BDINFO_FLAGS_DISABLED);
8815 
8816 	/* Program the jumbo buffer descriptor ring control
8817 	 * blocks on those devices that have them.
8818 	 */
8819 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8820 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8821 
8822 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8823 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8824 			     ((u64) tpr->rx_jmb_mapping >> 32));
8825 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8826 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8827 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8828 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8829 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8830 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8831 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8832 			    tg3_flag(tp, 57765_CLASS))
8833 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8834 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8835 		} else {
8836 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8837 			     BDINFO_FLAGS_DISABLED);
8838 		}
8839 
8840 		if (tg3_flag(tp, 57765_PLUS)) {
8841 			val = TG3_RX_STD_RING_SIZE(tp);
8842 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8843 			val |= (TG3_RX_STD_DMA_SZ << 2);
8844 		} else
8845 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8846 	} else
8847 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8848 
8849 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8850 
8851 	tpr->rx_std_prod_idx = tp->rx_pending;
8852 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8853 
8854 	tpr->rx_jmb_prod_idx =
8855 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8856 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8857 
8858 	tg3_rings_reset(tp);
8859 
8860 	/* Initialize MAC address and backoff seed. */
8861 	__tg3_set_mac_addr(tp, 0);
8862 
8863 	/* MTU + ethernet header + FCS + optional VLAN tag */
8864 	tw32(MAC_RX_MTU_SIZE,
8865 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8866 
8867 	/* The slot time is changed by tg3_setup_phy if we
8868 	 * run at gigabit with half duplex.
8869 	 */
8870 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8871 	      (6 << TX_LENGTHS_IPG_SHIFT) |
8872 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8873 
8874 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8875 		val |= tr32(MAC_TX_LENGTHS) &
8876 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8877 			TX_LENGTHS_CNT_DWN_VAL_MSK);
8878 
8879 	tw32(MAC_TX_LENGTHS, val);
8880 
8881 	/* Receive rules. */
8882 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8883 	tw32(RCVLPC_CONFIG, 0x0181);
8884 
8885 	/* Calculate RDMAC_MODE setting early, we need it to determine
8886 	 * the RCVLPC_STATE_ENABLE mask.
8887 	 */
8888 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8889 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8890 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8891 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8892 		      RDMAC_MODE_LNGREAD_ENAB);
8893 
8894 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8895 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8896 
8897 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8898 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8899 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8900 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8901 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8902 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8903 
8904 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8905 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8906 		if (tg3_flag(tp, TSO_CAPABLE) &&
8907 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8908 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8909 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8910 			   !tg3_flag(tp, IS_5788)) {
8911 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8912 		}
8913 	}
8914 
8915 	if (tg3_flag(tp, PCI_EXPRESS))
8916 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8917 
8918 	if (tg3_flag(tp, HW_TSO_1) ||
8919 	    tg3_flag(tp, HW_TSO_2) ||
8920 	    tg3_flag(tp, HW_TSO_3))
8921 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8922 
8923 	if (tg3_flag(tp, 57765_PLUS) ||
8924 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8925 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8926 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8927 
8928 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8929 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8930 
8931 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8932 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8933 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8934 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8935 	    tg3_flag(tp, 57765_PLUS)) {
8936 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
8937 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8938 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8939 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8940 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8941 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8942 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8943 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8944 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8945 		}
8946 		tw32(TG3_RDMA_RSRVCTRL_REG,
8947 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8948 	}
8949 
8950 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8951 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8952 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8953 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8954 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8955 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8956 	}
8957 
8958 	/* Receive/send statistics. */
8959 	if (tg3_flag(tp, 5750_PLUS)) {
8960 		val = tr32(RCVLPC_STATS_ENABLE);
8961 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
8962 		tw32(RCVLPC_STATS_ENABLE, val);
8963 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8964 		   tg3_flag(tp, TSO_CAPABLE)) {
8965 		val = tr32(RCVLPC_STATS_ENABLE);
8966 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8967 		tw32(RCVLPC_STATS_ENABLE, val);
8968 	} else {
8969 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8970 	}
8971 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8972 	tw32(SNDDATAI_STATSENAB, 0xffffff);
8973 	tw32(SNDDATAI_STATSCTRL,
8974 	     (SNDDATAI_SCTRL_ENABLE |
8975 	      SNDDATAI_SCTRL_FASTUPD));
8976 
8977 	/* Setup host coalescing engine. */
8978 	tw32(HOSTCC_MODE, 0);
8979 	for (i = 0; i < 2000; i++) {
8980 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8981 			break;
8982 		udelay(10);
8983 	}
8984 
8985 	__tg3_set_coalesce(tp, &tp->coal);
8986 
8987 	if (!tg3_flag(tp, 5705_PLUS)) {
8988 		/* Status/statistics block address.  See tg3_timer,
8989 		 * the tg3_periodic_fetch_stats call there, and
8990 		 * tg3_get_stats to see how this works for 5705/5750 chips.
8991 		 */
8992 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8993 		     ((u64) tp->stats_mapping >> 32));
8994 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8995 		     ((u64) tp->stats_mapping & 0xffffffff));
8996 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8997 
8998 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8999 
9000 		/* Clear statistics and status block memory areas */
9001 		for (i = NIC_SRAM_STATS_BLK;
9002 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9003 		     i += sizeof(u32)) {
9004 			tg3_write_mem(tp, i, 0);
9005 			udelay(40);
9006 		}
9007 	}
9008 
9009 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9010 
9011 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9012 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9013 	if (!tg3_flag(tp, 5705_PLUS))
9014 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9015 
9016 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9017 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9018 		/* reset to prevent losing 1st rx packet intermittently */
9019 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9020 		udelay(10);
9021 	}
9022 
9023 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9024 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9025 			MAC_MODE_FHDE_ENABLE;
9026 	if (tg3_flag(tp, ENABLE_APE))
9027 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9028 	if (!tg3_flag(tp, 5705_PLUS) &&
9029 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9030 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9031 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9032 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9033 	udelay(40);
9034 
9035 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9036 	 * If TG3_FLAG_IS_NIC is zero, we should read the
9037 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9038 	 * whether used as inputs or outputs, are set by boot code after
9039 	 * reset.
9040 	 */
9041 	if (!tg3_flag(tp, IS_NIC)) {
9042 		u32 gpio_mask;
9043 
9044 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9045 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9046 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9047 
9048 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9049 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9050 				     GRC_LCLCTRL_GPIO_OUTPUT3;
9051 
9052 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9053 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9054 
9055 		tp->grc_local_ctrl &= ~gpio_mask;
9056 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9057 
9058 		/* GPIO1 must be driven high for eeprom write protect */
9059 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9060 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9061 					       GRC_LCLCTRL_GPIO_OUTPUT1);
9062 	}
9063 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9064 	udelay(100);
9065 
9066 	if (tg3_flag(tp, USING_MSIX)) {
9067 		val = tr32(MSGINT_MODE);
9068 		val |= MSGINT_MODE_ENABLE;
9069 		if (tp->irq_cnt > 1)
9070 			val |= MSGINT_MODE_MULTIVEC_EN;
9071 		if (!tg3_flag(tp, 1SHOT_MSI))
9072 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9073 		tw32(MSGINT_MODE, val);
9074 	}
9075 
9076 	if (!tg3_flag(tp, 5705_PLUS)) {
9077 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9078 		udelay(40);
9079 	}
9080 
9081 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9082 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9083 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9084 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9085 	       WDMAC_MODE_LNGREAD_ENAB);
9086 
9087 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9088 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9089 		if (tg3_flag(tp, TSO_CAPABLE) &&
9090 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9091 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9092 			/* nothing */
9093 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9094 			   !tg3_flag(tp, IS_5788)) {
9095 			val |= WDMAC_MODE_RX_ACCEL;
9096 		}
9097 	}
9098 
9099 	/* Enable host coalescing bug fix */
9100 	if (tg3_flag(tp, 5755_PLUS))
9101 		val |= WDMAC_MODE_STATUS_TAG_FIX;
9102 
9103 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9104 		val |= WDMAC_MODE_BURST_ALL_DATA;
9105 
9106 	tw32_f(WDMAC_MODE, val);
9107 	udelay(40);
9108 
9109 	if (tg3_flag(tp, PCIX_MODE)) {
9110 		u16 pcix_cmd;
9111 
9112 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9113 				     &pcix_cmd);
9114 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9115 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9116 			pcix_cmd |= PCI_X_CMD_READ_2K;
9117 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9118 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9119 			pcix_cmd |= PCI_X_CMD_READ_2K;
9120 		}
9121 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9122 				      pcix_cmd);
9123 	}
9124 
9125 	tw32_f(RDMAC_MODE, rdmac_mode);
9126 	udelay(40);
9127 
9128 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9129 	if (!tg3_flag(tp, 5705_PLUS))
9130 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9131 
9132 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9133 		tw32(SNDDATAC_MODE,
9134 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9135 	else
9136 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9137 
9138 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9139 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9140 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9141 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9142 		val |= RCVDBDI_MODE_LRG_RING_SZ;
9143 	tw32(RCVDBDI_MODE, val);
9144 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9145 	if (tg3_flag(tp, HW_TSO_1) ||
9146 	    tg3_flag(tp, HW_TSO_2) ||
9147 	    tg3_flag(tp, HW_TSO_3))
9148 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9149 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9150 	if (tg3_flag(tp, ENABLE_TSS))
9151 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9152 	tw32(SNDBDI_MODE, val);
9153 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9154 
9155 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9156 		err = tg3_load_5701_a0_firmware_fix(tp);
9157 		if (err)
9158 			return err;
9159 	}
9160 
9161 	if (tg3_flag(tp, TSO_CAPABLE)) {
9162 		err = tg3_load_tso_firmware(tp);
9163 		if (err)
9164 			return err;
9165 	}
9166 
9167 	tp->tx_mode = TX_MODE_ENABLE;
9168 
9169 	if (tg3_flag(tp, 5755_PLUS) ||
9170 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9171 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9172 
9173 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9174 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9175 		tp->tx_mode &= ~val;
9176 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9177 	}
9178 
9179 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9180 	udelay(100);
9181 
9182 	if (tg3_flag(tp, ENABLE_RSS)) {
9183 		tg3_rss_write_indir_tbl(tp);
9184 
9185 		/* Setup the "secret" hash key. */
9186 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9187 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9188 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9189 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9190 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9191 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9192 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9193 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9194 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9195 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9196 	}
9197 
9198 	tp->rx_mode = RX_MODE_ENABLE;
9199 	if (tg3_flag(tp, 5755_PLUS))
9200 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9201 
9202 	if (tg3_flag(tp, ENABLE_RSS))
9203 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9204 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9205 			       RX_MODE_RSS_IPV6_HASH_EN |
9206 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9207 			       RX_MODE_RSS_IPV4_HASH_EN |
9208 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9209 
9210 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9211 	udelay(10);
9212 
9213 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9214 
9215 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9216 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9217 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9218 		udelay(10);
9219 	}
9220 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9221 	udelay(10);
9222 
9223 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9224 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9225 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9226 			/* Set drive transmission level to 1.2V  */
9227 			/* only if the signal pre-emphasis bit is not set  */
9228 			val = tr32(MAC_SERDES_CFG);
9229 			val &= 0xfffff000;
9230 			val |= 0x880;
9231 			tw32(MAC_SERDES_CFG, val);
9232 		}
9233 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9234 			tw32(MAC_SERDES_CFG, 0x616000);
9235 	}
9236 
9237 	/* Prevent chip from dropping frames when flow control
9238 	 * is enabled.
9239 	 */
9240 	if (tg3_flag(tp, 57765_CLASS))
9241 		val = 1;
9242 	else
9243 		val = 2;
9244 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9245 
9246 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9247 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9248 		/* Use hardware link auto-negotiation */
9249 		tg3_flag_set(tp, HW_AUTONEG);
9250 	}
9251 
9252 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9253 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9254 		u32 tmp;
9255 
9256 		tmp = tr32(SERDES_RX_CTRL);
9257 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9258 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9259 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9260 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9261 	}
9262 
9263 	if (!tg3_flag(tp, USE_PHYLIB)) {
9264 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9265 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9266 
9267 		err = tg3_setup_phy(tp, 0);
9268 		if (err)
9269 			return err;
9270 
9271 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9272 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9273 			u32 tmp;
9274 
9275 			/* Clear CRC stats. */
9276 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9277 				tg3_writephy(tp, MII_TG3_TEST1,
9278 					     tmp | MII_TG3_TEST1_CRC_EN);
9279 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9280 			}
9281 		}
9282 	}
9283 
9284 	__tg3_set_rx_mode(tp->dev);
9285 
9286 	/* Initialize receive rules. */
9287 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9288 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9289 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9290 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9291 
9292 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9293 		limit = 8;
9294 	else
9295 		limit = 16;
9296 	if (tg3_flag(tp, ENABLE_ASF))
9297 		limit -= 4;
9298 	switch (limit) {
9299 	case 16:
9300 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9301 	case 15:
9302 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9303 	case 14:
9304 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9305 	case 13:
9306 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9307 	case 12:
9308 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9309 	case 11:
9310 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9311 	case 10:
9312 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9313 	case 9:
9314 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9315 	case 8:
9316 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9317 	case 7:
9318 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9319 	case 6:
9320 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9321 	case 5:
9322 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9323 	case 4:
9324 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9325 	case 3:
9326 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9327 	case 2:
9328 	case 1:
9329 
9330 	default:
9331 		break;
9332 	}
9333 
9334 	if (tg3_flag(tp, ENABLE_APE))
9335 		/* Write our heartbeat update interval to APE. */
9336 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9337 				APE_HOST_HEARTBEAT_INT_DISABLE);
9338 
9339 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9340 
9341 	return 0;
9342 }
9343 
9344 /* Called at device open time to get the chip ready for
9345  * packet processing.  Invoked with tp->lock held.
9346  */
9347 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9348 {
9349 	tg3_switch_clocks(tp);
9350 
9351 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9352 
9353 	return tg3_reset_hw(tp, reset_phy);
9354 }
9355 
9356 #define TG3_STAT_ADD32(PSTAT, REG) \
9357 do {	u32 __val = tr32(REG); \
9358 	(PSTAT)->low += __val; \
9359 	if ((PSTAT)->low < __val) \
9360 		(PSTAT)->high += 1; \
9361 } while (0)
9362 
9363 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9364 {
9365 	struct tg3_hw_stats *sp = tp->hw_stats;
9366 
9367 	if (!netif_carrier_ok(tp->dev))
9368 		return;
9369 
9370 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9371 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9372 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9373 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9374 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9375 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9376 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9377 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9378 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9379 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9380 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9381 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9382 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9383 
9384 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9385 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9386 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9387 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9388 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9389 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9390 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9391 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9392 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9393 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9394 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9395 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9396 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9397 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9398 
9399 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9400 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9401 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9402 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9403 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9404 	} else {
9405 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9406 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9407 		if (val) {
9408 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9409 			sp->rx_discards.low += val;
9410 			if (sp->rx_discards.low < val)
9411 				sp->rx_discards.high += 1;
9412 		}
9413 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9414 	}
9415 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9416 }
9417 
9418 static void tg3_chk_missed_msi(struct tg3 *tp)
9419 {
9420 	u32 i;
9421 
9422 	for (i = 0; i < tp->irq_cnt; i++) {
9423 		struct tg3_napi *tnapi = &tp->napi[i];
9424 
9425 		if (tg3_has_work(tnapi)) {
9426 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9427 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9428 				if (tnapi->chk_msi_cnt < 1) {
9429 					tnapi->chk_msi_cnt++;
9430 					return;
9431 				}
9432 				tg3_msi(0, tnapi);
9433 			}
9434 		}
9435 		tnapi->chk_msi_cnt = 0;
9436 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9437 		tnapi->last_tx_cons = tnapi->tx_cons;
9438 	}
9439 }
9440 
9441 static void tg3_timer(unsigned long __opaque)
9442 {
9443 	struct tg3 *tp = (struct tg3 *) __opaque;
9444 
9445 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9446 		goto restart_timer;
9447 
9448 	spin_lock(&tp->lock);
9449 
9450 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9451 	    tg3_flag(tp, 57765_CLASS))
9452 		tg3_chk_missed_msi(tp);
9453 
9454 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9455 		/* All of this garbage is because when using non-tagged
9456 		 * IRQ status the mailbox/status_block protocol the chip
9457 		 * uses with the cpu is race prone.
9458 		 */
9459 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9460 			tw32(GRC_LOCAL_CTRL,
9461 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9462 		} else {
9463 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9464 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9465 		}
9466 
9467 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9468 			spin_unlock(&tp->lock);
9469 			tg3_reset_task_schedule(tp);
9470 			goto restart_timer;
9471 		}
9472 	}
9473 
9474 	/* This part only runs once per second. */
9475 	if (!--tp->timer_counter) {
9476 		if (tg3_flag(tp, 5705_PLUS))
9477 			tg3_periodic_fetch_stats(tp);
9478 
9479 		if (tp->setlpicnt && !--tp->setlpicnt)
9480 			tg3_phy_eee_enable(tp);
9481 
9482 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9483 			u32 mac_stat;
9484 			int phy_event;
9485 
9486 			mac_stat = tr32(MAC_STATUS);
9487 
9488 			phy_event = 0;
9489 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9490 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9491 					phy_event = 1;
9492 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9493 				phy_event = 1;
9494 
9495 			if (phy_event)
9496 				tg3_setup_phy(tp, 0);
9497 		} else if (tg3_flag(tp, POLL_SERDES)) {
9498 			u32 mac_stat = tr32(MAC_STATUS);
9499 			int need_setup = 0;
9500 
9501 			if (netif_carrier_ok(tp->dev) &&
9502 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9503 				need_setup = 1;
9504 			}
9505 			if (!netif_carrier_ok(tp->dev) &&
9506 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9507 					 MAC_STATUS_SIGNAL_DET))) {
9508 				need_setup = 1;
9509 			}
9510 			if (need_setup) {
9511 				if (!tp->serdes_counter) {
9512 					tw32_f(MAC_MODE,
9513 					     (tp->mac_mode &
9514 					      ~MAC_MODE_PORT_MODE_MASK));
9515 					udelay(40);
9516 					tw32_f(MAC_MODE, tp->mac_mode);
9517 					udelay(40);
9518 				}
9519 				tg3_setup_phy(tp, 0);
9520 			}
9521 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9522 			   tg3_flag(tp, 5780_CLASS)) {
9523 			tg3_serdes_parallel_detect(tp);
9524 		}
9525 
9526 		tp->timer_counter = tp->timer_multiplier;
9527 	}
9528 
9529 	/* Heartbeat is only sent once every 2 seconds.
9530 	 *
9531 	 * The heartbeat is to tell the ASF firmware that the host
9532 	 * driver is still alive.  In the event that the OS crashes,
9533 	 * ASF needs to reset the hardware to free up the FIFO space
9534 	 * that may be filled with rx packets destined for the host.
9535 	 * If the FIFO is full, ASF will no longer function properly.
9536 	 *
9537 	 * Unintended resets have been reported on real time kernels
9538 	 * where the timer doesn't run on time.  Netpoll will also have
9539 	 * same problem.
9540 	 *
9541 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9542 	 * to check the ring condition when the heartbeat is expiring
9543 	 * before doing the reset.  This will prevent most unintended
9544 	 * resets.
9545 	 */
9546 	if (!--tp->asf_counter) {
9547 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9548 			tg3_wait_for_event_ack(tp);
9549 
9550 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9551 				      FWCMD_NICDRV_ALIVE3);
9552 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9553 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9554 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9555 
9556 			tg3_generate_fw_event(tp);
9557 		}
9558 		tp->asf_counter = tp->asf_multiplier;
9559 	}
9560 
9561 	spin_unlock(&tp->lock);
9562 
9563 restart_timer:
9564 	tp->timer.expires = jiffies + tp->timer_offset;
9565 	add_timer(&tp->timer);
9566 }
9567 
9568 static void __devinit tg3_timer_init(struct tg3 *tp)
9569 {
9570 	if (tg3_flag(tp, TAGGED_STATUS) &&
9571 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9572 	    !tg3_flag(tp, 57765_CLASS))
9573 		tp->timer_offset = HZ;
9574 	else
9575 		tp->timer_offset = HZ / 10;
9576 
9577 	BUG_ON(tp->timer_offset > HZ);
9578 
9579 	tp->timer_multiplier = (HZ / tp->timer_offset);
9580 	tp->asf_multiplier = (HZ / tp->timer_offset) *
9581 			     TG3_FW_UPDATE_FREQ_SEC;
9582 
9583 	init_timer(&tp->timer);
9584 	tp->timer.data = (unsigned long) tp;
9585 	tp->timer.function = tg3_timer;
9586 }
9587 
9588 static void tg3_timer_start(struct tg3 *tp)
9589 {
9590 	tp->asf_counter   = tp->asf_multiplier;
9591 	tp->timer_counter = tp->timer_multiplier;
9592 
9593 	tp->timer.expires = jiffies + tp->timer_offset;
9594 	add_timer(&tp->timer);
9595 }
9596 
9597 static void tg3_timer_stop(struct tg3 *tp)
9598 {
9599 	del_timer_sync(&tp->timer);
9600 }
9601 
9602 /* Restart hardware after configuration changes, self-test, etc.
9603  * Invoked with tp->lock held.
9604  */
9605 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9606 	__releases(tp->lock)
9607 	__acquires(tp->lock)
9608 {
9609 	int err;
9610 
9611 	err = tg3_init_hw(tp, reset_phy);
9612 	if (err) {
9613 		netdev_err(tp->dev,
9614 			   "Failed to re-initialize device, aborting\n");
9615 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9616 		tg3_full_unlock(tp);
9617 		tg3_timer_stop(tp);
9618 		tp->irq_sync = 0;
9619 		tg3_napi_enable(tp);
9620 		dev_close(tp->dev);
9621 		tg3_full_lock(tp, 0);
9622 	}
9623 	return err;
9624 }
9625 
9626 static void tg3_reset_task(struct work_struct *work)
9627 {
9628 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
9629 	int err;
9630 
9631 	tg3_full_lock(tp, 0);
9632 
9633 	if (!netif_running(tp->dev)) {
9634 		tg3_flag_clear(tp, RESET_TASK_PENDING);
9635 		tg3_full_unlock(tp);
9636 		return;
9637 	}
9638 
9639 	tg3_full_unlock(tp);
9640 
9641 	tg3_phy_stop(tp);
9642 
9643 	tg3_netif_stop(tp);
9644 
9645 	tg3_full_lock(tp, 1);
9646 
9647 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9648 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
9649 		tp->write32_rx_mbox = tg3_write_flush_reg32;
9650 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
9651 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9652 	}
9653 
9654 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9655 	err = tg3_init_hw(tp, 1);
9656 	if (err)
9657 		goto out;
9658 
9659 	tg3_netif_start(tp);
9660 
9661 out:
9662 	tg3_full_unlock(tp);
9663 
9664 	if (!err)
9665 		tg3_phy_start(tp);
9666 
9667 	tg3_flag_clear(tp, RESET_TASK_PENDING);
9668 }
9669 
9670 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9671 {
9672 	irq_handler_t fn;
9673 	unsigned long flags;
9674 	char *name;
9675 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9676 
9677 	if (tp->irq_cnt == 1)
9678 		name = tp->dev->name;
9679 	else {
9680 		name = &tnapi->irq_lbl[0];
9681 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9682 		name[IFNAMSIZ-1] = 0;
9683 	}
9684 
9685 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9686 		fn = tg3_msi;
9687 		if (tg3_flag(tp, 1SHOT_MSI))
9688 			fn = tg3_msi_1shot;
9689 		flags = 0;
9690 	} else {
9691 		fn = tg3_interrupt;
9692 		if (tg3_flag(tp, TAGGED_STATUS))
9693 			fn = tg3_interrupt_tagged;
9694 		flags = IRQF_SHARED;
9695 	}
9696 
9697 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9698 }
9699 
9700 static int tg3_test_interrupt(struct tg3 *tp)
9701 {
9702 	struct tg3_napi *tnapi = &tp->napi[0];
9703 	struct net_device *dev = tp->dev;
9704 	int err, i, intr_ok = 0;
9705 	u32 val;
9706 
9707 	if (!netif_running(dev))
9708 		return -ENODEV;
9709 
9710 	tg3_disable_ints(tp);
9711 
9712 	free_irq(tnapi->irq_vec, tnapi);
9713 
9714 	/*
9715 	 * Turn off MSI one shot mode.  Otherwise this test has no
9716 	 * observable way to know whether the interrupt was delivered.
9717 	 */
9718 	if (tg3_flag(tp, 57765_PLUS)) {
9719 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9720 		tw32(MSGINT_MODE, val);
9721 	}
9722 
9723 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9724 			  IRQF_SHARED, dev->name, tnapi);
9725 	if (err)
9726 		return err;
9727 
9728 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9729 	tg3_enable_ints(tp);
9730 
9731 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9732 	       tnapi->coal_now);
9733 
9734 	for (i = 0; i < 5; i++) {
9735 		u32 int_mbox, misc_host_ctrl;
9736 
9737 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9738 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9739 
9740 		if ((int_mbox != 0) ||
9741 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9742 			intr_ok = 1;
9743 			break;
9744 		}
9745 
9746 		if (tg3_flag(tp, 57765_PLUS) &&
9747 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9748 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9749 
9750 		msleep(10);
9751 	}
9752 
9753 	tg3_disable_ints(tp);
9754 
9755 	free_irq(tnapi->irq_vec, tnapi);
9756 
9757 	err = tg3_request_irq(tp, 0);
9758 
9759 	if (err)
9760 		return err;
9761 
9762 	if (intr_ok) {
9763 		/* Reenable MSI one shot mode. */
9764 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9765 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9766 			tw32(MSGINT_MODE, val);
9767 		}
9768 		return 0;
9769 	}
9770 
9771 	return -EIO;
9772 }
9773 
9774 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9775  * successfully restored
9776  */
9777 static int tg3_test_msi(struct tg3 *tp)
9778 {
9779 	int err;
9780 	u16 pci_cmd;
9781 
9782 	if (!tg3_flag(tp, USING_MSI))
9783 		return 0;
9784 
9785 	/* Turn off SERR reporting in case MSI terminates with Master
9786 	 * Abort.
9787 	 */
9788 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9789 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9790 			      pci_cmd & ~PCI_COMMAND_SERR);
9791 
9792 	err = tg3_test_interrupt(tp);
9793 
9794 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9795 
9796 	if (!err)
9797 		return 0;
9798 
9799 	/* other failures */
9800 	if (err != -EIO)
9801 		return err;
9802 
9803 	/* MSI test failed, go back to INTx mode */
9804 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9805 		    "to INTx mode. Please report this failure to the PCI "
9806 		    "maintainer and include system chipset information\n");
9807 
9808 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9809 
9810 	pci_disable_msi(tp->pdev);
9811 
9812 	tg3_flag_clear(tp, USING_MSI);
9813 	tp->napi[0].irq_vec = tp->pdev->irq;
9814 
9815 	err = tg3_request_irq(tp, 0);
9816 	if (err)
9817 		return err;
9818 
9819 	/* Need to reset the chip because the MSI cycle may have terminated
9820 	 * with Master Abort.
9821 	 */
9822 	tg3_full_lock(tp, 1);
9823 
9824 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9825 	err = tg3_init_hw(tp, 1);
9826 
9827 	tg3_full_unlock(tp);
9828 
9829 	if (err)
9830 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9831 
9832 	return err;
9833 }
9834 
9835 static int tg3_request_firmware(struct tg3 *tp)
9836 {
9837 	const __be32 *fw_data;
9838 
9839 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9840 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9841 			   tp->fw_needed);
9842 		return -ENOENT;
9843 	}
9844 
9845 	fw_data = (void *)tp->fw->data;
9846 
9847 	/* Firmware blob starts with version numbers, followed by
9848 	 * start address and _full_ length including BSS sections
9849 	 * (which must be longer than the actual data, of course
9850 	 */
9851 
9852 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9853 	if (tp->fw_len < (tp->fw->size - 12)) {
9854 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9855 			   tp->fw_len, tp->fw_needed);
9856 		release_firmware(tp->fw);
9857 		tp->fw = NULL;
9858 		return -EINVAL;
9859 	}
9860 
9861 	/* We no longer need firmware; we have it. */
9862 	tp->fw_needed = NULL;
9863 	return 0;
9864 }
9865 
9866 static bool tg3_enable_msix(struct tg3 *tp)
9867 {
9868 	int i, rc;
9869 	struct msix_entry msix_ent[tp->irq_max];
9870 
9871 	tp->irq_cnt = num_online_cpus();
9872 	if (tp->irq_cnt > 1) {
9873 		/* We want as many rx rings enabled as there are cpus.
9874 		 * In multiqueue MSI-X mode, the first MSI-X vector
9875 		 * only deals with link interrupts, etc, so we add
9876 		 * one to the number of vectors we are requesting.
9877 		 */
9878 		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9879 	}
9880 
9881 	for (i = 0; i < tp->irq_max; i++) {
9882 		msix_ent[i].entry  = i;
9883 		msix_ent[i].vector = 0;
9884 	}
9885 
9886 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9887 	if (rc < 0) {
9888 		return false;
9889 	} else if (rc != 0) {
9890 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9891 			return false;
9892 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9893 			      tp->irq_cnt, rc);
9894 		tp->irq_cnt = rc;
9895 	}
9896 
9897 	for (i = 0; i < tp->irq_max; i++)
9898 		tp->napi[i].irq_vec = msix_ent[i].vector;
9899 
9900 	netif_set_real_num_tx_queues(tp->dev, 1);
9901 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9902 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9903 		pci_disable_msix(tp->pdev);
9904 		return false;
9905 	}
9906 
9907 	if (tp->irq_cnt > 1) {
9908 		tg3_flag_set(tp, ENABLE_RSS);
9909 
9910 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9911 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9912 			tg3_flag_set(tp, ENABLE_TSS);
9913 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9914 		}
9915 	}
9916 
9917 	return true;
9918 }
9919 
9920 static void tg3_ints_init(struct tg3 *tp)
9921 {
9922 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9923 	    !tg3_flag(tp, TAGGED_STATUS)) {
9924 		/* All MSI supporting chips should support tagged
9925 		 * status.  Assert that this is the case.
9926 		 */
9927 		netdev_warn(tp->dev,
9928 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9929 		goto defcfg;
9930 	}
9931 
9932 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9933 		tg3_flag_set(tp, USING_MSIX);
9934 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9935 		tg3_flag_set(tp, USING_MSI);
9936 
9937 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9938 		u32 msi_mode = tr32(MSGINT_MODE);
9939 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9940 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9941 		if (!tg3_flag(tp, 1SHOT_MSI))
9942 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9943 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9944 	}
9945 defcfg:
9946 	if (!tg3_flag(tp, USING_MSIX)) {
9947 		tp->irq_cnt = 1;
9948 		tp->napi[0].irq_vec = tp->pdev->irq;
9949 		netif_set_real_num_tx_queues(tp->dev, 1);
9950 		netif_set_real_num_rx_queues(tp->dev, 1);
9951 	}
9952 }
9953 
9954 static void tg3_ints_fini(struct tg3 *tp)
9955 {
9956 	if (tg3_flag(tp, USING_MSIX))
9957 		pci_disable_msix(tp->pdev);
9958 	else if (tg3_flag(tp, USING_MSI))
9959 		pci_disable_msi(tp->pdev);
9960 	tg3_flag_clear(tp, USING_MSI);
9961 	tg3_flag_clear(tp, USING_MSIX);
9962 	tg3_flag_clear(tp, ENABLE_RSS);
9963 	tg3_flag_clear(tp, ENABLE_TSS);
9964 }
9965 
9966 static int tg3_open(struct net_device *dev)
9967 {
9968 	struct tg3 *tp = netdev_priv(dev);
9969 	int i, err;
9970 
9971 	if (tp->fw_needed) {
9972 		err = tg3_request_firmware(tp);
9973 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9974 			if (err)
9975 				return err;
9976 		} else if (err) {
9977 			netdev_warn(tp->dev, "TSO capability disabled\n");
9978 			tg3_flag_clear(tp, TSO_CAPABLE);
9979 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9980 			netdev_notice(tp->dev, "TSO capability restored\n");
9981 			tg3_flag_set(tp, TSO_CAPABLE);
9982 		}
9983 	}
9984 
9985 	netif_carrier_off(tp->dev);
9986 
9987 	err = tg3_power_up(tp);
9988 	if (err)
9989 		return err;
9990 
9991 	tg3_full_lock(tp, 0);
9992 
9993 	tg3_disable_ints(tp);
9994 	tg3_flag_clear(tp, INIT_COMPLETE);
9995 
9996 	tg3_full_unlock(tp);
9997 
9998 	/*
9999 	 * Setup interrupts first so we know how
10000 	 * many NAPI resources to allocate
10001 	 */
10002 	tg3_ints_init(tp);
10003 
10004 	tg3_rss_check_indir_tbl(tp);
10005 
10006 	/* The placement of this call is tied
10007 	 * to the setup and use of Host TX descriptors.
10008 	 */
10009 	err = tg3_alloc_consistent(tp);
10010 	if (err)
10011 		goto err_out1;
10012 
10013 	tg3_napi_init(tp);
10014 
10015 	tg3_napi_enable(tp);
10016 
10017 	for (i = 0; i < tp->irq_cnt; i++) {
10018 		struct tg3_napi *tnapi = &tp->napi[i];
10019 		err = tg3_request_irq(tp, i);
10020 		if (err) {
10021 			for (i--; i >= 0; i--) {
10022 				tnapi = &tp->napi[i];
10023 				free_irq(tnapi->irq_vec, tnapi);
10024 			}
10025 			goto err_out2;
10026 		}
10027 	}
10028 
10029 	tg3_full_lock(tp, 0);
10030 
10031 	err = tg3_init_hw(tp, 1);
10032 	if (err) {
10033 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10034 		tg3_free_rings(tp);
10035 	}
10036 
10037 	tg3_full_unlock(tp);
10038 
10039 	if (err)
10040 		goto err_out3;
10041 
10042 	if (tg3_flag(tp, USING_MSI)) {
10043 		err = tg3_test_msi(tp);
10044 
10045 		if (err) {
10046 			tg3_full_lock(tp, 0);
10047 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048 			tg3_free_rings(tp);
10049 			tg3_full_unlock(tp);
10050 
10051 			goto err_out2;
10052 		}
10053 
10054 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10055 			u32 val = tr32(PCIE_TRANSACTION_CFG);
10056 
10057 			tw32(PCIE_TRANSACTION_CFG,
10058 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10059 		}
10060 	}
10061 
10062 	tg3_phy_start(tp);
10063 
10064 	tg3_full_lock(tp, 0);
10065 
10066 	tg3_timer_start(tp);
10067 	tg3_flag_set(tp, INIT_COMPLETE);
10068 	tg3_enable_ints(tp);
10069 
10070 	tg3_full_unlock(tp);
10071 
10072 	netif_tx_start_all_queues(dev);
10073 
10074 	/*
10075 	 * Reset loopback feature if it was turned on while the device was down
10076 	 * make sure that it's installed properly now.
10077 	 */
10078 	if (dev->features & NETIF_F_LOOPBACK)
10079 		tg3_set_loopback(dev, dev->features);
10080 
10081 	return 0;
10082 
10083 err_out3:
10084 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10085 		struct tg3_napi *tnapi = &tp->napi[i];
10086 		free_irq(tnapi->irq_vec, tnapi);
10087 	}
10088 
10089 err_out2:
10090 	tg3_napi_disable(tp);
10091 	tg3_napi_fini(tp);
10092 	tg3_free_consistent(tp);
10093 
10094 err_out1:
10095 	tg3_ints_fini(tp);
10096 	tg3_frob_aux_power(tp, false);
10097 	pci_set_power_state(tp->pdev, PCI_D3hot);
10098 	return err;
10099 }
10100 
10101 static int tg3_close(struct net_device *dev)
10102 {
10103 	int i;
10104 	struct tg3 *tp = netdev_priv(dev);
10105 
10106 	tg3_napi_disable(tp);
10107 	tg3_reset_task_cancel(tp);
10108 
10109 	netif_tx_stop_all_queues(dev);
10110 
10111 	tg3_timer_stop(tp);
10112 
10113 	tg3_phy_stop(tp);
10114 
10115 	tg3_full_lock(tp, 1);
10116 
10117 	tg3_disable_ints(tp);
10118 
10119 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10120 	tg3_free_rings(tp);
10121 	tg3_flag_clear(tp, INIT_COMPLETE);
10122 
10123 	tg3_full_unlock(tp);
10124 
10125 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10126 		struct tg3_napi *tnapi = &tp->napi[i];
10127 		free_irq(tnapi->irq_vec, tnapi);
10128 	}
10129 
10130 	tg3_ints_fini(tp);
10131 
10132 	/* Clear stats across close / open calls */
10133 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10134 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10135 
10136 	tg3_napi_fini(tp);
10137 
10138 	tg3_free_consistent(tp);
10139 
10140 	tg3_power_down(tp);
10141 
10142 	netif_carrier_off(tp->dev);
10143 
10144 	return 0;
10145 }
10146 
10147 static inline u64 get_stat64(tg3_stat64_t *val)
10148 {
10149        return ((u64)val->high << 32) | ((u64)val->low);
10150 }
10151 
10152 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10153 {
10154 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10155 
10156 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10157 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10158 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10159 		u32 val;
10160 
10161 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10162 			tg3_writephy(tp, MII_TG3_TEST1,
10163 				     val | MII_TG3_TEST1_CRC_EN);
10164 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10165 		} else
10166 			val = 0;
10167 
10168 		tp->phy_crc_errors += val;
10169 
10170 		return tp->phy_crc_errors;
10171 	}
10172 
10173 	return get_stat64(&hw_stats->rx_fcs_errors);
10174 }
10175 
10176 #define ESTAT_ADD(member) \
10177 	estats->member =	old_estats->member + \
10178 				get_stat64(&hw_stats->member)
10179 
10180 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10181 {
10182 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10183 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10184 
10185 	ESTAT_ADD(rx_octets);
10186 	ESTAT_ADD(rx_fragments);
10187 	ESTAT_ADD(rx_ucast_packets);
10188 	ESTAT_ADD(rx_mcast_packets);
10189 	ESTAT_ADD(rx_bcast_packets);
10190 	ESTAT_ADD(rx_fcs_errors);
10191 	ESTAT_ADD(rx_align_errors);
10192 	ESTAT_ADD(rx_xon_pause_rcvd);
10193 	ESTAT_ADD(rx_xoff_pause_rcvd);
10194 	ESTAT_ADD(rx_mac_ctrl_rcvd);
10195 	ESTAT_ADD(rx_xoff_entered);
10196 	ESTAT_ADD(rx_frame_too_long_errors);
10197 	ESTAT_ADD(rx_jabbers);
10198 	ESTAT_ADD(rx_undersize_packets);
10199 	ESTAT_ADD(rx_in_length_errors);
10200 	ESTAT_ADD(rx_out_length_errors);
10201 	ESTAT_ADD(rx_64_or_less_octet_packets);
10202 	ESTAT_ADD(rx_65_to_127_octet_packets);
10203 	ESTAT_ADD(rx_128_to_255_octet_packets);
10204 	ESTAT_ADD(rx_256_to_511_octet_packets);
10205 	ESTAT_ADD(rx_512_to_1023_octet_packets);
10206 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
10207 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
10208 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
10209 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
10210 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
10211 
10212 	ESTAT_ADD(tx_octets);
10213 	ESTAT_ADD(tx_collisions);
10214 	ESTAT_ADD(tx_xon_sent);
10215 	ESTAT_ADD(tx_xoff_sent);
10216 	ESTAT_ADD(tx_flow_control);
10217 	ESTAT_ADD(tx_mac_errors);
10218 	ESTAT_ADD(tx_single_collisions);
10219 	ESTAT_ADD(tx_mult_collisions);
10220 	ESTAT_ADD(tx_deferred);
10221 	ESTAT_ADD(tx_excessive_collisions);
10222 	ESTAT_ADD(tx_late_collisions);
10223 	ESTAT_ADD(tx_collide_2times);
10224 	ESTAT_ADD(tx_collide_3times);
10225 	ESTAT_ADD(tx_collide_4times);
10226 	ESTAT_ADD(tx_collide_5times);
10227 	ESTAT_ADD(tx_collide_6times);
10228 	ESTAT_ADD(tx_collide_7times);
10229 	ESTAT_ADD(tx_collide_8times);
10230 	ESTAT_ADD(tx_collide_9times);
10231 	ESTAT_ADD(tx_collide_10times);
10232 	ESTAT_ADD(tx_collide_11times);
10233 	ESTAT_ADD(tx_collide_12times);
10234 	ESTAT_ADD(tx_collide_13times);
10235 	ESTAT_ADD(tx_collide_14times);
10236 	ESTAT_ADD(tx_collide_15times);
10237 	ESTAT_ADD(tx_ucast_packets);
10238 	ESTAT_ADD(tx_mcast_packets);
10239 	ESTAT_ADD(tx_bcast_packets);
10240 	ESTAT_ADD(tx_carrier_sense_errors);
10241 	ESTAT_ADD(tx_discards);
10242 	ESTAT_ADD(tx_errors);
10243 
10244 	ESTAT_ADD(dma_writeq_full);
10245 	ESTAT_ADD(dma_write_prioq_full);
10246 	ESTAT_ADD(rxbds_empty);
10247 	ESTAT_ADD(rx_discards);
10248 	ESTAT_ADD(rx_errors);
10249 	ESTAT_ADD(rx_threshold_hit);
10250 
10251 	ESTAT_ADD(dma_readq_full);
10252 	ESTAT_ADD(dma_read_prioq_full);
10253 	ESTAT_ADD(tx_comp_queue_full);
10254 
10255 	ESTAT_ADD(ring_set_send_prod_index);
10256 	ESTAT_ADD(ring_status_update);
10257 	ESTAT_ADD(nic_irqs);
10258 	ESTAT_ADD(nic_avoided_irqs);
10259 	ESTAT_ADD(nic_tx_threshold_hit);
10260 
10261 	ESTAT_ADD(mbuf_lwm_thresh_hit);
10262 }
10263 
10264 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10265 {
10266 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10267 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10268 
10269 	stats->rx_packets = old_stats->rx_packets +
10270 		get_stat64(&hw_stats->rx_ucast_packets) +
10271 		get_stat64(&hw_stats->rx_mcast_packets) +
10272 		get_stat64(&hw_stats->rx_bcast_packets);
10273 
10274 	stats->tx_packets = old_stats->tx_packets +
10275 		get_stat64(&hw_stats->tx_ucast_packets) +
10276 		get_stat64(&hw_stats->tx_mcast_packets) +
10277 		get_stat64(&hw_stats->tx_bcast_packets);
10278 
10279 	stats->rx_bytes = old_stats->rx_bytes +
10280 		get_stat64(&hw_stats->rx_octets);
10281 	stats->tx_bytes = old_stats->tx_bytes +
10282 		get_stat64(&hw_stats->tx_octets);
10283 
10284 	stats->rx_errors = old_stats->rx_errors +
10285 		get_stat64(&hw_stats->rx_errors);
10286 	stats->tx_errors = old_stats->tx_errors +
10287 		get_stat64(&hw_stats->tx_errors) +
10288 		get_stat64(&hw_stats->tx_mac_errors) +
10289 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10290 		get_stat64(&hw_stats->tx_discards);
10291 
10292 	stats->multicast = old_stats->multicast +
10293 		get_stat64(&hw_stats->rx_mcast_packets);
10294 	stats->collisions = old_stats->collisions +
10295 		get_stat64(&hw_stats->tx_collisions);
10296 
10297 	stats->rx_length_errors = old_stats->rx_length_errors +
10298 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10299 		get_stat64(&hw_stats->rx_undersize_packets);
10300 
10301 	stats->rx_over_errors = old_stats->rx_over_errors +
10302 		get_stat64(&hw_stats->rxbds_empty);
10303 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10304 		get_stat64(&hw_stats->rx_align_errors);
10305 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10306 		get_stat64(&hw_stats->tx_discards);
10307 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10308 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10309 
10310 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10311 		tg3_calc_crc_errors(tp);
10312 
10313 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10314 		get_stat64(&hw_stats->rx_discards);
10315 
10316 	stats->rx_dropped = tp->rx_dropped;
10317 	stats->tx_dropped = tp->tx_dropped;
10318 }
10319 
10320 static int tg3_get_regs_len(struct net_device *dev)
10321 {
10322 	return TG3_REG_BLK_SIZE;
10323 }
10324 
10325 static void tg3_get_regs(struct net_device *dev,
10326 		struct ethtool_regs *regs, void *_p)
10327 {
10328 	struct tg3 *tp = netdev_priv(dev);
10329 
10330 	regs->version = 0;
10331 
10332 	memset(_p, 0, TG3_REG_BLK_SIZE);
10333 
10334 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10335 		return;
10336 
10337 	tg3_full_lock(tp, 0);
10338 
10339 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10340 
10341 	tg3_full_unlock(tp);
10342 }
10343 
10344 static int tg3_get_eeprom_len(struct net_device *dev)
10345 {
10346 	struct tg3 *tp = netdev_priv(dev);
10347 
10348 	return tp->nvram_size;
10349 }
10350 
10351 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10352 {
10353 	struct tg3 *tp = netdev_priv(dev);
10354 	int ret;
10355 	u8  *pd;
10356 	u32 i, offset, len, b_offset, b_count;
10357 	__be32 val;
10358 
10359 	if (tg3_flag(tp, NO_NVRAM))
10360 		return -EINVAL;
10361 
10362 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10363 		return -EAGAIN;
10364 
10365 	offset = eeprom->offset;
10366 	len = eeprom->len;
10367 	eeprom->len = 0;
10368 
10369 	eeprom->magic = TG3_EEPROM_MAGIC;
10370 
10371 	if (offset & 3) {
10372 		/* adjustments to start on required 4 byte boundary */
10373 		b_offset = offset & 3;
10374 		b_count = 4 - b_offset;
10375 		if (b_count > len) {
10376 			/* i.e. offset=1 len=2 */
10377 			b_count = len;
10378 		}
10379 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10380 		if (ret)
10381 			return ret;
10382 		memcpy(data, ((char *)&val) + b_offset, b_count);
10383 		len -= b_count;
10384 		offset += b_count;
10385 		eeprom->len += b_count;
10386 	}
10387 
10388 	/* read bytes up to the last 4 byte boundary */
10389 	pd = &data[eeprom->len];
10390 	for (i = 0; i < (len - (len & 3)); i += 4) {
10391 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10392 		if (ret) {
10393 			eeprom->len += i;
10394 			return ret;
10395 		}
10396 		memcpy(pd + i, &val, 4);
10397 	}
10398 	eeprom->len += i;
10399 
10400 	if (len & 3) {
10401 		/* read last bytes not ending on 4 byte boundary */
10402 		pd = &data[eeprom->len];
10403 		b_count = len & 3;
10404 		b_offset = offset + len - b_count;
10405 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10406 		if (ret)
10407 			return ret;
10408 		memcpy(pd, &val, b_count);
10409 		eeprom->len += b_count;
10410 	}
10411 	return 0;
10412 }
10413 
10414 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10415 {
10416 	struct tg3 *tp = netdev_priv(dev);
10417 	int ret;
10418 	u32 offset, len, b_offset, odd_len;
10419 	u8 *buf;
10420 	__be32 start, end;
10421 
10422 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10423 		return -EAGAIN;
10424 
10425 	if (tg3_flag(tp, NO_NVRAM) ||
10426 	    eeprom->magic != TG3_EEPROM_MAGIC)
10427 		return -EINVAL;
10428 
10429 	offset = eeprom->offset;
10430 	len = eeprom->len;
10431 
10432 	if ((b_offset = (offset & 3))) {
10433 		/* adjustments to start on required 4 byte boundary */
10434 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10435 		if (ret)
10436 			return ret;
10437 		len += b_offset;
10438 		offset &= ~3;
10439 		if (len < 4)
10440 			len = 4;
10441 	}
10442 
10443 	odd_len = 0;
10444 	if (len & 3) {
10445 		/* adjustments to end on required 4 byte boundary */
10446 		odd_len = 1;
10447 		len = (len + 3) & ~3;
10448 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10449 		if (ret)
10450 			return ret;
10451 	}
10452 
10453 	buf = data;
10454 	if (b_offset || odd_len) {
10455 		buf = kmalloc(len, GFP_KERNEL);
10456 		if (!buf)
10457 			return -ENOMEM;
10458 		if (b_offset)
10459 			memcpy(buf, &start, 4);
10460 		if (odd_len)
10461 			memcpy(buf+len-4, &end, 4);
10462 		memcpy(buf + b_offset, data, eeprom->len);
10463 	}
10464 
10465 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10466 
10467 	if (buf != data)
10468 		kfree(buf);
10469 
10470 	return ret;
10471 }
10472 
10473 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10474 {
10475 	struct tg3 *tp = netdev_priv(dev);
10476 
10477 	if (tg3_flag(tp, USE_PHYLIB)) {
10478 		struct phy_device *phydev;
10479 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10480 			return -EAGAIN;
10481 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10482 		return phy_ethtool_gset(phydev, cmd);
10483 	}
10484 
10485 	cmd->supported = (SUPPORTED_Autoneg);
10486 
10487 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10488 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10489 				   SUPPORTED_1000baseT_Full);
10490 
10491 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10492 		cmd->supported |= (SUPPORTED_100baseT_Half |
10493 				  SUPPORTED_100baseT_Full |
10494 				  SUPPORTED_10baseT_Half |
10495 				  SUPPORTED_10baseT_Full |
10496 				  SUPPORTED_TP);
10497 		cmd->port = PORT_TP;
10498 	} else {
10499 		cmd->supported |= SUPPORTED_FIBRE;
10500 		cmd->port = PORT_FIBRE;
10501 	}
10502 
10503 	cmd->advertising = tp->link_config.advertising;
10504 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10505 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10506 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10507 				cmd->advertising |= ADVERTISED_Pause;
10508 			} else {
10509 				cmd->advertising |= ADVERTISED_Pause |
10510 						    ADVERTISED_Asym_Pause;
10511 			}
10512 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10513 			cmd->advertising |= ADVERTISED_Asym_Pause;
10514 		}
10515 	}
10516 	if (netif_running(dev) && netif_carrier_ok(dev)) {
10517 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10518 		cmd->duplex = tp->link_config.active_duplex;
10519 		cmd->lp_advertising = tp->link_config.rmt_adv;
10520 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10521 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10522 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10523 			else
10524 				cmd->eth_tp_mdix = ETH_TP_MDI;
10525 		}
10526 	} else {
10527 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10528 		cmd->duplex = DUPLEX_UNKNOWN;
10529 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10530 	}
10531 	cmd->phy_address = tp->phy_addr;
10532 	cmd->transceiver = XCVR_INTERNAL;
10533 	cmd->autoneg = tp->link_config.autoneg;
10534 	cmd->maxtxpkt = 0;
10535 	cmd->maxrxpkt = 0;
10536 	return 0;
10537 }
10538 
10539 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10540 {
10541 	struct tg3 *tp = netdev_priv(dev);
10542 	u32 speed = ethtool_cmd_speed(cmd);
10543 
10544 	if (tg3_flag(tp, USE_PHYLIB)) {
10545 		struct phy_device *phydev;
10546 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10547 			return -EAGAIN;
10548 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10549 		return phy_ethtool_sset(phydev, cmd);
10550 	}
10551 
10552 	if (cmd->autoneg != AUTONEG_ENABLE &&
10553 	    cmd->autoneg != AUTONEG_DISABLE)
10554 		return -EINVAL;
10555 
10556 	if (cmd->autoneg == AUTONEG_DISABLE &&
10557 	    cmd->duplex != DUPLEX_FULL &&
10558 	    cmd->duplex != DUPLEX_HALF)
10559 		return -EINVAL;
10560 
10561 	if (cmd->autoneg == AUTONEG_ENABLE) {
10562 		u32 mask = ADVERTISED_Autoneg |
10563 			   ADVERTISED_Pause |
10564 			   ADVERTISED_Asym_Pause;
10565 
10566 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10567 			mask |= ADVERTISED_1000baseT_Half |
10568 				ADVERTISED_1000baseT_Full;
10569 
10570 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10571 			mask |= ADVERTISED_100baseT_Half |
10572 				ADVERTISED_100baseT_Full |
10573 				ADVERTISED_10baseT_Half |
10574 				ADVERTISED_10baseT_Full |
10575 				ADVERTISED_TP;
10576 		else
10577 			mask |= ADVERTISED_FIBRE;
10578 
10579 		if (cmd->advertising & ~mask)
10580 			return -EINVAL;
10581 
10582 		mask &= (ADVERTISED_1000baseT_Half |
10583 			 ADVERTISED_1000baseT_Full |
10584 			 ADVERTISED_100baseT_Half |
10585 			 ADVERTISED_100baseT_Full |
10586 			 ADVERTISED_10baseT_Half |
10587 			 ADVERTISED_10baseT_Full);
10588 
10589 		cmd->advertising &= mask;
10590 	} else {
10591 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10592 			if (speed != SPEED_1000)
10593 				return -EINVAL;
10594 
10595 			if (cmd->duplex != DUPLEX_FULL)
10596 				return -EINVAL;
10597 		} else {
10598 			if (speed != SPEED_100 &&
10599 			    speed != SPEED_10)
10600 				return -EINVAL;
10601 		}
10602 	}
10603 
10604 	tg3_full_lock(tp, 0);
10605 
10606 	tp->link_config.autoneg = cmd->autoneg;
10607 	if (cmd->autoneg == AUTONEG_ENABLE) {
10608 		tp->link_config.advertising = (cmd->advertising |
10609 					      ADVERTISED_Autoneg);
10610 		tp->link_config.speed = SPEED_UNKNOWN;
10611 		tp->link_config.duplex = DUPLEX_UNKNOWN;
10612 	} else {
10613 		tp->link_config.advertising = 0;
10614 		tp->link_config.speed = speed;
10615 		tp->link_config.duplex = cmd->duplex;
10616 	}
10617 
10618 	if (netif_running(dev))
10619 		tg3_setup_phy(tp, 1);
10620 
10621 	tg3_full_unlock(tp);
10622 
10623 	return 0;
10624 }
10625 
10626 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10627 {
10628 	struct tg3 *tp = netdev_priv(dev);
10629 
10630 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10631 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10632 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10633 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10634 }
10635 
10636 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10637 {
10638 	struct tg3 *tp = netdev_priv(dev);
10639 
10640 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10641 		wol->supported = WAKE_MAGIC;
10642 	else
10643 		wol->supported = 0;
10644 	wol->wolopts = 0;
10645 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10646 		wol->wolopts = WAKE_MAGIC;
10647 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10648 }
10649 
10650 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10651 {
10652 	struct tg3 *tp = netdev_priv(dev);
10653 	struct device *dp = &tp->pdev->dev;
10654 
10655 	if (wol->wolopts & ~WAKE_MAGIC)
10656 		return -EINVAL;
10657 	if ((wol->wolopts & WAKE_MAGIC) &&
10658 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10659 		return -EINVAL;
10660 
10661 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10662 
10663 	spin_lock_bh(&tp->lock);
10664 	if (device_may_wakeup(dp))
10665 		tg3_flag_set(tp, WOL_ENABLE);
10666 	else
10667 		tg3_flag_clear(tp, WOL_ENABLE);
10668 	spin_unlock_bh(&tp->lock);
10669 
10670 	return 0;
10671 }
10672 
10673 static u32 tg3_get_msglevel(struct net_device *dev)
10674 {
10675 	struct tg3 *tp = netdev_priv(dev);
10676 	return tp->msg_enable;
10677 }
10678 
10679 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10680 {
10681 	struct tg3 *tp = netdev_priv(dev);
10682 	tp->msg_enable = value;
10683 }
10684 
10685 static int tg3_nway_reset(struct net_device *dev)
10686 {
10687 	struct tg3 *tp = netdev_priv(dev);
10688 	int r;
10689 
10690 	if (!netif_running(dev))
10691 		return -EAGAIN;
10692 
10693 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10694 		return -EINVAL;
10695 
10696 	if (tg3_flag(tp, USE_PHYLIB)) {
10697 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10698 			return -EAGAIN;
10699 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10700 	} else {
10701 		u32 bmcr;
10702 
10703 		spin_lock_bh(&tp->lock);
10704 		r = -EINVAL;
10705 		tg3_readphy(tp, MII_BMCR, &bmcr);
10706 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10707 		    ((bmcr & BMCR_ANENABLE) ||
10708 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10709 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10710 						   BMCR_ANENABLE);
10711 			r = 0;
10712 		}
10713 		spin_unlock_bh(&tp->lock);
10714 	}
10715 
10716 	return r;
10717 }
10718 
10719 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10720 {
10721 	struct tg3 *tp = netdev_priv(dev);
10722 
10723 	ering->rx_max_pending = tp->rx_std_ring_mask;
10724 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10725 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10726 	else
10727 		ering->rx_jumbo_max_pending = 0;
10728 
10729 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10730 
10731 	ering->rx_pending = tp->rx_pending;
10732 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10733 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10734 	else
10735 		ering->rx_jumbo_pending = 0;
10736 
10737 	ering->tx_pending = tp->napi[0].tx_pending;
10738 }
10739 
10740 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10741 {
10742 	struct tg3 *tp = netdev_priv(dev);
10743 	int i, irq_sync = 0, err = 0;
10744 
10745 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10746 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10747 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10748 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10749 	    (tg3_flag(tp, TSO_BUG) &&
10750 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10751 		return -EINVAL;
10752 
10753 	if (netif_running(dev)) {
10754 		tg3_phy_stop(tp);
10755 		tg3_netif_stop(tp);
10756 		irq_sync = 1;
10757 	}
10758 
10759 	tg3_full_lock(tp, irq_sync);
10760 
10761 	tp->rx_pending = ering->rx_pending;
10762 
10763 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10764 	    tp->rx_pending > 63)
10765 		tp->rx_pending = 63;
10766 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10767 
10768 	for (i = 0; i < tp->irq_max; i++)
10769 		tp->napi[i].tx_pending = ering->tx_pending;
10770 
10771 	if (netif_running(dev)) {
10772 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10773 		err = tg3_restart_hw(tp, 1);
10774 		if (!err)
10775 			tg3_netif_start(tp);
10776 	}
10777 
10778 	tg3_full_unlock(tp);
10779 
10780 	if (irq_sync && !err)
10781 		tg3_phy_start(tp);
10782 
10783 	return err;
10784 }
10785 
10786 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10787 {
10788 	struct tg3 *tp = netdev_priv(dev);
10789 
10790 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10791 
10792 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10793 		epause->rx_pause = 1;
10794 	else
10795 		epause->rx_pause = 0;
10796 
10797 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10798 		epause->tx_pause = 1;
10799 	else
10800 		epause->tx_pause = 0;
10801 }
10802 
10803 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10804 {
10805 	struct tg3 *tp = netdev_priv(dev);
10806 	int err = 0;
10807 
10808 	if (tg3_flag(tp, USE_PHYLIB)) {
10809 		u32 newadv;
10810 		struct phy_device *phydev;
10811 
10812 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10813 
10814 		if (!(phydev->supported & SUPPORTED_Pause) ||
10815 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10816 		     (epause->rx_pause != epause->tx_pause)))
10817 			return -EINVAL;
10818 
10819 		tp->link_config.flowctrl = 0;
10820 		if (epause->rx_pause) {
10821 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10822 
10823 			if (epause->tx_pause) {
10824 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10825 				newadv = ADVERTISED_Pause;
10826 			} else
10827 				newadv = ADVERTISED_Pause |
10828 					 ADVERTISED_Asym_Pause;
10829 		} else if (epause->tx_pause) {
10830 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10831 			newadv = ADVERTISED_Asym_Pause;
10832 		} else
10833 			newadv = 0;
10834 
10835 		if (epause->autoneg)
10836 			tg3_flag_set(tp, PAUSE_AUTONEG);
10837 		else
10838 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10839 
10840 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10841 			u32 oldadv = phydev->advertising &
10842 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10843 			if (oldadv != newadv) {
10844 				phydev->advertising &=
10845 					~(ADVERTISED_Pause |
10846 					  ADVERTISED_Asym_Pause);
10847 				phydev->advertising |= newadv;
10848 				if (phydev->autoneg) {
10849 					/*
10850 					 * Always renegotiate the link to
10851 					 * inform our link partner of our
10852 					 * flow control settings, even if the
10853 					 * flow control is forced.  Let
10854 					 * tg3_adjust_link() do the final
10855 					 * flow control setup.
10856 					 */
10857 					return phy_start_aneg(phydev);
10858 				}
10859 			}
10860 
10861 			if (!epause->autoneg)
10862 				tg3_setup_flow_control(tp, 0, 0);
10863 		} else {
10864 			tp->link_config.advertising &=
10865 					~(ADVERTISED_Pause |
10866 					  ADVERTISED_Asym_Pause);
10867 			tp->link_config.advertising |= newadv;
10868 		}
10869 	} else {
10870 		int irq_sync = 0;
10871 
10872 		if (netif_running(dev)) {
10873 			tg3_netif_stop(tp);
10874 			irq_sync = 1;
10875 		}
10876 
10877 		tg3_full_lock(tp, irq_sync);
10878 
10879 		if (epause->autoneg)
10880 			tg3_flag_set(tp, PAUSE_AUTONEG);
10881 		else
10882 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10883 		if (epause->rx_pause)
10884 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10885 		else
10886 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10887 		if (epause->tx_pause)
10888 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10889 		else
10890 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10891 
10892 		if (netif_running(dev)) {
10893 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10894 			err = tg3_restart_hw(tp, 1);
10895 			if (!err)
10896 				tg3_netif_start(tp);
10897 		}
10898 
10899 		tg3_full_unlock(tp);
10900 	}
10901 
10902 	return err;
10903 }
10904 
10905 static int tg3_get_sset_count(struct net_device *dev, int sset)
10906 {
10907 	switch (sset) {
10908 	case ETH_SS_TEST:
10909 		return TG3_NUM_TEST;
10910 	case ETH_SS_STATS:
10911 		return TG3_NUM_STATS;
10912 	default:
10913 		return -EOPNOTSUPP;
10914 	}
10915 }
10916 
10917 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10918 			 u32 *rules __always_unused)
10919 {
10920 	struct tg3 *tp = netdev_priv(dev);
10921 
10922 	if (!tg3_flag(tp, SUPPORT_MSIX))
10923 		return -EOPNOTSUPP;
10924 
10925 	switch (info->cmd) {
10926 	case ETHTOOL_GRXRINGS:
10927 		if (netif_running(tp->dev))
10928 			info->data = tp->irq_cnt;
10929 		else {
10930 			info->data = num_online_cpus();
10931 			if (info->data > TG3_IRQ_MAX_VECS_RSS)
10932 				info->data = TG3_IRQ_MAX_VECS_RSS;
10933 		}
10934 
10935 		/* The first interrupt vector only
10936 		 * handles link interrupts.
10937 		 */
10938 		info->data -= 1;
10939 		return 0;
10940 
10941 	default:
10942 		return -EOPNOTSUPP;
10943 	}
10944 }
10945 
10946 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10947 {
10948 	u32 size = 0;
10949 	struct tg3 *tp = netdev_priv(dev);
10950 
10951 	if (tg3_flag(tp, SUPPORT_MSIX))
10952 		size = TG3_RSS_INDIR_TBL_SIZE;
10953 
10954 	return size;
10955 }
10956 
10957 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10958 {
10959 	struct tg3 *tp = netdev_priv(dev);
10960 	int i;
10961 
10962 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10963 		indir[i] = tp->rss_ind_tbl[i];
10964 
10965 	return 0;
10966 }
10967 
10968 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10969 {
10970 	struct tg3 *tp = netdev_priv(dev);
10971 	size_t i;
10972 
10973 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10974 		tp->rss_ind_tbl[i] = indir[i];
10975 
10976 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10977 		return 0;
10978 
10979 	/* It is legal to write the indirection
10980 	 * table while the device is running.
10981 	 */
10982 	tg3_full_lock(tp, 0);
10983 	tg3_rss_write_indir_tbl(tp);
10984 	tg3_full_unlock(tp);
10985 
10986 	return 0;
10987 }
10988 
10989 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10990 {
10991 	switch (stringset) {
10992 	case ETH_SS_STATS:
10993 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10994 		break;
10995 	case ETH_SS_TEST:
10996 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10997 		break;
10998 	default:
10999 		WARN_ON(1);	/* we need a WARN() */
11000 		break;
11001 	}
11002 }
11003 
11004 static int tg3_set_phys_id(struct net_device *dev,
11005 			    enum ethtool_phys_id_state state)
11006 {
11007 	struct tg3 *tp = netdev_priv(dev);
11008 
11009 	if (!netif_running(tp->dev))
11010 		return -EAGAIN;
11011 
11012 	switch (state) {
11013 	case ETHTOOL_ID_ACTIVE:
11014 		return 1;	/* cycle on/off once per second */
11015 
11016 	case ETHTOOL_ID_ON:
11017 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11018 		     LED_CTRL_1000MBPS_ON |
11019 		     LED_CTRL_100MBPS_ON |
11020 		     LED_CTRL_10MBPS_ON |
11021 		     LED_CTRL_TRAFFIC_OVERRIDE |
11022 		     LED_CTRL_TRAFFIC_BLINK |
11023 		     LED_CTRL_TRAFFIC_LED);
11024 		break;
11025 
11026 	case ETHTOOL_ID_OFF:
11027 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11028 		     LED_CTRL_TRAFFIC_OVERRIDE);
11029 		break;
11030 
11031 	case ETHTOOL_ID_INACTIVE:
11032 		tw32(MAC_LED_CTRL, tp->led_ctrl);
11033 		break;
11034 	}
11035 
11036 	return 0;
11037 }
11038 
11039 static void tg3_get_ethtool_stats(struct net_device *dev,
11040 				   struct ethtool_stats *estats, u64 *tmp_stats)
11041 {
11042 	struct tg3 *tp = netdev_priv(dev);
11043 
11044 	if (tp->hw_stats)
11045 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11046 	else
11047 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11048 }
11049 
11050 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11051 {
11052 	int i;
11053 	__be32 *buf;
11054 	u32 offset = 0, len = 0;
11055 	u32 magic, val;
11056 
11057 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11058 		return NULL;
11059 
11060 	if (magic == TG3_EEPROM_MAGIC) {
11061 		for (offset = TG3_NVM_DIR_START;
11062 		     offset < TG3_NVM_DIR_END;
11063 		     offset += TG3_NVM_DIRENT_SIZE) {
11064 			if (tg3_nvram_read(tp, offset, &val))
11065 				return NULL;
11066 
11067 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11068 			    TG3_NVM_DIRTYPE_EXTVPD)
11069 				break;
11070 		}
11071 
11072 		if (offset != TG3_NVM_DIR_END) {
11073 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11074 			if (tg3_nvram_read(tp, offset + 4, &offset))
11075 				return NULL;
11076 
11077 			offset = tg3_nvram_logical_addr(tp, offset);
11078 		}
11079 	}
11080 
11081 	if (!offset || !len) {
11082 		offset = TG3_NVM_VPD_OFF;
11083 		len = TG3_NVM_VPD_LEN;
11084 	}
11085 
11086 	buf = kmalloc(len, GFP_KERNEL);
11087 	if (buf == NULL)
11088 		return NULL;
11089 
11090 	if (magic == TG3_EEPROM_MAGIC) {
11091 		for (i = 0; i < len; i += 4) {
11092 			/* The data is in little-endian format in NVRAM.
11093 			 * Use the big-endian read routines to preserve
11094 			 * the byte order as it exists in NVRAM.
11095 			 */
11096 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11097 				goto error;
11098 		}
11099 	} else {
11100 		u8 *ptr;
11101 		ssize_t cnt;
11102 		unsigned int pos = 0;
11103 
11104 		ptr = (u8 *)&buf[0];
11105 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11106 			cnt = pci_read_vpd(tp->pdev, pos,
11107 					   len - pos, ptr);
11108 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11109 				cnt = 0;
11110 			else if (cnt < 0)
11111 				goto error;
11112 		}
11113 		if (pos != len)
11114 			goto error;
11115 	}
11116 
11117 	*vpdlen = len;
11118 
11119 	return buf;
11120 
11121 error:
11122 	kfree(buf);
11123 	return NULL;
11124 }
11125 
11126 #define NVRAM_TEST_SIZE 0x100
11127 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11128 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11129 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11130 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11131 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11132 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11133 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11134 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11135 
11136 static int tg3_test_nvram(struct tg3 *tp)
11137 {
11138 	u32 csum, magic, len;
11139 	__be32 *buf;
11140 	int i, j, k, err = 0, size;
11141 
11142 	if (tg3_flag(tp, NO_NVRAM))
11143 		return 0;
11144 
11145 	if (tg3_nvram_read(tp, 0, &magic) != 0)
11146 		return -EIO;
11147 
11148 	if (magic == TG3_EEPROM_MAGIC)
11149 		size = NVRAM_TEST_SIZE;
11150 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11151 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11152 		    TG3_EEPROM_SB_FORMAT_1) {
11153 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11154 			case TG3_EEPROM_SB_REVISION_0:
11155 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11156 				break;
11157 			case TG3_EEPROM_SB_REVISION_2:
11158 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11159 				break;
11160 			case TG3_EEPROM_SB_REVISION_3:
11161 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11162 				break;
11163 			case TG3_EEPROM_SB_REVISION_4:
11164 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11165 				break;
11166 			case TG3_EEPROM_SB_REVISION_5:
11167 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11168 				break;
11169 			case TG3_EEPROM_SB_REVISION_6:
11170 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11171 				break;
11172 			default:
11173 				return -EIO;
11174 			}
11175 		} else
11176 			return 0;
11177 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11178 		size = NVRAM_SELFBOOT_HW_SIZE;
11179 	else
11180 		return -EIO;
11181 
11182 	buf = kmalloc(size, GFP_KERNEL);
11183 	if (buf == NULL)
11184 		return -ENOMEM;
11185 
11186 	err = -EIO;
11187 	for (i = 0, j = 0; i < size; i += 4, j++) {
11188 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11189 		if (err)
11190 			break;
11191 	}
11192 	if (i < size)
11193 		goto out;
11194 
11195 	/* Selfboot format */
11196 	magic = be32_to_cpu(buf[0]);
11197 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11198 	    TG3_EEPROM_MAGIC_FW) {
11199 		u8 *buf8 = (u8 *) buf, csum8 = 0;
11200 
11201 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11202 		    TG3_EEPROM_SB_REVISION_2) {
11203 			/* For rev 2, the csum doesn't include the MBA. */
11204 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11205 				csum8 += buf8[i];
11206 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11207 				csum8 += buf8[i];
11208 		} else {
11209 			for (i = 0; i < size; i++)
11210 				csum8 += buf8[i];
11211 		}
11212 
11213 		if (csum8 == 0) {
11214 			err = 0;
11215 			goto out;
11216 		}
11217 
11218 		err = -EIO;
11219 		goto out;
11220 	}
11221 
11222 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11223 	    TG3_EEPROM_MAGIC_HW) {
11224 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11225 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11226 		u8 *buf8 = (u8 *) buf;
11227 
11228 		/* Separate the parity bits and the data bytes.  */
11229 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11230 			if ((i == 0) || (i == 8)) {
11231 				int l;
11232 				u8 msk;
11233 
11234 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11235 					parity[k++] = buf8[i] & msk;
11236 				i++;
11237 			} else if (i == 16) {
11238 				int l;
11239 				u8 msk;
11240 
11241 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11242 					parity[k++] = buf8[i] & msk;
11243 				i++;
11244 
11245 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11246 					parity[k++] = buf8[i] & msk;
11247 				i++;
11248 			}
11249 			data[j++] = buf8[i];
11250 		}
11251 
11252 		err = -EIO;
11253 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11254 			u8 hw8 = hweight8(data[i]);
11255 
11256 			if ((hw8 & 0x1) && parity[i])
11257 				goto out;
11258 			else if (!(hw8 & 0x1) && !parity[i])
11259 				goto out;
11260 		}
11261 		err = 0;
11262 		goto out;
11263 	}
11264 
11265 	err = -EIO;
11266 
11267 	/* Bootstrap checksum at offset 0x10 */
11268 	csum = calc_crc((unsigned char *) buf, 0x10);
11269 	if (csum != le32_to_cpu(buf[0x10/4]))
11270 		goto out;
11271 
11272 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11273 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11274 	if (csum != le32_to_cpu(buf[0xfc/4]))
11275 		goto out;
11276 
11277 	kfree(buf);
11278 
11279 	buf = tg3_vpd_readblock(tp, &len);
11280 	if (!buf)
11281 		return -ENOMEM;
11282 
11283 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11284 	if (i > 0) {
11285 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11286 		if (j < 0)
11287 			goto out;
11288 
11289 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11290 			goto out;
11291 
11292 		i += PCI_VPD_LRDT_TAG_SIZE;
11293 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11294 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11295 		if (j > 0) {
11296 			u8 csum8 = 0;
11297 
11298 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11299 
11300 			for (i = 0; i <= j; i++)
11301 				csum8 += ((u8 *)buf)[i];
11302 
11303 			if (csum8)
11304 				goto out;
11305 		}
11306 	}
11307 
11308 	err = 0;
11309 
11310 out:
11311 	kfree(buf);
11312 	return err;
11313 }
11314 
11315 #define TG3_SERDES_TIMEOUT_SEC	2
11316 #define TG3_COPPER_TIMEOUT_SEC	6
11317 
11318 static int tg3_test_link(struct tg3 *tp)
11319 {
11320 	int i, max;
11321 
11322 	if (!netif_running(tp->dev))
11323 		return -ENODEV;
11324 
11325 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11326 		max = TG3_SERDES_TIMEOUT_SEC;
11327 	else
11328 		max = TG3_COPPER_TIMEOUT_SEC;
11329 
11330 	for (i = 0; i < max; i++) {
11331 		if (netif_carrier_ok(tp->dev))
11332 			return 0;
11333 
11334 		if (msleep_interruptible(1000))
11335 			break;
11336 	}
11337 
11338 	return -EIO;
11339 }
11340 
11341 /* Only test the commonly used registers */
11342 static int tg3_test_registers(struct tg3 *tp)
11343 {
11344 	int i, is_5705, is_5750;
11345 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11346 	static struct {
11347 		u16 offset;
11348 		u16 flags;
11349 #define TG3_FL_5705	0x1
11350 #define TG3_FL_NOT_5705	0x2
11351 #define TG3_FL_NOT_5788	0x4
11352 #define TG3_FL_NOT_5750	0x8
11353 		u32 read_mask;
11354 		u32 write_mask;
11355 	} reg_tbl[] = {
11356 		/* MAC Control Registers */
11357 		{ MAC_MODE, TG3_FL_NOT_5705,
11358 			0x00000000, 0x00ef6f8c },
11359 		{ MAC_MODE, TG3_FL_5705,
11360 			0x00000000, 0x01ef6b8c },
11361 		{ MAC_STATUS, TG3_FL_NOT_5705,
11362 			0x03800107, 0x00000000 },
11363 		{ MAC_STATUS, TG3_FL_5705,
11364 			0x03800100, 0x00000000 },
11365 		{ MAC_ADDR_0_HIGH, 0x0000,
11366 			0x00000000, 0x0000ffff },
11367 		{ MAC_ADDR_0_LOW, 0x0000,
11368 			0x00000000, 0xffffffff },
11369 		{ MAC_RX_MTU_SIZE, 0x0000,
11370 			0x00000000, 0x0000ffff },
11371 		{ MAC_TX_MODE, 0x0000,
11372 			0x00000000, 0x00000070 },
11373 		{ MAC_TX_LENGTHS, 0x0000,
11374 			0x00000000, 0x00003fff },
11375 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11376 			0x00000000, 0x000007fc },
11377 		{ MAC_RX_MODE, TG3_FL_5705,
11378 			0x00000000, 0x000007dc },
11379 		{ MAC_HASH_REG_0, 0x0000,
11380 			0x00000000, 0xffffffff },
11381 		{ MAC_HASH_REG_1, 0x0000,
11382 			0x00000000, 0xffffffff },
11383 		{ MAC_HASH_REG_2, 0x0000,
11384 			0x00000000, 0xffffffff },
11385 		{ MAC_HASH_REG_3, 0x0000,
11386 			0x00000000, 0xffffffff },
11387 
11388 		/* Receive Data and Receive BD Initiator Control Registers. */
11389 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11390 			0x00000000, 0xffffffff },
11391 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11392 			0x00000000, 0xffffffff },
11393 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11394 			0x00000000, 0x00000003 },
11395 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11396 			0x00000000, 0xffffffff },
11397 		{ RCVDBDI_STD_BD+0, 0x0000,
11398 			0x00000000, 0xffffffff },
11399 		{ RCVDBDI_STD_BD+4, 0x0000,
11400 			0x00000000, 0xffffffff },
11401 		{ RCVDBDI_STD_BD+8, 0x0000,
11402 			0x00000000, 0xffff0002 },
11403 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11404 			0x00000000, 0xffffffff },
11405 
11406 		/* Receive BD Initiator Control Registers. */
11407 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11408 			0x00000000, 0xffffffff },
11409 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11410 			0x00000000, 0x000003ff },
11411 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11412 			0x00000000, 0xffffffff },
11413 
11414 		/* Host Coalescing Control Registers. */
11415 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11416 			0x00000000, 0x00000004 },
11417 		{ HOSTCC_MODE, TG3_FL_5705,
11418 			0x00000000, 0x000000f6 },
11419 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11420 			0x00000000, 0xffffffff },
11421 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11422 			0x00000000, 0x000003ff },
11423 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11424 			0x00000000, 0xffffffff },
11425 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11426 			0x00000000, 0x000003ff },
11427 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11428 			0x00000000, 0xffffffff },
11429 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11430 			0x00000000, 0x000000ff },
11431 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11432 			0x00000000, 0xffffffff },
11433 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11434 			0x00000000, 0x000000ff },
11435 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11436 			0x00000000, 0xffffffff },
11437 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11438 			0x00000000, 0xffffffff },
11439 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11440 			0x00000000, 0xffffffff },
11441 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11442 			0x00000000, 0x000000ff },
11443 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11444 			0x00000000, 0xffffffff },
11445 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11446 			0x00000000, 0x000000ff },
11447 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11448 			0x00000000, 0xffffffff },
11449 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11450 			0x00000000, 0xffffffff },
11451 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11452 			0x00000000, 0xffffffff },
11453 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11454 			0x00000000, 0xffffffff },
11455 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11456 			0x00000000, 0xffffffff },
11457 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11458 			0xffffffff, 0x00000000 },
11459 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11460 			0xffffffff, 0x00000000 },
11461 
11462 		/* Buffer Manager Control Registers. */
11463 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11464 			0x00000000, 0x007fff80 },
11465 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11466 			0x00000000, 0x007fffff },
11467 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11468 			0x00000000, 0x0000003f },
11469 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11470 			0x00000000, 0x000001ff },
11471 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11472 			0x00000000, 0x000001ff },
11473 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11474 			0xffffffff, 0x00000000 },
11475 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11476 			0xffffffff, 0x00000000 },
11477 
11478 		/* Mailbox Registers */
11479 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11480 			0x00000000, 0x000001ff },
11481 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11482 			0x00000000, 0x000001ff },
11483 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11484 			0x00000000, 0x000007ff },
11485 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11486 			0x00000000, 0x000001ff },
11487 
11488 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11489 	};
11490 
11491 	is_5705 = is_5750 = 0;
11492 	if (tg3_flag(tp, 5705_PLUS)) {
11493 		is_5705 = 1;
11494 		if (tg3_flag(tp, 5750_PLUS))
11495 			is_5750 = 1;
11496 	}
11497 
11498 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11499 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11500 			continue;
11501 
11502 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11503 			continue;
11504 
11505 		if (tg3_flag(tp, IS_5788) &&
11506 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11507 			continue;
11508 
11509 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11510 			continue;
11511 
11512 		offset = (u32) reg_tbl[i].offset;
11513 		read_mask = reg_tbl[i].read_mask;
11514 		write_mask = reg_tbl[i].write_mask;
11515 
11516 		/* Save the original register content */
11517 		save_val = tr32(offset);
11518 
11519 		/* Determine the read-only value. */
11520 		read_val = save_val & read_mask;
11521 
11522 		/* Write zero to the register, then make sure the read-only bits
11523 		 * are not changed and the read/write bits are all zeros.
11524 		 */
11525 		tw32(offset, 0);
11526 
11527 		val = tr32(offset);
11528 
11529 		/* Test the read-only and read/write bits. */
11530 		if (((val & read_mask) != read_val) || (val & write_mask))
11531 			goto out;
11532 
11533 		/* Write ones to all the bits defined by RdMask and WrMask, then
11534 		 * make sure the read-only bits are not changed and the
11535 		 * read/write bits are all ones.
11536 		 */
11537 		tw32(offset, read_mask | write_mask);
11538 
11539 		val = tr32(offset);
11540 
11541 		/* Test the read-only bits. */
11542 		if ((val & read_mask) != read_val)
11543 			goto out;
11544 
11545 		/* Test the read/write bits. */
11546 		if ((val & write_mask) != write_mask)
11547 			goto out;
11548 
11549 		tw32(offset, save_val);
11550 	}
11551 
11552 	return 0;
11553 
11554 out:
11555 	if (netif_msg_hw(tp))
11556 		netdev_err(tp->dev,
11557 			   "Register test failed at offset %x\n", offset);
11558 	tw32(offset, save_val);
11559 	return -EIO;
11560 }
11561 
11562 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11563 {
11564 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11565 	int i;
11566 	u32 j;
11567 
11568 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11569 		for (j = 0; j < len; j += 4) {
11570 			u32 val;
11571 
11572 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11573 			tg3_read_mem(tp, offset + j, &val);
11574 			if (val != test_pattern[i])
11575 				return -EIO;
11576 		}
11577 	}
11578 	return 0;
11579 }
11580 
11581 static int tg3_test_memory(struct tg3 *tp)
11582 {
11583 	static struct mem_entry {
11584 		u32 offset;
11585 		u32 len;
11586 	} mem_tbl_570x[] = {
11587 		{ 0x00000000, 0x00b50},
11588 		{ 0x00002000, 0x1c000},
11589 		{ 0xffffffff, 0x00000}
11590 	}, mem_tbl_5705[] = {
11591 		{ 0x00000100, 0x0000c},
11592 		{ 0x00000200, 0x00008},
11593 		{ 0x00004000, 0x00800},
11594 		{ 0x00006000, 0x01000},
11595 		{ 0x00008000, 0x02000},
11596 		{ 0x00010000, 0x0e000},
11597 		{ 0xffffffff, 0x00000}
11598 	}, mem_tbl_5755[] = {
11599 		{ 0x00000200, 0x00008},
11600 		{ 0x00004000, 0x00800},
11601 		{ 0x00006000, 0x00800},
11602 		{ 0x00008000, 0x02000},
11603 		{ 0x00010000, 0x0c000},
11604 		{ 0xffffffff, 0x00000}
11605 	}, mem_tbl_5906[] = {
11606 		{ 0x00000200, 0x00008},
11607 		{ 0x00004000, 0x00400},
11608 		{ 0x00006000, 0x00400},
11609 		{ 0x00008000, 0x01000},
11610 		{ 0x00010000, 0x01000},
11611 		{ 0xffffffff, 0x00000}
11612 	}, mem_tbl_5717[] = {
11613 		{ 0x00000200, 0x00008},
11614 		{ 0x00010000, 0x0a000},
11615 		{ 0x00020000, 0x13c00},
11616 		{ 0xffffffff, 0x00000}
11617 	}, mem_tbl_57765[] = {
11618 		{ 0x00000200, 0x00008},
11619 		{ 0x00004000, 0x00800},
11620 		{ 0x00006000, 0x09800},
11621 		{ 0x00010000, 0x0a000},
11622 		{ 0xffffffff, 0x00000}
11623 	};
11624 	struct mem_entry *mem_tbl;
11625 	int err = 0;
11626 	int i;
11627 
11628 	if (tg3_flag(tp, 5717_PLUS))
11629 		mem_tbl = mem_tbl_5717;
11630 	else if (tg3_flag(tp, 57765_CLASS))
11631 		mem_tbl = mem_tbl_57765;
11632 	else if (tg3_flag(tp, 5755_PLUS))
11633 		mem_tbl = mem_tbl_5755;
11634 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11635 		mem_tbl = mem_tbl_5906;
11636 	else if (tg3_flag(tp, 5705_PLUS))
11637 		mem_tbl = mem_tbl_5705;
11638 	else
11639 		mem_tbl = mem_tbl_570x;
11640 
11641 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11642 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11643 		if (err)
11644 			break;
11645 	}
11646 
11647 	return err;
11648 }
11649 
11650 #define TG3_TSO_MSS		500
11651 
11652 #define TG3_TSO_IP_HDR_LEN	20
11653 #define TG3_TSO_TCP_HDR_LEN	20
11654 #define TG3_TSO_TCP_OPT_LEN	12
11655 
11656 static const u8 tg3_tso_header[] = {
11657 0x08, 0x00,
11658 0x45, 0x00, 0x00, 0x00,
11659 0x00, 0x00, 0x40, 0x00,
11660 0x40, 0x06, 0x00, 0x00,
11661 0x0a, 0x00, 0x00, 0x01,
11662 0x0a, 0x00, 0x00, 0x02,
11663 0x0d, 0x00, 0xe0, 0x00,
11664 0x00, 0x00, 0x01, 0x00,
11665 0x00, 0x00, 0x02, 0x00,
11666 0x80, 0x10, 0x10, 0x00,
11667 0x14, 0x09, 0x00, 0x00,
11668 0x01, 0x01, 0x08, 0x0a,
11669 0x11, 0x11, 0x11, 0x11,
11670 0x11, 0x11, 0x11, 0x11,
11671 };
11672 
11673 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11674 {
11675 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11676 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11677 	u32 budget;
11678 	struct sk_buff *skb;
11679 	u8 *tx_data, *rx_data;
11680 	dma_addr_t map;
11681 	int num_pkts, tx_len, rx_len, i, err;
11682 	struct tg3_rx_buffer_desc *desc;
11683 	struct tg3_napi *tnapi, *rnapi;
11684 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11685 
11686 	tnapi = &tp->napi[0];
11687 	rnapi = &tp->napi[0];
11688 	if (tp->irq_cnt > 1) {
11689 		if (tg3_flag(tp, ENABLE_RSS))
11690 			rnapi = &tp->napi[1];
11691 		if (tg3_flag(tp, ENABLE_TSS))
11692 			tnapi = &tp->napi[1];
11693 	}
11694 	coal_now = tnapi->coal_now | rnapi->coal_now;
11695 
11696 	err = -EIO;
11697 
11698 	tx_len = pktsz;
11699 	skb = netdev_alloc_skb(tp->dev, tx_len);
11700 	if (!skb)
11701 		return -ENOMEM;
11702 
11703 	tx_data = skb_put(skb, tx_len);
11704 	memcpy(tx_data, tp->dev->dev_addr, 6);
11705 	memset(tx_data + 6, 0x0, 8);
11706 
11707 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11708 
11709 	if (tso_loopback) {
11710 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11711 
11712 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11713 			      TG3_TSO_TCP_OPT_LEN;
11714 
11715 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11716 		       sizeof(tg3_tso_header));
11717 		mss = TG3_TSO_MSS;
11718 
11719 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11720 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11721 
11722 		/* Set the total length field in the IP header */
11723 		iph->tot_len = htons((u16)(mss + hdr_len));
11724 
11725 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11726 			      TXD_FLAG_CPU_POST_DMA);
11727 
11728 		if (tg3_flag(tp, HW_TSO_1) ||
11729 		    tg3_flag(tp, HW_TSO_2) ||
11730 		    tg3_flag(tp, HW_TSO_3)) {
11731 			struct tcphdr *th;
11732 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11733 			th = (struct tcphdr *)&tx_data[val];
11734 			th->check = 0;
11735 		} else
11736 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11737 
11738 		if (tg3_flag(tp, HW_TSO_3)) {
11739 			mss |= (hdr_len & 0xc) << 12;
11740 			if (hdr_len & 0x10)
11741 				base_flags |= 0x00000010;
11742 			base_flags |= (hdr_len & 0x3e0) << 5;
11743 		} else if (tg3_flag(tp, HW_TSO_2))
11744 			mss |= hdr_len << 9;
11745 		else if (tg3_flag(tp, HW_TSO_1) ||
11746 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11747 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11748 		} else {
11749 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11750 		}
11751 
11752 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11753 	} else {
11754 		num_pkts = 1;
11755 		data_off = ETH_HLEN;
11756 
11757 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11758 		    tx_len > VLAN_ETH_FRAME_LEN)
11759 			base_flags |= TXD_FLAG_JMB_PKT;
11760 	}
11761 
11762 	for (i = data_off; i < tx_len; i++)
11763 		tx_data[i] = (u8) (i & 0xff);
11764 
11765 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11766 	if (pci_dma_mapping_error(tp->pdev, map)) {
11767 		dev_kfree_skb(skb);
11768 		return -EIO;
11769 	}
11770 
11771 	val = tnapi->tx_prod;
11772 	tnapi->tx_buffers[val].skb = skb;
11773 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11774 
11775 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11776 	       rnapi->coal_now);
11777 
11778 	udelay(10);
11779 
11780 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11781 
11782 	budget = tg3_tx_avail(tnapi);
11783 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11784 			    base_flags | TXD_FLAG_END, mss, 0)) {
11785 		tnapi->tx_buffers[val].skb = NULL;
11786 		dev_kfree_skb(skb);
11787 		return -EIO;
11788 	}
11789 
11790 	tnapi->tx_prod++;
11791 
11792 	/* Sync BD data before updating mailbox */
11793 	wmb();
11794 
11795 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11796 	tr32_mailbox(tnapi->prodmbox);
11797 
11798 	udelay(10);
11799 
11800 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11801 	for (i = 0; i < 35; i++) {
11802 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11803 		       coal_now);
11804 
11805 		udelay(10);
11806 
11807 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11808 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11809 		if ((tx_idx == tnapi->tx_prod) &&
11810 		    (rx_idx == (rx_start_idx + num_pkts)))
11811 			break;
11812 	}
11813 
11814 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11815 	dev_kfree_skb(skb);
11816 
11817 	if (tx_idx != tnapi->tx_prod)
11818 		goto out;
11819 
11820 	if (rx_idx != rx_start_idx + num_pkts)
11821 		goto out;
11822 
11823 	val = data_off;
11824 	while (rx_idx != rx_start_idx) {
11825 		desc = &rnapi->rx_rcb[rx_start_idx++];
11826 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11827 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11828 
11829 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11830 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11831 			goto out;
11832 
11833 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11834 			 - ETH_FCS_LEN;
11835 
11836 		if (!tso_loopback) {
11837 			if (rx_len != tx_len)
11838 				goto out;
11839 
11840 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11841 				if (opaque_key != RXD_OPAQUE_RING_STD)
11842 					goto out;
11843 			} else {
11844 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11845 					goto out;
11846 			}
11847 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11848 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11849 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11850 			goto out;
11851 		}
11852 
11853 		if (opaque_key == RXD_OPAQUE_RING_STD) {
11854 			rx_data = tpr->rx_std_buffers[desc_idx].data;
11855 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11856 					     mapping);
11857 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11858 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11859 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11860 					     mapping);
11861 		} else
11862 			goto out;
11863 
11864 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11865 					    PCI_DMA_FROMDEVICE);
11866 
11867 		rx_data += TG3_RX_OFFSET(tp);
11868 		for (i = data_off; i < rx_len; i++, val++) {
11869 			if (*(rx_data + i) != (u8) (val & 0xff))
11870 				goto out;
11871 		}
11872 	}
11873 
11874 	err = 0;
11875 
11876 	/* tg3_free_rings will unmap and free the rx_data */
11877 out:
11878 	return err;
11879 }
11880 
11881 #define TG3_STD_LOOPBACK_FAILED		1
11882 #define TG3_JMB_LOOPBACK_FAILED		2
11883 #define TG3_TSO_LOOPBACK_FAILED		4
11884 #define TG3_LOOPBACK_FAILED \
11885 	(TG3_STD_LOOPBACK_FAILED | \
11886 	 TG3_JMB_LOOPBACK_FAILED | \
11887 	 TG3_TSO_LOOPBACK_FAILED)
11888 
11889 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11890 {
11891 	int err = -EIO;
11892 	u32 eee_cap;
11893 	u32 jmb_pkt_sz = 9000;
11894 
11895 	if (tp->dma_limit)
11896 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11897 
11898 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11899 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11900 
11901 	if (!netif_running(tp->dev)) {
11902 		data[0] = TG3_LOOPBACK_FAILED;
11903 		data[1] = TG3_LOOPBACK_FAILED;
11904 		if (do_extlpbk)
11905 			data[2] = TG3_LOOPBACK_FAILED;
11906 		goto done;
11907 	}
11908 
11909 	err = tg3_reset_hw(tp, 1);
11910 	if (err) {
11911 		data[0] = TG3_LOOPBACK_FAILED;
11912 		data[1] = TG3_LOOPBACK_FAILED;
11913 		if (do_extlpbk)
11914 			data[2] = TG3_LOOPBACK_FAILED;
11915 		goto done;
11916 	}
11917 
11918 	if (tg3_flag(tp, ENABLE_RSS)) {
11919 		int i;
11920 
11921 		/* Reroute all rx packets to the 1st queue */
11922 		for (i = MAC_RSS_INDIR_TBL_0;
11923 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11924 			tw32(i, 0x0);
11925 	}
11926 
11927 	/* HW errata - mac loopback fails in some cases on 5780.
11928 	 * Normal traffic and PHY loopback are not affected by
11929 	 * errata.  Also, the MAC loopback test is deprecated for
11930 	 * all newer ASIC revisions.
11931 	 */
11932 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11933 	    !tg3_flag(tp, CPMU_PRESENT)) {
11934 		tg3_mac_loopback(tp, true);
11935 
11936 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11937 			data[0] |= TG3_STD_LOOPBACK_FAILED;
11938 
11939 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11940 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11941 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11942 
11943 		tg3_mac_loopback(tp, false);
11944 	}
11945 
11946 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11947 	    !tg3_flag(tp, USE_PHYLIB)) {
11948 		int i;
11949 
11950 		tg3_phy_lpbk_set(tp, 0, false);
11951 
11952 		/* Wait for link */
11953 		for (i = 0; i < 100; i++) {
11954 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11955 				break;
11956 			mdelay(1);
11957 		}
11958 
11959 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11960 			data[1] |= TG3_STD_LOOPBACK_FAILED;
11961 		if (tg3_flag(tp, TSO_CAPABLE) &&
11962 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11963 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
11964 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11965 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11966 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
11967 
11968 		if (do_extlpbk) {
11969 			tg3_phy_lpbk_set(tp, 0, true);
11970 
11971 			/* All link indications report up, but the hardware
11972 			 * isn't really ready for about 20 msec.  Double it
11973 			 * to be sure.
11974 			 */
11975 			mdelay(40);
11976 
11977 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11978 				data[2] |= TG3_STD_LOOPBACK_FAILED;
11979 			if (tg3_flag(tp, TSO_CAPABLE) &&
11980 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11981 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
11982 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11983 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11984 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
11985 		}
11986 
11987 		/* Re-enable gphy autopowerdown. */
11988 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11989 			tg3_phy_toggle_apd(tp, true);
11990 	}
11991 
11992 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11993 
11994 done:
11995 	tp->phy_flags |= eee_cap;
11996 
11997 	return err;
11998 }
11999 
12000 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12001 			  u64 *data)
12002 {
12003 	struct tg3 *tp = netdev_priv(dev);
12004 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12005 
12006 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12007 	    tg3_power_up(tp)) {
12008 		etest->flags |= ETH_TEST_FL_FAILED;
12009 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12010 		return;
12011 	}
12012 
12013 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12014 
12015 	if (tg3_test_nvram(tp) != 0) {
12016 		etest->flags |= ETH_TEST_FL_FAILED;
12017 		data[0] = 1;
12018 	}
12019 	if (!doextlpbk && tg3_test_link(tp)) {
12020 		etest->flags |= ETH_TEST_FL_FAILED;
12021 		data[1] = 1;
12022 	}
12023 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12024 		int err, err2 = 0, irq_sync = 0;
12025 
12026 		if (netif_running(dev)) {
12027 			tg3_phy_stop(tp);
12028 			tg3_netif_stop(tp);
12029 			irq_sync = 1;
12030 		}
12031 
12032 		tg3_full_lock(tp, irq_sync);
12033 
12034 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12035 		err = tg3_nvram_lock(tp);
12036 		tg3_halt_cpu(tp, RX_CPU_BASE);
12037 		if (!tg3_flag(tp, 5705_PLUS))
12038 			tg3_halt_cpu(tp, TX_CPU_BASE);
12039 		if (!err)
12040 			tg3_nvram_unlock(tp);
12041 
12042 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12043 			tg3_phy_reset(tp);
12044 
12045 		if (tg3_test_registers(tp) != 0) {
12046 			etest->flags |= ETH_TEST_FL_FAILED;
12047 			data[2] = 1;
12048 		}
12049 
12050 		if (tg3_test_memory(tp) != 0) {
12051 			etest->flags |= ETH_TEST_FL_FAILED;
12052 			data[3] = 1;
12053 		}
12054 
12055 		if (doextlpbk)
12056 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12057 
12058 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
12059 			etest->flags |= ETH_TEST_FL_FAILED;
12060 
12061 		tg3_full_unlock(tp);
12062 
12063 		if (tg3_test_interrupt(tp) != 0) {
12064 			etest->flags |= ETH_TEST_FL_FAILED;
12065 			data[7] = 1;
12066 		}
12067 
12068 		tg3_full_lock(tp, 0);
12069 
12070 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12071 		if (netif_running(dev)) {
12072 			tg3_flag_set(tp, INIT_COMPLETE);
12073 			err2 = tg3_restart_hw(tp, 1);
12074 			if (!err2)
12075 				tg3_netif_start(tp);
12076 		}
12077 
12078 		tg3_full_unlock(tp);
12079 
12080 		if (irq_sync && !err2)
12081 			tg3_phy_start(tp);
12082 	}
12083 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12084 		tg3_power_down(tp);
12085 
12086 }
12087 
12088 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12089 {
12090 	struct mii_ioctl_data *data = if_mii(ifr);
12091 	struct tg3 *tp = netdev_priv(dev);
12092 	int err;
12093 
12094 	if (tg3_flag(tp, USE_PHYLIB)) {
12095 		struct phy_device *phydev;
12096 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12097 			return -EAGAIN;
12098 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12099 		return phy_mii_ioctl(phydev, ifr, cmd);
12100 	}
12101 
12102 	switch (cmd) {
12103 	case SIOCGMIIPHY:
12104 		data->phy_id = tp->phy_addr;
12105 
12106 		/* fallthru */
12107 	case SIOCGMIIREG: {
12108 		u32 mii_regval;
12109 
12110 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12111 			break;			/* We have no PHY */
12112 
12113 		if (!netif_running(dev))
12114 			return -EAGAIN;
12115 
12116 		spin_lock_bh(&tp->lock);
12117 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12118 		spin_unlock_bh(&tp->lock);
12119 
12120 		data->val_out = mii_regval;
12121 
12122 		return err;
12123 	}
12124 
12125 	case SIOCSMIIREG:
12126 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12127 			break;			/* We have no PHY */
12128 
12129 		if (!netif_running(dev))
12130 			return -EAGAIN;
12131 
12132 		spin_lock_bh(&tp->lock);
12133 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12134 		spin_unlock_bh(&tp->lock);
12135 
12136 		return err;
12137 
12138 	default:
12139 		/* do nothing */
12140 		break;
12141 	}
12142 	return -EOPNOTSUPP;
12143 }
12144 
12145 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12146 {
12147 	struct tg3 *tp = netdev_priv(dev);
12148 
12149 	memcpy(ec, &tp->coal, sizeof(*ec));
12150 	return 0;
12151 }
12152 
12153 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12154 {
12155 	struct tg3 *tp = netdev_priv(dev);
12156 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12157 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12158 
12159 	if (!tg3_flag(tp, 5705_PLUS)) {
12160 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12161 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12162 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12163 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12164 	}
12165 
12166 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12167 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12168 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12169 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12170 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12171 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12172 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12173 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12174 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12175 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12176 		return -EINVAL;
12177 
12178 	/* No rx interrupts will be generated if both are zero */
12179 	if ((ec->rx_coalesce_usecs == 0) &&
12180 	    (ec->rx_max_coalesced_frames == 0))
12181 		return -EINVAL;
12182 
12183 	/* No tx interrupts will be generated if both are zero */
12184 	if ((ec->tx_coalesce_usecs == 0) &&
12185 	    (ec->tx_max_coalesced_frames == 0))
12186 		return -EINVAL;
12187 
12188 	/* Only copy relevant parameters, ignore all others. */
12189 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12190 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12191 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12192 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12193 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12194 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12195 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12196 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12197 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12198 
12199 	if (netif_running(dev)) {
12200 		tg3_full_lock(tp, 0);
12201 		__tg3_set_coalesce(tp, &tp->coal);
12202 		tg3_full_unlock(tp);
12203 	}
12204 	return 0;
12205 }
12206 
12207 static const struct ethtool_ops tg3_ethtool_ops = {
12208 	.get_settings		= tg3_get_settings,
12209 	.set_settings		= tg3_set_settings,
12210 	.get_drvinfo		= tg3_get_drvinfo,
12211 	.get_regs_len		= tg3_get_regs_len,
12212 	.get_regs		= tg3_get_regs,
12213 	.get_wol		= tg3_get_wol,
12214 	.set_wol		= tg3_set_wol,
12215 	.get_msglevel		= tg3_get_msglevel,
12216 	.set_msglevel		= tg3_set_msglevel,
12217 	.nway_reset		= tg3_nway_reset,
12218 	.get_link		= ethtool_op_get_link,
12219 	.get_eeprom_len		= tg3_get_eeprom_len,
12220 	.get_eeprom		= tg3_get_eeprom,
12221 	.set_eeprom		= tg3_set_eeprom,
12222 	.get_ringparam		= tg3_get_ringparam,
12223 	.set_ringparam		= tg3_set_ringparam,
12224 	.get_pauseparam		= tg3_get_pauseparam,
12225 	.set_pauseparam		= tg3_set_pauseparam,
12226 	.self_test		= tg3_self_test,
12227 	.get_strings		= tg3_get_strings,
12228 	.set_phys_id		= tg3_set_phys_id,
12229 	.get_ethtool_stats	= tg3_get_ethtool_stats,
12230 	.get_coalesce		= tg3_get_coalesce,
12231 	.set_coalesce		= tg3_set_coalesce,
12232 	.get_sset_count		= tg3_get_sset_count,
12233 	.get_rxnfc		= tg3_get_rxnfc,
12234 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12235 	.get_rxfh_indir		= tg3_get_rxfh_indir,
12236 	.set_rxfh_indir		= tg3_set_rxfh_indir,
12237 };
12238 
12239 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12240 						struct rtnl_link_stats64 *stats)
12241 {
12242 	struct tg3 *tp = netdev_priv(dev);
12243 
12244 	if (!tp->hw_stats)
12245 		return &tp->net_stats_prev;
12246 
12247 	spin_lock_bh(&tp->lock);
12248 	tg3_get_nstats(tp, stats);
12249 	spin_unlock_bh(&tp->lock);
12250 
12251 	return stats;
12252 }
12253 
12254 static void tg3_set_rx_mode(struct net_device *dev)
12255 {
12256 	struct tg3 *tp = netdev_priv(dev);
12257 
12258 	if (!netif_running(dev))
12259 		return;
12260 
12261 	tg3_full_lock(tp, 0);
12262 	__tg3_set_rx_mode(dev);
12263 	tg3_full_unlock(tp);
12264 }
12265 
12266 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12267 			       int new_mtu)
12268 {
12269 	dev->mtu = new_mtu;
12270 
12271 	if (new_mtu > ETH_DATA_LEN) {
12272 		if (tg3_flag(tp, 5780_CLASS)) {
12273 			netdev_update_features(dev);
12274 			tg3_flag_clear(tp, TSO_CAPABLE);
12275 		} else {
12276 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
12277 		}
12278 	} else {
12279 		if (tg3_flag(tp, 5780_CLASS)) {
12280 			tg3_flag_set(tp, TSO_CAPABLE);
12281 			netdev_update_features(dev);
12282 		}
12283 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12284 	}
12285 }
12286 
12287 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12288 {
12289 	struct tg3 *tp = netdev_priv(dev);
12290 	int err, reset_phy = 0;
12291 
12292 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12293 		return -EINVAL;
12294 
12295 	if (!netif_running(dev)) {
12296 		/* We'll just catch it later when the
12297 		 * device is up'd.
12298 		 */
12299 		tg3_set_mtu(dev, tp, new_mtu);
12300 		return 0;
12301 	}
12302 
12303 	tg3_phy_stop(tp);
12304 
12305 	tg3_netif_stop(tp);
12306 
12307 	tg3_full_lock(tp, 1);
12308 
12309 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12310 
12311 	tg3_set_mtu(dev, tp, new_mtu);
12312 
12313 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
12314 	 * breaks all requests to 256 bytes.
12315 	 */
12316 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12317 		reset_phy = 1;
12318 
12319 	err = tg3_restart_hw(tp, reset_phy);
12320 
12321 	if (!err)
12322 		tg3_netif_start(tp);
12323 
12324 	tg3_full_unlock(tp);
12325 
12326 	if (!err)
12327 		tg3_phy_start(tp);
12328 
12329 	return err;
12330 }
12331 
12332 static const struct net_device_ops tg3_netdev_ops = {
12333 	.ndo_open		= tg3_open,
12334 	.ndo_stop		= tg3_close,
12335 	.ndo_start_xmit		= tg3_start_xmit,
12336 	.ndo_get_stats64	= tg3_get_stats64,
12337 	.ndo_validate_addr	= eth_validate_addr,
12338 	.ndo_set_rx_mode	= tg3_set_rx_mode,
12339 	.ndo_set_mac_address	= tg3_set_mac_addr,
12340 	.ndo_do_ioctl		= tg3_ioctl,
12341 	.ndo_tx_timeout		= tg3_tx_timeout,
12342 	.ndo_change_mtu		= tg3_change_mtu,
12343 	.ndo_fix_features	= tg3_fix_features,
12344 	.ndo_set_features	= tg3_set_features,
12345 #ifdef CONFIG_NET_POLL_CONTROLLER
12346 	.ndo_poll_controller	= tg3_poll_controller,
12347 #endif
12348 };
12349 
12350 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12351 {
12352 	u32 cursize, val, magic;
12353 
12354 	tp->nvram_size = EEPROM_CHIP_SIZE;
12355 
12356 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12357 		return;
12358 
12359 	if ((magic != TG3_EEPROM_MAGIC) &&
12360 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12361 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12362 		return;
12363 
12364 	/*
12365 	 * Size the chip by reading offsets at increasing powers of two.
12366 	 * When we encounter our validation signature, we know the addressing
12367 	 * has wrapped around, and thus have our chip size.
12368 	 */
12369 	cursize = 0x10;
12370 
12371 	while (cursize < tp->nvram_size) {
12372 		if (tg3_nvram_read(tp, cursize, &val) != 0)
12373 			return;
12374 
12375 		if (val == magic)
12376 			break;
12377 
12378 		cursize <<= 1;
12379 	}
12380 
12381 	tp->nvram_size = cursize;
12382 }
12383 
12384 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12385 {
12386 	u32 val;
12387 
12388 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12389 		return;
12390 
12391 	/* Selfboot format */
12392 	if (val != TG3_EEPROM_MAGIC) {
12393 		tg3_get_eeprom_size(tp);
12394 		return;
12395 	}
12396 
12397 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12398 		if (val != 0) {
12399 			/* This is confusing.  We want to operate on the
12400 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12401 			 * call will read from NVRAM and byteswap the data
12402 			 * according to the byteswapping settings for all
12403 			 * other register accesses.  This ensures the data we
12404 			 * want will always reside in the lower 16-bits.
12405 			 * However, the data in NVRAM is in LE format, which
12406 			 * means the data from the NVRAM read will always be
12407 			 * opposite the endianness of the CPU.  The 16-bit
12408 			 * byteswap then brings the data to CPU endianness.
12409 			 */
12410 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12411 			return;
12412 		}
12413 	}
12414 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12415 }
12416 
12417 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12418 {
12419 	u32 nvcfg1;
12420 
12421 	nvcfg1 = tr32(NVRAM_CFG1);
12422 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12423 		tg3_flag_set(tp, FLASH);
12424 	} else {
12425 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12426 		tw32(NVRAM_CFG1, nvcfg1);
12427 	}
12428 
12429 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12430 	    tg3_flag(tp, 5780_CLASS)) {
12431 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12432 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12433 			tp->nvram_jedecnum = JEDEC_ATMEL;
12434 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12435 			tg3_flag_set(tp, NVRAM_BUFFERED);
12436 			break;
12437 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12438 			tp->nvram_jedecnum = JEDEC_ATMEL;
12439 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12440 			break;
12441 		case FLASH_VENDOR_ATMEL_EEPROM:
12442 			tp->nvram_jedecnum = JEDEC_ATMEL;
12443 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12444 			tg3_flag_set(tp, NVRAM_BUFFERED);
12445 			break;
12446 		case FLASH_VENDOR_ST:
12447 			tp->nvram_jedecnum = JEDEC_ST;
12448 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12449 			tg3_flag_set(tp, NVRAM_BUFFERED);
12450 			break;
12451 		case FLASH_VENDOR_SAIFUN:
12452 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12453 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12454 			break;
12455 		case FLASH_VENDOR_SST_SMALL:
12456 		case FLASH_VENDOR_SST_LARGE:
12457 			tp->nvram_jedecnum = JEDEC_SST;
12458 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12459 			break;
12460 		}
12461 	} else {
12462 		tp->nvram_jedecnum = JEDEC_ATMEL;
12463 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12464 		tg3_flag_set(tp, NVRAM_BUFFERED);
12465 	}
12466 }
12467 
12468 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12469 {
12470 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12471 	case FLASH_5752PAGE_SIZE_256:
12472 		tp->nvram_pagesize = 256;
12473 		break;
12474 	case FLASH_5752PAGE_SIZE_512:
12475 		tp->nvram_pagesize = 512;
12476 		break;
12477 	case FLASH_5752PAGE_SIZE_1K:
12478 		tp->nvram_pagesize = 1024;
12479 		break;
12480 	case FLASH_5752PAGE_SIZE_2K:
12481 		tp->nvram_pagesize = 2048;
12482 		break;
12483 	case FLASH_5752PAGE_SIZE_4K:
12484 		tp->nvram_pagesize = 4096;
12485 		break;
12486 	case FLASH_5752PAGE_SIZE_264:
12487 		tp->nvram_pagesize = 264;
12488 		break;
12489 	case FLASH_5752PAGE_SIZE_528:
12490 		tp->nvram_pagesize = 528;
12491 		break;
12492 	}
12493 }
12494 
12495 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12496 {
12497 	u32 nvcfg1;
12498 
12499 	nvcfg1 = tr32(NVRAM_CFG1);
12500 
12501 	/* NVRAM protection for TPM */
12502 	if (nvcfg1 & (1 << 27))
12503 		tg3_flag_set(tp, PROTECTED_NVRAM);
12504 
12505 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12506 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12507 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12508 		tp->nvram_jedecnum = JEDEC_ATMEL;
12509 		tg3_flag_set(tp, NVRAM_BUFFERED);
12510 		break;
12511 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12512 		tp->nvram_jedecnum = JEDEC_ATMEL;
12513 		tg3_flag_set(tp, NVRAM_BUFFERED);
12514 		tg3_flag_set(tp, FLASH);
12515 		break;
12516 	case FLASH_5752VENDOR_ST_M45PE10:
12517 	case FLASH_5752VENDOR_ST_M45PE20:
12518 	case FLASH_5752VENDOR_ST_M45PE40:
12519 		tp->nvram_jedecnum = JEDEC_ST;
12520 		tg3_flag_set(tp, NVRAM_BUFFERED);
12521 		tg3_flag_set(tp, FLASH);
12522 		break;
12523 	}
12524 
12525 	if (tg3_flag(tp, FLASH)) {
12526 		tg3_nvram_get_pagesize(tp, nvcfg1);
12527 	} else {
12528 		/* For eeprom, set pagesize to maximum eeprom size */
12529 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12530 
12531 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12532 		tw32(NVRAM_CFG1, nvcfg1);
12533 	}
12534 }
12535 
12536 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12537 {
12538 	u32 nvcfg1, protect = 0;
12539 
12540 	nvcfg1 = tr32(NVRAM_CFG1);
12541 
12542 	/* NVRAM protection for TPM */
12543 	if (nvcfg1 & (1 << 27)) {
12544 		tg3_flag_set(tp, PROTECTED_NVRAM);
12545 		protect = 1;
12546 	}
12547 
12548 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12549 	switch (nvcfg1) {
12550 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12551 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12552 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12553 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12554 		tp->nvram_jedecnum = JEDEC_ATMEL;
12555 		tg3_flag_set(tp, NVRAM_BUFFERED);
12556 		tg3_flag_set(tp, FLASH);
12557 		tp->nvram_pagesize = 264;
12558 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12559 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12560 			tp->nvram_size = (protect ? 0x3e200 :
12561 					  TG3_NVRAM_SIZE_512KB);
12562 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12563 			tp->nvram_size = (protect ? 0x1f200 :
12564 					  TG3_NVRAM_SIZE_256KB);
12565 		else
12566 			tp->nvram_size = (protect ? 0x1f200 :
12567 					  TG3_NVRAM_SIZE_128KB);
12568 		break;
12569 	case FLASH_5752VENDOR_ST_M45PE10:
12570 	case FLASH_5752VENDOR_ST_M45PE20:
12571 	case FLASH_5752VENDOR_ST_M45PE40:
12572 		tp->nvram_jedecnum = JEDEC_ST;
12573 		tg3_flag_set(tp, NVRAM_BUFFERED);
12574 		tg3_flag_set(tp, FLASH);
12575 		tp->nvram_pagesize = 256;
12576 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12577 			tp->nvram_size = (protect ?
12578 					  TG3_NVRAM_SIZE_64KB :
12579 					  TG3_NVRAM_SIZE_128KB);
12580 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12581 			tp->nvram_size = (protect ?
12582 					  TG3_NVRAM_SIZE_64KB :
12583 					  TG3_NVRAM_SIZE_256KB);
12584 		else
12585 			tp->nvram_size = (protect ?
12586 					  TG3_NVRAM_SIZE_128KB :
12587 					  TG3_NVRAM_SIZE_512KB);
12588 		break;
12589 	}
12590 }
12591 
12592 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12593 {
12594 	u32 nvcfg1;
12595 
12596 	nvcfg1 = tr32(NVRAM_CFG1);
12597 
12598 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12599 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12600 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12601 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12602 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12603 		tp->nvram_jedecnum = JEDEC_ATMEL;
12604 		tg3_flag_set(tp, NVRAM_BUFFERED);
12605 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12606 
12607 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12608 		tw32(NVRAM_CFG1, nvcfg1);
12609 		break;
12610 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12611 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12612 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12613 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12614 		tp->nvram_jedecnum = JEDEC_ATMEL;
12615 		tg3_flag_set(tp, NVRAM_BUFFERED);
12616 		tg3_flag_set(tp, FLASH);
12617 		tp->nvram_pagesize = 264;
12618 		break;
12619 	case FLASH_5752VENDOR_ST_M45PE10:
12620 	case FLASH_5752VENDOR_ST_M45PE20:
12621 	case FLASH_5752VENDOR_ST_M45PE40:
12622 		tp->nvram_jedecnum = JEDEC_ST;
12623 		tg3_flag_set(tp, NVRAM_BUFFERED);
12624 		tg3_flag_set(tp, FLASH);
12625 		tp->nvram_pagesize = 256;
12626 		break;
12627 	}
12628 }
12629 
12630 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12631 {
12632 	u32 nvcfg1, protect = 0;
12633 
12634 	nvcfg1 = tr32(NVRAM_CFG1);
12635 
12636 	/* NVRAM protection for TPM */
12637 	if (nvcfg1 & (1 << 27)) {
12638 		tg3_flag_set(tp, PROTECTED_NVRAM);
12639 		protect = 1;
12640 	}
12641 
12642 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12643 	switch (nvcfg1) {
12644 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12645 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12646 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12647 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12648 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12649 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12650 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12651 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12652 		tp->nvram_jedecnum = JEDEC_ATMEL;
12653 		tg3_flag_set(tp, NVRAM_BUFFERED);
12654 		tg3_flag_set(tp, FLASH);
12655 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12656 		tp->nvram_pagesize = 256;
12657 		break;
12658 	case FLASH_5761VENDOR_ST_A_M45PE20:
12659 	case FLASH_5761VENDOR_ST_A_M45PE40:
12660 	case FLASH_5761VENDOR_ST_A_M45PE80:
12661 	case FLASH_5761VENDOR_ST_A_M45PE16:
12662 	case FLASH_5761VENDOR_ST_M_M45PE20:
12663 	case FLASH_5761VENDOR_ST_M_M45PE40:
12664 	case FLASH_5761VENDOR_ST_M_M45PE80:
12665 	case FLASH_5761VENDOR_ST_M_M45PE16:
12666 		tp->nvram_jedecnum = JEDEC_ST;
12667 		tg3_flag_set(tp, NVRAM_BUFFERED);
12668 		tg3_flag_set(tp, FLASH);
12669 		tp->nvram_pagesize = 256;
12670 		break;
12671 	}
12672 
12673 	if (protect) {
12674 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12675 	} else {
12676 		switch (nvcfg1) {
12677 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12678 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12679 		case FLASH_5761VENDOR_ST_A_M45PE16:
12680 		case FLASH_5761VENDOR_ST_M_M45PE16:
12681 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12682 			break;
12683 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12684 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12685 		case FLASH_5761VENDOR_ST_A_M45PE80:
12686 		case FLASH_5761VENDOR_ST_M_M45PE80:
12687 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12688 			break;
12689 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12690 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12691 		case FLASH_5761VENDOR_ST_A_M45PE40:
12692 		case FLASH_5761VENDOR_ST_M_M45PE40:
12693 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12694 			break;
12695 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12696 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12697 		case FLASH_5761VENDOR_ST_A_M45PE20:
12698 		case FLASH_5761VENDOR_ST_M_M45PE20:
12699 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12700 			break;
12701 		}
12702 	}
12703 }
12704 
12705 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12706 {
12707 	tp->nvram_jedecnum = JEDEC_ATMEL;
12708 	tg3_flag_set(tp, NVRAM_BUFFERED);
12709 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12710 }
12711 
12712 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12713 {
12714 	u32 nvcfg1;
12715 
12716 	nvcfg1 = tr32(NVRAM_CFG1);
12717 
12718 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12719 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12720 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12721 		tp->nvram_jedecnum = JEDEC_ATMEL;
12722 		tg3_flag_set(tp, NVRAM_BUFFERED);
12723 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12724 
12725 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12726 		tw32(NVRAM_CFG1, nvcfg1);
12727 		return;
12728 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12729 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12730 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12731 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12732 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12733 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12734 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12735 		tp->nvram_jedecnum = JEDEC_ATMEL;
12736 		tg3_flag_set(tp, NVRAM_BUFFERED);
12737 		tg3_flag_set(tp, FLASH);
12738 
12739 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12740 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12741 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12742 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12743 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12744 			break;
12745 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12746 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12747 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12748 			break;
12749 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12750 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12751 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12752 			break;
12753 		}
12754 		break;
12755 	case FLASH_5752VENDOR_ST_M45PE10:
12756 	case FLASH_5752VENDOR_ST_M45PE20:
12757 	case FLASH_5752VENDOR_ST_M45PE40:
12758 		tp->nvram_jedecnum = JEDEC_ST;
12759 		tg3_flag_set(tp, NVRAM_BUFFERED);
12760 		tg3_flag_set(tp, FLASH);
12761 
12762 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12763 		case FLASH_5752VENDOR_ST_M45PE10:
12764 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12765 			break;
12766 		case FLASH_5752VENDOR_ST_M45PE20:
12767 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12768 			break;
12769 		case FLASH_5752VENDOR_ST_M45PE40:
12770 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12771 			break;
12772 		}
12773 		break;
12774 	default:
12775 		tg3_flag_set(tp, NO_NVRAM);
12776 		return;
12777 	}
12778 
12779 	tg3_nvram_get_pagesize(tp, nvcfg1);
12780 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12781 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12782 }
12783 
12784 
12785 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12786 {
12787 	u32 nvcfg1;
12788 
12789 	nvcfg1 = tr32(NVRAM_CFG1);
12790 
12791 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12792 	case FLASH_5717VENDOR_ATMEL_EEPROM:
12793 	case FLASH_5717VENDOR_MICRO_EEPROM:
12794 		tp->nvram_jedecnum = JEDEC_ATMEL;
12795 		tg3_flag_set(tp, NVRAM_BUFFERED);
12796 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12797 
12798 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12799 		tw32(NVRAM_CFG1, nvcfg1);
12800 		return;
12801 	case FLASH_5717VENDOR_ATMEL_MDB011D:
12802 	case FLASH_5717VENDOR_ATMEL_ADB011B:
12803 	case FLASH_5717VENDOR_ATMEL_ADB011D:
12804 	case FLASH_5717VENDOR_ATMEL_MDB021D:
12805 	case FLASH_5717VENDOR_ATMEL_ADB021B:
12806 	case FLASH_5717VENDOR_ATMEL_ADB021D:
12807 	case FLASH_5717VENDOR_ATMEL_45USPT:
12808 		tp->nvram_jedecnum = JEDEC_ATMEL;
12809 		tg3_flag_set(tp, NVRAM_BUFFERED);
12810 		tg3_flag_set(tp, FLASH);
12811 
12812 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12813 		case FLASH_5717VENDOR_ATMEL_MDB021D:
12814 			/* Detect size with tg3_nvram_get_size() */
12815 			break;
12816 		case FLASH_5717VENDOR_ATMEL_ADB021B:
12817 		case FLASH_5717VENDOR_ATMEL_ADB021D:
12818 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12819 			break;
12820 		default:
12821 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12822 			break;
12823 		}
12824 		break;
12825 	case FLASH_5717VENDOR_ST_M_M25PE10:
12826 	case FLASH_5717VENDOR_ST_A_M25PE10:
12827 	case FLASH_5717VENDOR_ST_M_M45PE10:
12828 	case FLASH_5717VENDOR_ST_A_M45PE10:
12829 	case FLASH_5717VENDOR_ST_M_M25PE20:
12830 	case FLASH_5717VENDOR_ST_A_M25PE20:
12831 	case FLASH_5717VENDOR_ST_M_M45PE20:
12832 	case FLASH_5717VENDOR_ST_A_M45PE20:
12833 	case FLASH_5717VENDOR_ST_25USPT:
12834 	case FLASH_5717VENDOR_ST_45USPT:
12835 		tp->nvram_jedecnum = JEDEC_ST;
12836 		tg3_flag_set(tp, NVRAM_BUFFERED);
12837 		tg3_flag_set(tp, FLASH);
12838 
12839 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12840 		case FLASH_5717VENDOR_ST_M_M25PE20:
12841 		case FLASH_5717VENDOR_ST_M_M45PE20:
12842 			/* Detect size with tg3_nvram_get_size() */
12843 			break;
12844 		case FLASH_5717VENDOR_ST_A_M25PE20:
12845 		case FLASH_5717VENDOR_ST_A_M45PE20:
12846 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12847 			break;
12848 		default:
12849 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12850 			break;
12851 		}
12852 		break;
12853 	default:
12854 		tg3_flag_set(tp, NO_NVRAM);
12855 		return;
12856 	}
12857 
12858 	tg3_nvram_get_pagesize(tp, nvcfg1);
12859 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12860 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12861 }
12862 
12863 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12864 {
12865 	u32 nvcfg1, nvmpinstrp;
12866 
12867 	nvcfg1 = tr32(NVRAM_CFG1);
12868 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12869 
12870 	switch (nvmpinstrp) {
12871 	case FLASH_5720_EEPROM_HD:
12872 	case FLASH_5720_EEPROM_LD:
12873 		tp->nvram_jedecnum = JEDEC_ATMEL;
12874 		tg3_flag_set(tp, NVRAM_BUFFERED);
12875 
12876 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12877 		tw32(NVRAM_CFG1, nvcfg1);
12878 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12879 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12880 		else
12881 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12882 		return;
12883 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12884 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12885 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12886 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12887 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12888 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12889 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12890 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12891 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12892 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12893 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12894 	case FLASH_5720VENDOR_ATMEL_45USPT:
12895 		tp->nvram_jedecnum = JEDEC_ATMEL;
12896 		tg3_flag_set(tp, NVRAM_BUFFERED);
12897 		tg3_flag_set(tp, FLASH);
12898 
12899 		switch (nvmpinstrp) {
12900 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12901 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12902 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12903 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12904 			break;
12905 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12906 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12907 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12908 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12909 			break;
12910 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12911 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12912 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12913 			break;
12914 		default:
12915 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12916 			break;
12917 		}
12918 		break;
12919 	case FLASH_5720VENDOR_M_ST_M25PE10:
12920 	case FLASH_5720VENDOR_M_ST_M45PE10:
12921 	case FLASH_5720VENDOR_A_ST_M25PE10:
12922 	case FLASH_5720VENDOR_A_ST_M45PE10:
12923 	case FLASH_5720VENDOR_M_ST_M25PE20:
12924 	case FLASH_5720VENDOR_M_ST_M45PE20:
12925 	case FLASH_5720VENDOR_A_ST_M25PE20:
12926 	case FLASH_5720VENDOR_A_ST_M45PE20:
12927 	case FLASH_5720VENDOR_M_ST_M25PE40:
12928 	case FLASH_5720VENDOR_M_ST_M45PE40:
12929 	case FLASH_5720VENDOR_A_ST_M25PE40:
12930 	case FLASH_5720VENDOR_A_ST_M45PE40:
12931 	case FLASH_5720VENDOR_M_ST_M25PE80:
12932 	case FLASH_5720VENDOR_M_ST_M45PE80:
12933 	case FLASH_5720VENDOR_A_ST_M25PE80:
12934 	case FLASH_5720VENDOR_A_ST_M45PE80:
12935 	case FLASH_5720VENDOR_ST_25USPT:
12936 	case FLASH_5720VENDOR_ST_45USPT:
12937 		tp->nvram_jedecnum = JEDEC_ST;
12938 		tg3_flag_set(tp, NVRAM_BUFFERED);
12939 		tg3_flag_set(tp, FLASH);
12940 
12941 		switch (nvmpinstrp) {
12942 		case FLASH_5720VENDOR_M_ST_M25PE20:
12943 		case FLASH_5720VENDOR_M_ST_M45PE20:
12944 		case FLASH_5720VENDOR_A_ST_M25PE20:
12945 		case FLASH_5720VENDOR_A_ST_M45PE20:
12946 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12947 			break;
12948 		case FLASH_5720VENDOR_M_ST_M25PE40:
12949 		case FLASH_5720VENDOR_M_ST_M45PE40:
12950 		case FLASH_5720VENDOR_A_ST_M25PE40:
12951 		case FLASH_5720VENDOR_A_ST_M45PE40:
12952 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12953 			break;
12954 		case FLASH_5720VENDOR_M_ST_M25PE80:
12955 		case FLASH_5720VENDOR_M_ST_M45PE80:
12956 		case FLASH_5720VENDOR_A_ST_M25PE80:
12957 		case FLASH_5720VENDOR_A_ST_M45PE80:
12958 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12959 			break;
12960 		default:
12961 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12962 			break;
12963 		}
12964 		break;
12965 	default:
12966 		tg3_flag_set(tp, NO_NVRAM);
12967 		return;
12968 	}
12969 
12970 	tg3_nvram_get_pagesize(tp, nvcfg1);
12971 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12972 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12973 }
12974 
12975 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12976 static void __devinit tg3_nvram_init(struct tg3 *tp)
12977 {
12978 	tw32_f(GRC_EEPROM_ADDR,
12979 	     (EEPROM_ADDR_FSM_RESET |
12980 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
12981 	       EEPROM_ADDR_CLKPERD_SHIFT)));
12982 
12983 	msleep(1);
12984 
12985 	/* Enable seeprom accesses. */
12986 	tw32_f(GRC_LOCAL_CTRL,
12987 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12988 	udelay(100);
12989 
12990 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12991 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12992 		tg3_flag_set(tp, NVRAM);
12993 
12994 		if (tg3_nvram_lock(tp)) {
12995 			netdev_warn(tp->dev,
12996 				    "Cannot get nvram lock, %s failed\n",
12997 				    __func__);
12998 			return;
12999 		}
13000 		tg3_enable_nvram_access(tp);
13001 
13002 		tp->nvram_size = 0;
13003 
13004 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13005 			tg3_get_5752_nvram_info(tp);
13006 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13007 			tg3_get_5755_nvram_info(tp);
13008 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13009 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13010 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13011 			tg3_get_5787_nvram_info(tp);
13012 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13013 			tg3_get_5761_nvram_info(tp);
13014 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13015 			tg3_get_5906_nvram_info(tp);
13016 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13017 			 tg3_flag(tp, 57765_CLASS))
13018 			tg3_get_57780_nvram_info(tp);
13019 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13020 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13021 			tg3_get_5717_nvram_info(tp);
13022 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13023 			tg3_get_5720_nvram_info(tp);
13024 		else
13025 			tg3_get_nvram_info(tp);
13026 
13027 		if (tp->nvram_size == 0)
13028 			tg3_get_nvram_size(tp);
13029 
13030 		tg3_disable_nvram_access(tp);
13031 		tg3_nvram_unlock(tp);
13032 
13033 	} else {
13034 		tg3_flag_clear(tp, NVRAM);
13035 		tg3_flag_clear(tp, NVRAM_BUFFERED);
13036 
13037 		tg3_get_eeprom_size(tp);
13038 	}
13039 }
13040 
13041 struct subsys_tbl_ent {
13042 	u16 subsys_vendor, subsys_devid;
13043 	u32 phy_id;
13044 };
13045 
13046 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13047 	/* Broadcom boards. */
13048 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13049 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13050 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13051 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13052 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13053 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13054 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13055 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13056 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13057 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13058 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13059 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13060 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13061 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13062 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13063 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13064 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13065 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13066 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13067 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13068 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13069 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13070 
13071 	/* 3com boards. */
13072 	{ TG3PCI_SUBVENDOR_ID_3COM,
13073 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13074 	{ TG3PCI_SUBVENDOR_ID_3COM,
13075 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13076 	{ TG3PCI_SUBVENDOR_ID_3COM,
13077 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13078 	{ TG3PCI_SUBVENDOR_ID_3COM,
13079 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13080 	{ TG3PCI_SUBVENDOR_ID_3COM,
13081 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13082 
13083 	/* DELL boards. */
13084 	{ TG3PCI_SUBVENDOR_ID_DELL,
13085 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13086 	{ TG3PCI_SUBVENDOR_ID_DELL,
13087 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13088 	{ TG3PCI_SUBVENDOR_ID_DELL,
13089 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13090 	{ TG3PCI_SUBVENDOR_ID_DELL,
13091 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13092 
13093 	/* Compaq boards. */
13094 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13095 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13096 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13097 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13098 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13099 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13100 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13101 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13102 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13103 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13104 
13105 	/* IBM boards. */
13106 	{ TG3PCI_SUBVENDOR_ID_IBM,
13107 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13108 };
13109 
13110 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13111 {
13112 	int i;
13113 
13114 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13115 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13116 		     tp->pdev->subsystem_vendor) &&
13117 		    (subsys_id_to_phy_id[i].subsys_devid ==
13118 		     tp->pdev->subsystem_device))
13119 			return &subsys_id_to_phy_id[i];
13120 	}
13121 	return NULL;
13122 }
13123 
13124 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13125 {
13126 	u32 val;
13127 
13128 	tp->phy_id = TG3_PHY_ID_INVALID;
13129 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13130 
13131 	/* Assume an onboard device and WOL capable by default.  */
13132 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13133 	tg3_flag_set(tp, WOL_CAP);
13134 
13135 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13136 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13137 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13138 			tg3_flag_set(tp, IS_NIC);
13139 		}
13140 		val = tr32(VCPU_CFGSHDW);
13141 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13142 			tg3_flag_set(tp, ASPM_WORKAROUND);
13143 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13144 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13145 			tg3_flag_set(tp, WOL_ENABLE);
13146 			device_set_wakeup_enable(&tp->pdev->dev, true);
13147 		}
13148 		goto done;
13149 	}
13150 
13151 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13152 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13153 		u32 nic_cfg, led_cfg;
13154 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13155 		int eeprom_phy_serdes = 0;
13156 
13157 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13158 		tp->nic_sram_data_cfg = nic_cfg;
13159 
13160 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13161 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13162 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13163 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13164 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13165 		    (ver > 0) && (ver < 0x100))
13166 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13167 
13168 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13169 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13170 
13171 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13172 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13173 			eeprom_phy_serdes = 1;
13174 
13175 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13176 		if (nic_phy_id != 0) {
13177 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13178 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13179 
13180 			eeprom_phy_id  = (id1 >> 16) << 10;
13181 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13182 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13183 		} else
13184 			eeprom_phy_id = 0;
13185 
13186 		tp->phy_id = eeprom_phy_id;
13187 		if (eeprom_phy_serdes) {
13188 			if (!tg3_flag(tp, 5705_PLUS))
13189 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13190 			else
13191 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13192 		}
13193 
13194 		if (tg3_flag(tp, 5750_PLUS))
13195 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13196 				    SHASTA_EXT_LED_MODE_MASK);
13197 		else
13198 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13199 
13200 		switch (led_cfg) {
13201 		default:
13202 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13203 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13204 			break;
13205 
13206 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13207 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13208 			break;
13209 
13210 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13211 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13212 
13213 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13214 			 * read on some older 5700/5701 bootcode.
13215 			 */
13216 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13217 			    ASIC_REV_5700 ||
13218 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13219 			    ASIC_REV_5701)
13220 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13221 
13222 			break;
13223 
13224 		case SHASTA_EXT_LED_SHARED:
13225 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13226 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13227 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13228 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13229 						 LED_CTRL_MODE_PHY_2);
13230 			break;
13231 
13232 		case SHASTA_EXT_LED_MAC:
13233 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13234 			break;
13235 
13236 		case SHASTA_EXT_LED_COMBO:
13237 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13238 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13239 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13240 						 LED_CTRL_MODE_PHY_2);
13241 			break;
13242 
13243 		}
13244 
13245 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13246 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13247 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13248 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13249 
13250 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13251 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13252 
13253 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13254 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13255 			if ((tp->pdev->subsystem_vendor ==
13256 			     PCI_VENDOR_ID_ARIMA) &&
13257 			    (tp->pdev->subsystem_device == 0x205a ||
13258 			     tp->pdev->subsystem_device == 0x2063))
13259 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13260 		} else {
13261 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13262 			tg3_flag_set(tp, IS_NIC);
13263 		}
13264 
13265 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13266 			tg3_flag_set(tp, ENABLE_ASF);
13267 			if (tg3_flag(tp, 5750_PLUS))
13268 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13269 		}
13270 
13271 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13272 		    tg3_flag(tp, 5750_PLUS))
13273 			tg3_flag_set(tp, ENABLE_APE);
13274 
13275 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13276 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13277 			tg3_flag_clear(tp, WOL_CAP);
13278 
13279 		if (tg3_flag(tp, WOL_CAP) &&
13280 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13281 			tg3_flag_set(tp, WOL_ENABLE);
13282 			device_set_wakeup_enable(&tp->pdev->dev, true);
13283 		}
13284 
13285 		if (cfg2 & (1 << 17))
13286 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13287 
13288 		/* serdes signal pre-emphasis in register 0x590 set by */
13289 		/* bootcode if bit 18 is set */
13290 		if (cfg2 & (1 << 18))
13291 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13292 
13293 		if ((tg3_flag(tp, 57765_PLUS) ||
13294 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13295 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13296 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13297 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13298 
13299 		if (tg3_flag(tp, PCI_EXPRESS) &&
13300 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13301 		    !tg3_flag(tp, 57765_PLUS)) {
13302 			u32 cfg3;
13303 
13304 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13305 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13306 				tg3_flag_set(tp, ASPM_WORKAROUND);
13307 		}
13308 
13309 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13310 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13311 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13312 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13313 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13314 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13315 	}
13316 done:
13317 	if (tg3_flag(tp, WOL_CAP))
13318 		device_set_wakeup_enable(&tp->pdev->dev,
13319 					 tg3_flag(tp, WOL_ENABLE));
13320 	else
13321 		device_set_wakeup_capable(&tp->pdev->dev, false);
13322 }
13323 
13324 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13325 {
13326 	int i;
13327 	u32 val;
13328 
13329 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13330 	tw32(OTP_CTRL, cmd);
13331 
13332 	/* Wait for up to 1 ms for command to execute. */
13333 	for (i = 0; i < 100; i++) {
13334 		val = tr32(OTP_STATUS);
13335 		if (val & OTP_STATUS_CMD_DONE)
13336 			break;
13337 		udelay(10);
13338 	}
13339 
13340 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13341 }
13342 
13343 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13344  * configuration is a 32-bit value that straddles the alignment boundary.
13345  * We do two 32-bit reads and then shift and merge the results.
13346  */
13347 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13348 {
13349 	u32 bhalf_otp, thalf_otp;
13350 
13351 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13352 
13353 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13354 		return 0;
13355 
13356 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13357 
13358 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13359 		return 0;
13360 
13361 	thalf_otp = tr32(OTP_READ_DATA);
13362 
13363 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13364 
13365 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13366 		return 0;
13367 
13368 	bhalf_otp = tr32(OTP_READ_DATA);
13369 
13370 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13371 }
13372 
13373 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13374 {
13375 	u32 adv = ADVERTISED_Autoneg;
13376 
13377 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13378 		adv |= ADVERTISED_1000baseT_Half |
13379 		       ADVERTISED_1000baseT_Full;
13380 
13381 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13382 		adv |= ADVERTISED_100baseT_Half |
13383 		       ADVERTISED_100baseT_Full |
13384 		       ADVERTISED_10baseT_Half |
13385 		       ADVERTISED_10baseT_Full |
13386 		       ADVERTISED_TP;
13387 	else
13388 		adv |= ADVERTISED_FIBRE;
13389 
13390 	tp->link_config.advertising = adv;
13391 	tp->link_config.speed = SPEED_UNKNOWN;
13392 	tp->link_config.duplex = DUPLEX_UNKNOWN;
13393 	tp->link_config.autoneg = AUTONEG_ENABLE;
13394 	tp->link_config.active_speed = SPEED_UNKNOWN;
13395 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13396 
13397 	tp->old_link = -1;
13398 }
13399 
13400 static int __devinit tg3_phy_probe(struct tg3 *tp)
13401 {
13402 	u32 hw_phy_id_1, hw_phy_id_2;
13403 	u32 hw_phy_id, hw_phy_id_masked;
13404 	int err;
13405 
13406 	/* flow control autonegotiation is default behavior */
13407 	tg3_flag_set(tp, PAUSE_AUTONEG);
13408 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13409 
13410 	if (tg3_flag(tp, USE_PHYLIB))
13411 		return tg3_phy_init(tp);
13412 
13413 	/* Reading the PHY ID register can conflict with ASF
13414 	 * firmware access to the PHY hardware.
13415 	 */
13416 	err = 0;
13417 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13418 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13419 	} else {
13420 		/* Now read the physical PHY_ID from the chip and verify
13421 		 * that it is sane.  If it doesn't look good, we fall back
13422 		 * to either the hard-coded table based PHY_ID and failing
13423 		 * that the value found in the eeprom area.
13424 		 */
13425 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13426 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13427 
13428 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13429 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13430 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13431 
13432 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13433 	}
13434 
13435 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13436 		tp->phy_id = hw_phy_id;
13437 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13438 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13439 		else
13440 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13441 	} else {
13442 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13443 			/* Do nothing, phy ID already set up in
13444 			 * tg3_get_eeprom_hw_cfg().
13445 			 */
13446 		} else {
13447 			struct subsys_tbl_ent *p;
13448 
13449 			/* No eeprom signature?  Try the hardcoded
13450 			 * subsys device table.
13451 			 */
13452 			p = tg3_lookup_by_subsys(tp);
13453 			if (!p)
13454 				return -ENODEV;
13455 
13456 			tp->phy_id = p->phy_id;
13457 			if (!tp->phy_id ||
13458 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13459 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13460 		}
13461 	}
13462 
13463 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13464 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13465 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13466 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13467 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13468 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13469 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13470 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13471 
13472 	tg3_phy_init_link_config(tp);
13473 
13474 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13475 	    !tg3_flag(tp, ENABLE_APE) &&
13476 	    !tg3_flag(tp, ENABLE_ASF)) {
13477 		u32 bmsr, dummy;
13478 
13479 		tg3_readphy(tp, MII_BMSR, &bmsr);
13480 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13481 		    (bmsr & BMSR_LSTATUS))
13482 			goto skip_phy_reset;
13483 
13484 		err = tg3_phy_reset(tp);
13485 		if (err)
13486 			return err;
13487 
13488 		tg3_phy_set_wirespeed(tp);
13489 
13490 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13491 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13492 					    tp->link_config.flowctrl);
13493 
13494 			tg3_writephy(tp, MII_BMCR,
13495 				     BMCR_ANENABLE | BMCR_ANRESTART);
13496 		}
13497 	}
13498 
13499 skip_phy_reset:
13500 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13501 		err = tg3_init_5401phy_dsp(tp);
13502 		if (err)
13503 			return err;
13504 
13505 		err = tg3_init_5401phy_dsp(tp);
13506 	}
13507 
13508 	return err;
13509 }
13510 
13511 static void __devinit tg3_read_vpd(struct tg3 *tp)
13512 {
13513 	u8 *vpd_data;
13514 	unsigned int block_end, rosize, len;
13515 	u32 vpdlen;
13516 	int j, i = 0;
13517 
13518 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13519 	if (!vpd_data)
13520 		goto out_no_vpd;
13521 
13522 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13523 	if (i < 0)
13524 		goto out_not_found;
13525 
13526 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13527 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13528 	i += PCI_VPD_LRDT_TAG_SIZE;
13529 
13530 	if (block_end > vpdlen)
13531 		goto out_not_found;
13532 
13533 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13534 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13535 	if (j > 0) {
13536 		len = pci_vpd_info_field_size(&vpd_data[j]);
13537 
13538 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13539 		if (j + len > block_end || len != 4 ||
13540 		    memcmp(&vpd_data[j], "1028", 4))
13541 			goto partno;
13542 
13543 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13544 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13545 		if (j < 0)
13546 			goto partno;
13547 
13548 		len = pci_vpd_info_field_size(&vpd_data[j]);
13549 
13550 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13551 		if (j + len > block_end)
13552 			goto partno;
13553 
13554 		memcpy(tp->fw_ver, &vpd_data[j], len);
13555 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13556 	}
13557 
13558 partno:
13559 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13560 				      PCI_VPD_RO_KEYWORD_PARTNO);
13561 	if (i < 0)
13562 		goto out_not_found;
13563 
13564 	len = pci_vpd_info_field_size(&vpd_data[i]);
13565 
13566 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13567 	if (len > TG3_BPN_SIZE ||
13568 	    (len + i) > vpdlen)
13569 		goto out_not_found;
13570 
13571 	memcpy(tp->board_part_number, &vpd_data[i], len);
13572 
13573 out_not_found:
13574 	kfree(vpd_data);
13575 	if (tp->board_part_number[0])
13576 		return;
13577 
13578 out_no_vpd:
13579 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13580 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13581 			strcpy(tp->board_part_number, "BCM5717");
13582 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13583 			strcpy(tp->board_part_number, "BCM5718");
13584 		else
13585 			goto nomatch;
13586 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13587 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13588 			strcpy(tp->board_part_number, "BCM57780");
13589 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13590 			strcpy(tp->board_part_number, "BCM57760");
13591 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13592 			strcpy(tp->board_part_number, "BCM57790");
13593 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13594 			strcpy(tp->board_part_number, "BCM57788");
13595 		else
13596 			goto nomatch;
13597 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13598 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13599 			strcpy(tp->board_part_number, "BCM57761");
13600 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13601 			strcpy(tp->board_part_number, "BCM57765");
13602 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13603 			strcpy(tp->board_part_number, "BCM57781");
13604 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13605 			strcpy(tp->board_part_number, "BCM57785");
13606 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13607 			strcpy(tp->board_part_number, "BCM57791");
13608 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13609 			strcpy(tp->board_part_number, "BCM57795");
13610 		else
13611 			goto nomatch;
13612 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13613 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13614 			strcpy(tp->board_part_number, "BCM57762");
13615 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13616 			strcpy(tp->board_part_number, "BCM57766");
13617 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13618 			strcpy(tp->board_part_number, "BCM57782");
13619 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13620 			strcpy(tp->board_part_number, "BCM57786");
13621 		else
13622 			goto nomatch;
13623 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13624 		strcpy(tp->board_part_number, "BCM95906");
13625 	} else {
13626 nomatch:
13627 		strcpy(tp->board_part_number, "none");
13628 	}
13629 }
13630 
13631 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13632 {
13633 	u32 val;
13634 
13635 	if (tg3_nvram_read(tp, offset, &val) ||
13636 	    (val & 0xfc000000) != 0x0c000000 ||
13637 	    tg3_nvram_read(tp, offset + 4, &val) ||
13638 	    val != 0)
13639 		return 0;
13640 
13641 	return 1;
13642 }
13643 
13644 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13645 {
13646 	u32 val, offset, start, ver_offset;
13647 	int i, dst_off;
13648 	bool newver = false;
13649 
13650 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13651 	    tg3_nvram_read(tp, 0x4, &start))
13652 		return;
13653 
13654 	offset = tg3_nvram_logical_addr(tp, offset);
13655 
13656 	if (tg3_nvram_read(tp, offset, &val))
13657 		return;
13658 
13659 	if ((val & 0xfc000000) == 0x0c000000) {
13660 		if (tg3_nvram_read(tp, offset + 4, &val))
13661 			return;
13662 
13663 		if (val == 0)
13664 			newver = true;
13665 	}
13666 
13667 	dst_off = strlen(tp->fw_ver);
13668 
13669 	if (newver) {
13670 		if (TG3_VER_SIZE - dst_off < 16 ||
13671 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13672 			return;
13673 
13674 		offset = offset + ver_offset - start;
13675 		for (i = 0; i < 16; i += 4) {
13676 			__be32 v;
13677 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13678 				return;
13679 
13680 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13681 		}
13682 	} else {
13683 		u32 major, minor;
13684 
13685 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13686 			return;
13687 
13688 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13689 			TG3_NVM_BCVER_MAJSFT;
13690 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13691 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13692 			 "v%d.%02d", major, minor);
13693 	}
13694 }
13695 
13696 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13697 {
13698 	u32 val, major, minor;
13699 
13700 	/* Use native endian representation */
13701 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13702 		return;
13703 
13704 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13705 		TG3_NVM_HWSB_CFG1_MAJSFT;
13706 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13707 		TG3_NVM_HWSB_CFG1_MINSFT;
13708 
13709 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13710 }
13711 
13712 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13713 {
13714 	u32 offset, major, minor, build;
13715 
13716 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13717 
13718 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13719 		return;
13720 
13721 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13722 	case TG3_EEPROM_SB_REVISION_0:
13723 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13724 		break;
13725 	case TG3_EEPROM_SB_REVISION_2:
13726 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13727 		break;
13728 	case TG3_EEPROM_SB_REVISION_3:
13729 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13730 		break;
13731 	case TG3_EEPROM_SB_REVISION_4:
13732 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13733 		break;
13734 	case TG3_EEPROM_SB_REVISION_5:
13735 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13736 		break;
13737 	case TG3_EEPROM_SB_REVISION_6:
13738 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13739 		break;
13740 	default:
13741 		return;
13742 	}
13743 
13744 	if (tg3_nvram_read(tp, offset, &val))
13745 		return;
13746 
13747 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13748 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13749 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13750 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13751 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13752 
13753 	if (minor > 99 || build > 26)
13754 		return;
13755 
13756 	offset = strlen(tp->fw_ver);
13757 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13758 		 " v%d.%02d", major, minor);
13759 
13760 	if (build > 0) {
13761 		offset = strlen(tp->fw_ver);
13762 		if (offset < TG3_VER_SIZE - 1)
13763 			tp->fw_ver[offset] = 'a' + build - 1;
13764 	}
13765 }
13766 
13767 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13768 {
13769 	u32 val, offset, start;
13770 	int i, vlen;
13771 
13772 	for (offset = TG3_NVM_DIR_START;
13773 	     offset < TG3_NVM_DIR_END;
13774 	     offset += TG3_NVM_DIRENT_SIZE) {
13775 		if (tg3_nvram_read(tp, offset, &val))
13776 			return;
13777 
13778 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13779 			break;
13780 	}
13781 
13782 	if (offset == TG3_NVM_DIR_END)
13783 		return;
13784 
13785 	if (!tg3_flag(tp, 5705_PLUS))
13786 		start = 0x08000000;
13787 	else if (tg3_nvram_read(tp, offset - 4, &start))
13788 		return;
13789 
13790 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13791 	    !tg3_fw_img_is_valid(tp, offset) ||
13792 	    tg3_nvram_read(tp, offset + 8, &val))
13793 		return;
13794 
13795 	offset += val - start;
13796 
13797 	vlen = strlen(tp->fw_ver);
13798 
13799 	tp->fw_ver[vlen++] = ',';
13800 	tp->fw_ver[vlen++] = ' ';
13801 
13802 	for (i = 0; i < 4; i++) {
13803 		__be32 v;
13804 		if (tg3_nvram_read_be32(tp, offset, &v))
13805 			return;
13806 
13807 		offset += sizeof(v);
13808 
13809 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13810 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13811 			break;
13812 		}
13813 
13814 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13815 		vlen += sizeof(v);
13816 	}
13817 }
13818 
13819 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13820 {
13821 	int vlen;
13822 	u32 apedata;
13823 	char *fwtype;
13824 
13825 	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13826 		return;
13827 
13828 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13829 	if (apedata != APE_SEG_SIG_MAGIC)
13830 		return;
13831 
13832 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13833 	if (!(apedata & APE_FW_STATUS_READY))
13834 		return;
13835 
13836 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13837 
13838 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13839 		tg3_flag_set(tp, APE_HAS_NCSI);
13840 		fwtype = "NCSI";
13841 	} else {
13842 		fwtype = "DASH";
13843 	}
13844 
13845 	vlen = strlen(tp->fw_ver);
13846 
13847 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13848 		 fwtype,
13849 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13850 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13851 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13852 		 (apedata & APE_FW_VERSION_BLDMSK));
13853 }
13854 
13855 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13856 {
13857 	u32 val;
13858 	bool vpd_vers = false;
13859 
13860 	if (tp->fw_ver[0] != 0)
13861 		vpd_vers = true;
13862 
13863 	if (tg3_flag(tp, NO_NVRAM)) {
13864 		strcat(tp->fw_ver, "sb");
13865 		return;
13866 	}
13867 
13868 	if (tg3_nvram_read(tp, 0, &val))
13869 		return;
13870 
13871 	if (val == TG3_EEPROM_MAGIC)
13872 		tg3_read_bc_ver(tp);
13873 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13874 		tg3_read_sb_ver(tp, val);
13875 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13876 		tg3_read_hwsb_ver(tp);
13877 	else
13878 		return;
13879 
13880 	if (vpd_vers)
13881 		goto done;
13882 
13883 	if (tg3_flag(tp, ENABLE_APE)) {
13884 		if (tg3_flag(tp, ENABLE_ASF))
13885 			tg3_read_dash_ver(tp);
13886 	} else if (tg3_flag(tp, ENABLE_ASF)) {
13887 		tg3_read_mgmtfw_ver(tp);
13888 	}
13889 
13890 done:
13891 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13892 }
13893 
13894 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13895 {
13896 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13897 		return TG3_RX_RET_MAX_SIZE_5717;
13898 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13899 		return TG3_RX_RET_MAX_SIZE_5700;
13900 	else
13901 		return TG3_RX_RET_MAX_SIZE_5705;
13902 }
13903 
13904 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13905 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13906 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13907 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13908 	{ },
13909 };
13910 
13911 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13912 {
13913 	struct pci_dev *peer;
13914 	unsigned int func, devnr = tp->pdev->devfn & ~7;
13915 
13916 	for (func = 0; func < 8; func++) {
13917 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
13918 		if (peer && peer != tp->pdev)
13919 			break;
13920 		pci_dev_put(peer);
13921 	}
13922 	/* 5704 can be configured in single-port mode, set peer to
13923 	 * tp->pdev in that case.
13924 	 */
13925 	if (!peer) {
13926 		peer = tp->pdev;
13927 		return peer;
13928 	}
13929 
13930 	/*
13931 	 * We don't need to keep the refcount elevated; there's no way
13932 	 * to remove one half of this device without removing the other
13933 	 */
13934 	pci_dev_put(peer);
13935 
13936 	return peer;
13937 }
13938 
13939 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13940 {
13941 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13942 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13943 		u32 reg;
13944 
13945 		/* All devices that use the alternate
13946 		 * ASIC REV location have a CPMU.
13947 		 */
13948 		tg3_flag_set(tp, CPMU_PRESENT);
13949 
13950 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13951 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13952 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13953 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13954 			reg = TG3PCI_GEN2_PRODID_ASICREV;
13955 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13956 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13957 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13958 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13959 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13960 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13961 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13962 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13963 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13964 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13965 			reg = TG3PCI_GEN15_PRODID_ASICREV;
13966 		else
13967 			reg = TG3PCI_PRODID_ASICREV;
13968 
13969 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13970 	}
13971 
13972 	/* Wrong chip ID in 5752 A0. This code can be removed later
13973 	 * as A0 is not in production.
13974 	 */
13975 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13976 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13977 
13978 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13979 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13980 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13981 		tg3_flag_set(tp, 5717_PLUS);
13982 
13983 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13984 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13985 		tg3_flag_set(tp, 57765_CLASS);
13986 
13987 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13988 		tg3_flag_set(tp, 57765_PLUS);
13989 
13990 	/* Intentionally exclude ASIC_REV_5906 */
13991 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13992 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13993 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13994 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13995 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13996 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13997 	    tg3_flag(tp, 57765_PLUS))
13998 		tg3_flag_set(tp, 5755_PLUS);
13999 
14000 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14001 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14002 		tg3_flag_set(tp, 5780_CLASS);
14003 
14004 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14005 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14006 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14007 	    tg3_flag(tp, 5755_PLUS) ||
14008 	    tg3_flag(tp, 5780_CLASS))
14009 		tg3_flag_set(tp, 5750_PLUS);
14010 
14011 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14012 	    tg3_flag(tp, 5750_PLUS))
14013 		tg3_flag_set(tp, 5705_PLUS);
14014 }
14015 
14016 static int __devinit tg3_get_invariants(struct tg3 *tp)
14017 {
14018 	u32 misc_ctrl_reg;
14019 	u32 pci_state_reg, grc_misc_cfg;
14020 	u32 val;
14021 	u16 pci_cmd;
14022 	int err;
14023 
14024 	/* Force memory write invalidate off.  If we leave it on,
14025 	 * then on 5700_BX chips we have to enable a workaround.
14026 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14027 	 * to match the cacheline size.  The Broadcom driver have this
14028 	 * workaround but turns MWI off all the times so never uses
14029 	 * it.  This seems to suggest that the workaround is insufficient.
14030 	 */
14031 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14032 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14033 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14034 
14035 	/* Important! -- Make sure register accesses are byteswapped
14036 	 * correctly.  Also, for those chips that require it, make
14037 	 * sure that indirect register accesses are enabled before
14038 	 * the first operation.
14039 	 */
14040 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14041 			      &misc_ctrl_reg);
14042 	tp->misc_host_ctrl |= (misc_ctrl_reg &
14043 			       MISC_HOST_CTRL_CHIPREV);
14044 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14045 			       tp->misc_host_ctrl);
14046 
14047 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
14048 
14049 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14050 	 * we need to disable memory and use config. cycles
14051 	 * only to access all registers. The 5702/03 chips
14052 	 * can mistakenly decode the special cycles from the
14053 	 * ICH chipsets as memory write cycles, causing corruption
14054 	 * of register and memory space. Only certain ICH bridges
14055 	 * will drive special cycles with non-zero data during the
14056 	 * address phase which can fall within the 5703's address
14057 	 * range. This is not an ICH bug as the PCI spec allows
14058 	 * non-zero address during special cycles. However, only
14059 	 * these ICH bridges are known to drive non-zero addresses
14060 	 * during special cycles.
14061 	 *
14062 	 * Since special cycles do not cross PCI bridges, we only
14063 	 * enable this workaround if the 5703 is on the secondary
14064 	 * bus of these ICH bridges.
14065 	 */
14066 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14067 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14068 		static struct tg3_dev_id {
14069 			u32	vendor;
14070 			u32	device;
14071 			u32	rev;
14072 		} ich_chipsets[] = {
14073 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14074 			  PCI_ANY_ID },
14075 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14076 			  PCI_ANY_ID },
14077 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14078 			  0xa },
14079 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14080 			  PCI_ANY_ID },
14081 			{ },
14082 		};
14083 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
14084 		struct pci_dev *bridge = NULL;
14085 
14086 		while (pci_id->vendor != 0) {
14087 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
14088 						bridge);
14089 			if (!bridge) {
14090 				pci_id++;
14091 				continue;
14092 			}
14093 			if (pci_id->rev != PCI_ANY_ID) {
14094 				if (bridge->revision > pci_id->rev)
14095 					continue;
14096 			}
14097 			if (bridge->subordinate &&
14098 			    (bridge->subordinate->number ==
14099 			     tp->pdev->bus->number)) {
14100 				tg3_flag_set(tp, ICH_WORKAROUND);
14101 				pci_dev_put(bridge);
14102 				break;
14103 			}
14104 		}
14105 	}
14106 
14107 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14108 		static struct tg3_dev_id {
14109 			u32	vendor;
14110 			u32	device;
14111 		} bridge_chipsets[] = {
14112 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14113 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14114 			{ },
14115 		};
14116 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14117 		struct pci_dev *bridge = NULL;
14118 
14119 		while (pci_id->vendor != 0) {
14120 			bridge = pci_get_device(pci_id->vendor,
14121 						pci_id->device,
14122 						bridge);
14123 			if (!bridge) {
14124 				pci_id++;
14125 				continue;
14126 			}
14127 			if (bridge->subordinate &&
14128 			    (bridge->subordinate->number <=
14129 			     tp->pdev->bus->number) &&
14130 			    (bridge->subordinate->subordinate >=
14131 			     tp->pdev->bus->number)) {
14132 				tg3_flag_set(tp, 5701_DMA_BUG);
14133 				pci_dev_put(bridge);
14134 				break;
14135 			}
14136 		}
14137 	}
14138 
14139 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14140 	 * DMA addresses > 40-bit. This bridge may have other additional
14141 	 * 57xx devices behind it in some 4-port NIC designs for example.
14142 	 * Any tg3 device found behind the bridge will also need the 40-bit
14143 	 * DMA workaround.
14144 	 */
14145 	if (tg3_flag(tp, 5780_CLASS)) {
14146 		tg3_flag_set(tp, 40BIT_DMA_BUG);
14147 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14148 	} else {
14149 		struct pci_dev *bridge = NULL;
14150 
14151 		do {
14152 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14153 						PCI_DEVICE_ID_SERVERWORKS_EPB,
14154 						bridge);
14155 			if (bridge && bridge->subordinate &&
14156 			    (bridge->subordinate->number <=
14157 			     tp->pdev->bus->number) &&
14158 			    (bridge->subordinate->subordinate >=
14159 			     tp->pdev->bus->number)) {
14160 				tg3_flag_set(tp, 40BIT_DMA_BUG);
14161 				pci_dev_put(bridge);
14162 				break;
14163 			}
14164 		} while (bridge);
14165 	}
14166 
14167 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14168 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14169 		tp->pdev_peer = tg3_find_peer(tp);
14170 
14171 	/* Determine TSO capabilities */
14172 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14173 		; /* Do nothing. HW bug. */
14174 	else if (tg3_flag(tp, 57765_PLUS))
14175 		tg3_flag_set(tp, HW_TSO_3);
14176 	else if (tg3_flag(tp, 5755_PLUS) ||
14177 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14178 		tg3_flag_set(tp, HW_TSO_2);
14179 	else if (tg3_flag(tp, 5750_PLUS)) {
14180 		tg3_flag_set(tp, HW_TSO_1);
14181 		tg3_flag_set(tp, TSO_BUG);
14182 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14183 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14184 			tg3_flag_clear(tp, TSO_BUG);
14185 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14186 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14187 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14188 			tg3_flag_set(tp, TSO_BUG);
14189 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14190 			tp->fw_needed = FIRMWARE_TG3TSO5;
14191 		else
14192 			tp->fw_needed = FIRMWARE_TG3TSO;
14193 	}
14194 
14195 	/* Selectively allow TSO based on operating conditions */
14196 	if (tg3_flag(tp, HW_TSO_1) ||
14197 	    tg3_flag(tp, HW_TSO_2) ||
14198 	    tg3_flag(tp, HW_TSO_3) ||
14199 	    tp->fw_needed) {
14200 		/* For firmware TSO, assume ASF is disabled.
14201 		 * We'll disable TSO later if we discover ASF
14202 		 * is enabled in tg3_get_eeprom_hw_cfg().
14203 		 */
14204 		tg3_flag_set(tp, TSO_CAPABLE);
14205 	} else {
14206 		tg3_flag_clear(tp, TSO_CAPABLE);
14207 		tg3_flag_clear(tp, TSO_BUG);
14208 		tp->fw_needed = NULL;
14209 	}
14210 
14211 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14212 		tp->fw_needed = FIRMWARE_TG3;
14213 
14214 	tp->irq_max = 1;
14215 
14216 	if (tg3_flag(tp, 5750_PLUS)) {
14217 		tg3_flag_set(tp, SUPPORT_MSI);
14218 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14219 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14220 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14221 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14222 		     tp->pdev_peer == tp->pdev))
14223 			tg3_flag_clear(tp, SUPPORT_MSI);
14224 
14225 		if (tg3_flag(tp, 5755_PLUS) ||
14226 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14227 			tg3_flag_set(tp, 1SHOT_MSI);
14228 		}
14229 
14230 		if (tg3_flag(tp, 57765_PLUS)) {
14231 			tg3_flag_set(tp, SUPPORT_MSIX);
14232 			tp->irq_max = TG3_IRQ_MAX_VECS;
14233 			tg3_rss_init_dflt_indir_tbl(tp);
14234 		}
14235 	}
14236 
14237 	if (tg3_flag(tp, 5755_PLUS))
14238 		tg3_flag_set(tp, SHORT_DMA_BUG);
14239 
14240 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14241 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14242 
14243 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14244 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14245 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14246 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14247 
14248 	if (tg3_flag(tp, 57765_PLUS) &&
14249 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14250 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14251 
14252 	if (!tg3_flag(tp, 5705_PLUS) ||
14253 	    tg3_flag(tp, 5780_CLASS) ||
14254 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14255 		tg3_flag_set(tp, JUMBO_CAPABLE);
14256 
14257 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14258 			      &pci_state_reg);
14259 
14260 	if (pci_is_pcie(tp->pdev)) {
14261 		u16 lnkctl;
14262 
14263 		tg3_flag_set(tp, PCI_EXPRESS);
14264 
14265 		pci_read_config_word(tp->pdev,
14266 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14267 				     &lnkctl);
14268 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14269 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14270 			    ASIC_REV_5906) {
14271 				tg3_flag_clear(tp, HW_TSO_2);
14272 				tg3_flag_clear(tp, TSO_CAPABLE);
14273 			}
14274 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14275 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14276 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14277 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14278 				tg3_flag_set(tp, CLKREQ_BUG);
14279 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14280 			tg3_flag_set(tp, L1PLLPD_EN);
14281 		}
14282 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14283 		/* BCM5785 devices are effectively PCIe devices, and should
14284 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14285 		 * section.
14286 		 */
14287 		tg3_flag_set(tp, PCI_EXPRESS);
14288 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14289 		   tg3_flag(tp, 5780_CLASS)) {
14290 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14291 		if (!tp->pcix_cap) {
14292 			dev_err(&tp->pdev->dev,
14293 				"Cannot find PCI-X capability, aborting\n");
14294 			return -EIO;
14295 		}
14296 
14297 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14298 			tg3_flag_set(tp, PCIX_MODE);
14299 	}
14300 
14301 	/* If we have an AMD 762 or VIA K8T800 chipset, write
14302 	 * reordering to the mailbox registers done by the host
14303 	 * controller can cause major troubles.  We read back from
14304 	 * every mailbox register write to force the writes to be
14305 	 * posted to the chip in order.
14306 	 */
14307 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14308 	    !tg3_flag(tp, PCI_EXPRESS))
14309 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14310 
14311 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14312 			     &tp->pci_cacheline_sz);
14313 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14314 			     &tp->pci_lat_timer);
14315 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14316 	    tp->pci_lat_timer < 64) {
14317 		tp->pci_lat_timer = 64;
14318 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14319 				      tp->pci_lat_timer);
14320 	}
14321 
14322 	/* Important! -- It is critical that the PCI-X hw workaround
14323 	 * situation is decided before the first MMIO register access.
14324 	 */
14325 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14326 		/* 5700 BX chips need to have their TX producer index
14327 		 * mailboxes written twice to workaround a bug.
14328 		 */
14329 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14330 
14331 		/* If we are in PCI-X mode, enable register write workaround.
14332 		 *
14333 		 * The workaround is to use indirect register accesses
14334 		 * for all chip writes not to mailbox registers.
14335 		 */
14336 		if (tg3_flag(tp, PCIX_MODE)) {
14337 			u32 pm_reg;
14338 
14339 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14340 
14341 			/* The chip can have it's power management PCI config
14342 			 * space registers clobbered due to this bug.
14343 			 * So explicitly force the chip into D0 here.
14344 			 */
14345 			pci_read_config_dword(tp->pdev,
14346 					      tp->pm_cap + PCI_PM_CTRL,
14347 					      &pm_reg);
14348 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14349 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14350 			pci_write_config_dword(tp->pdev,
14351 					       tp->pm_cap + PCI_PM_CTRL,
14352 					       pm_reg);
14353 
14354 			/* Also, force SERR#/PERR# in PCI command. */
14355 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14356 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14357 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14358 		}
14359 	}
14360 
14361 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14362 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14363 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14364 		tg3_flag_set(tp, PCI_32BIT);
14365 
14366 	/* Chip-specific fixup from Broadcom driver */
14367 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14368 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14369 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14370 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14371 	}
14372 
14373 	/* Default fast path register access methods */
14374 	tp->read32 = tg3_read32;
14375 	tp->write32 = tg3_write32;
14376 	tp->read32_mbox = tg3_read32;
14377 	tp->write32_mbox = tg3_write32;
14378 	tp->write32_tx_mbox = tg3_write32;
14379 	tp->write32_rx_mbox = tg3_write32;
14380 
14381 	/* Various workaround register access methods */
14382 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14383 		tp->write32 = tg3_write_indirect_reg32;
14384 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14385 		 (tg3_flag(tp, PCI_EXPRESS) &&
14386 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14387 		/*
14388 		 * Back to back register writes can cause problems on these
14389 		 * chips, the workaround is to read back all reg writes
14390 		 * except those to mailbox regs.
14391 		 *
14392 		 * See tg3_write_indirect_reg32().
14393 		 */
14394 		tp->write32 = tg3_write_flush_reg32;
14395 	}
14396 
14397 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14398 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14399 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14400 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14401 	}
14402 
14403 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14404 		tp->read32 = tg3_read_indirect_reg32;
14405 		tp->write32 = tg3_write_indirect_reg32;
14406 		tp->read32_mbox = tg3_read_indirect_mbox;
14407 		tp->write32_mbox = tg3_write_indirect_mbox;
14408 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14409 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14410 
14411 		iounmap(tp->regs);
14412 		tp->regs = NULL;
14413 
14414 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14415 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14416 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14417 	}
14418 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14419 		tp->read32_mbox = tg3_read32_mbox_5906;
14420 		tp->write32_mbox = tg3_write32_mbox_5906;
14421 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14422 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14423 	}
14424 
14425 	if (tp->write32 == tg3_write_indirect_reg32 ||
14426 	    (tg3_flag(tp, PCIX_MODE) &&
14427 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14428 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14429 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14430 
14431 	/* The memory arbiter has to be enabled in order for SRAM accesses
14432 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14433 	 * sure it is enabled, but other entities such as system netboot
14434 	 * code might disable it.
14435 	 */
14436 	val = tr32(MEMARB_MODE);
14437 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14438 
14439 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14440 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14441 	    tg3_flag(tp, 5780_CLASS)) {
14442 		if (tg3_flag(tp, PCIX_MODE)) {
14443 			pci_read_config_dword(tp->pdev,
14444 					      tp->pcix_cap + PCI_X_STATUS,
14445 					      &val);
14446 			tp->pci_fn = val & 0x7;
14447 		}
14448 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14449 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14450 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14451 		    NIC_SRAM_CPMUSTAT_SIG) {
14452 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14453 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14454 		}
14455 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14456 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14457 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14458 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14459 		    NIC_SRAM_CPMUSTAT_SIG) {
14460 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14461 				     TG3_CPMU_STATUS_FSHFT_5719;
14462 		}
14463 	}
14464 
14465 	/* Get eeprom hw config before calling tg3_set_power_state().
14466 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14467 	 * determined before calling tg3_set_power_state() so that
14468 	 * we know whether or not to switch out of Vaux power.
14469 	 * When the flag is set, it means that GPIO1 is used for eeprom
14470 	 * write protect and also implies that it is a LOM where GPIOs
14471 	 * are not used to switch power.
14472 	 */
14473 	tg3_get_eeprom_hw_cfg(tp);
14474 
14475 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14476 		tg3_flag_clear(tp, TSO_CAPABLE);
14477 		tg3_flag_clear(tp, TSO_BUG);
14478 		tp->fw_needed = NULL;
14479 	}
14480 
14481 	if (tg3_flag(tp, ENABLE_APE)) {
14482 		/* Allow reads and writes to the
14483 		 * APE register and memory space.
14484 		 */
14485 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14486 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14487 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14488 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14489 				       pci_state_reg);
14490 
14491 		tg3_ape_lock_init(tp);
14492 	}
14493 
14494 	/* Set up tp->grc_local_ctrl before calling
14495 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14496 	 * will bring 5700's external PHY out of reset.
14497 	 * It is also used as eeprom write protect on LOMs.
14498 	 */
14499 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14500 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14501 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14502 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14503 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14504 	/* Unused GPIO3 must be driven as output on 5752 because there
14505 	 * are no pull-up resistors on unused GPIO pins.
14506 	 */
14507 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14508 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14509 
14510 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14511 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14512 	    tg3_flag(tp, 57765_CLASS))
14513 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14514 
14515 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14516 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14517 		/* Turn off the debug UART. */
14518 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14519 		if (tg3_flag(tp, IS_NIC))
14520 			/* Keep VMain power. */
14521 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14522 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14523 	}
14524 
14525 	/* Switch out of Vaux if it is a NIC */
14526 	tg3_pwrsrc_switch_to_vmain(tp);
14527 
14528 	/* Derive initial jumbo mode from MTU assigned in
14529 	 * ether_setup() via the alloc_etherdev() call
14530 	 */
14531 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14532 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14533 
14534 	/* Determine WakeOnLan speed to use. */
14535 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14536 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14537 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14538 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14539 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14540 	} else {
14541 		tg3_flag_set(tp, WOL_SPEED_100MB);
14542 	}
14543 
14544 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14545 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14546 
14547 	/* A few boards don't want Ethernet@WireSpeed phy feature */
14548 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14549 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14550 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14551 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14552 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14553 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14554 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14555 
14556 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14557 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14558 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14559 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14560 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14561 
14562 	if (tg3_flag(tp, 5705_PLUS) &&
14563 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14564 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14565 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14566 	    !tg3_flag(tp, 57765_PLUS)) {
14567 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14568 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14569 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14570 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14571 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14572 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14573 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14574 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14575 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14576 		} else
14577 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14578 	}
14579 
14580 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14581 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14582 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14583 		if (tp->phy_otp == 0)
14584 			tp->phy_otp = TG3_OTP_DEFAULT;
14585 	}
14586 
14587 	if (tg3_flag(tp, CPMU_PRESENT))
14588 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14589 	else
14590 		tp->mi_mode = MAC_MI_MODE_BASE;
14591 
14592 	tp->coalesce_mode = 0;
14593 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14594 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14595 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14596 
14597 	/* Set these bits to enable statistics workaround. */
14598 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14599 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14600 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14601 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14602 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14603 	}
14604 
14605 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14606 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14607 		tg3_flag_set(tp, USE_PHYLIB);
14608 
14609 	err = tg3_mdio_init(tp);
14610 	if (err)
14611 		return err;
14612 
14613 	/* Initialize data/descriptor byte/word swapping. */
14614 	val = tr32(GRC_MODE);
14615 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14616 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14617 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14618 			GRC_MODE_B2HRX_ENABLE |
14619 			GRC_MODE_HTX2B_ENABLE |
14620 			GRC_MODE_HOST_STACKUP);
14621 	else
14622 		val &= GRC_MODE_HOST_STACKUP;
14623 
14624 	tw32(GRC_MODE, val | tp->grc_mode);
14625 
14626 	tg3_switch_clocks(tp);
14627 
14628 	/* Clear this out for sanity. */
14629 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14630 
14631 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14632 			      &pci_state_reg);
14633 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14634 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14635 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14636 
14637 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14638 		    chiprevid == CHIPREV_ID_5701_B0 ||
14639 		    chiprevid == CHIPREV_ID_5701_B2 ||
14640 		    chiprevid == CHIPREV_ID_5701_B5) {
14641 			void __iomem *sram_base;
14642 
14643 			/* Write some dummy words into the SRAM status block
14644 			 * area, see if it reads back correctly.  If the return
14645 			 * value is bad, force enable the PCIX workaround.
14646 			 */
14647 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14648 
14649 			writel(0x00000000, sram_base);
14650 			writel(0x00000000, sram_base + 4);
14651 			writel(0xffffffff, sram_base + 4);
14652 			if (readl(sram_base) != 0x00000000)
14653 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14654 		}
14655 	}
14656 
14657 	udelay(50);
14658 	tg3_nvram_init(tp);
14659 
14660 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14661 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14662 
14663 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14664 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14665 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14666 		tg3_flag_set(tp, IS_5788);
14667 
14668 	if (!tg3_flag(tp, IS_5788) &&
14669 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14670 		tg3_flag_set(tp, TAGGED_STATUS);
14671 	if (tg3_flag(tp, TAGGED_STATUS)) {
14672 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14673 				      HOSTCC_MODE_CLRTICK_TXBD);
14674 
14675 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14676 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14677 				       tp->misc_host_ctrl);
14678 	}
14679 
14680 	/* Preserve the APE MAC_MODE bits */
14681 	if (tg3_flag(tp, ENABLE_APE))
14682 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14683 	else
14684 		tp->mac_mode = 0;
14685 
14686 	/* these are limited to 10/100 only */
14687 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14688 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14689 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14690 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14691 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14692 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14693 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14694 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14695 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14696 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14697 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14698 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14699 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14700 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14701 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14702 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14703 
14704 	err = tg3_phy_probe(tp);
14705 	if (err) {
14706 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14707 		/* ... but do not return immediately ... */
14708 		tg3_mdio_fini(tp);
14709 	}
14710 
14711 	tg3_read_vpd(tp);
14712 	tg3_read_fw_ver(tp);
14713 
14714 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14715 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14716 	} else {
14717 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14718 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14719 		else
14720 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14721 	}
14722 
14723 	/* 5700 {AX,BX} chips have a broken status block link
14724 	 * change bit implementation, so we must use the
14725 	 * status register in those cases.
14726 	 */
14727 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14728 		tg3_flag_set(tp, USE_LINKCHG_REG);
14729 	else
14730 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14731 
14732 	/* The led_ctrl is set during tg3_phy_probe, here we might
14733 	 * have to force the link status polling mechanism based
14734 	 * upon subsystem IDs.
14735 	 */
14736 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14737 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14738 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14739 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14740 		tg3_flag_set(tp, USE_LINKCHG_REG);
14741 	}
14742 
14743 	/* For all SERDES we poll the MAC status register. */
14744 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14745 		tg3_flag_set(tp, POLL_SERDES);
14746 	else
14747 		tg3_flag_clear(tp, POLL_SERDES);
14748 
14749 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14750 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14751 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14752 	    tg3_flag(tp, PCIX_MODE)) {
14753 		tp->rx_offset = NET_SKB_PAD;
14754 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14755 		tp->rx_copy_thresh = ~(u16)0;
14756 #endif
14757 	}
14758 
14759 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14760 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14761 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14762 
14763 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14764 
14765 	/* Increment the rx prod index on the rx std ring by at most
14766 	 * 8 for these chips to workaround hw errata.
14767 	 */
14768 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14769 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14770 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14771 		tp->rx_std_max_post = 8;
14772 
14773 	if (tg3_flag(tp, ASPM_WORKAROUND))
14774 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14775 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14776 
14777 	return err;
14778 }
14779 
14780 #ifdef CONFIG_SPARC
14781 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14782 {
14783 	struct net_device *dev = tp->dev;
14784 	struct pci_dev *pdev = tp->pdev;
14785 	struct device_node *dp = pci_device_to_OF_node(pdev);
14786 	const unsigned char *addr;
14787 	int len;
14788 
14789 	addr = of_get_property(dp, "local-mac-address", &len);
14790 	if (addr && len == 6) {
14791 		memcpy(dev->dev_addr, addr, 6);
14792 		memcpy(dev->perm_addr, dev->dev_addr, 6);
14793 		return 0;
14794 	}
14795 	return -ENODEV;
14796 }
14797 
14798 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14799 {
14800 	struct net_device *dev = tp->dev;
14801 
14802 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14803 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14804 	return 0;
14805 }
14806 #endif
14807 
14808 static int __devinit tg3_get_device_address(struct tg3 *tp)
14809 {
14810 	struct net_device *dev = tp->dev;
14811 	u32 hi, lo, mac_offset;
14812 	int addr_ok = 0;
14813 
14814 #ifdef CONFIG_SPARC
14815 	if (!tg3_get_macaddr_sparc(tp))
14816 		return 0;
14817 #endif
14818 
14819 	mac_offset = 0x7c;
14820 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14821 	    tg3_flag(tp, 5780_CLASS)) {
14822 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14823 			mac_offset = 0xcc;
14824 		if (tg3_nvram_lock(tp))
14825 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14826 		else
14827 			tg3_nvram_unlock(tp);
14828 	} else if (tg3_flag(tp, 5717_PLUS)) {
14829 		if (tp->pci_fn & 1)
14830 			mac_offset = 0xcc;
14831 		if (tp->pci_fn > 1)
14832 			mac_offset += 0x18c;
14833 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14834 		mac_offset = 0x10;
14835 
14836 	/* First try to get it from MAC address mailbox. */
14837 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14838 	if ((hi >> 16) == 0x484b) {
14839 		dev->dev_addr[0] = (hi >>  8) & 0xff;
14840 		dev->dev_addr[1] = (hi >>  0) & 0xff;
14841 
14842 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14843 		dev->dev_addr[2] = (lo >> 24) & 0xff;
14844 		dev->dev_addr[3] = (lo >> 16) & 0xff;
14845 		dev->dev_addr[4] = (lo >>  8) & 0xff;
14846 		dev->dev_addr[5] = (lo >>  0) & 0xff;
14847 
14848 		/* Some old bootcode may report a 0 MAC address in SRAM */
14849 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14850 	}
14851 	if (!addr_ok) {
14852 		/* Next, try NVRAM. */
14853 		if (!tg3_flag(tp, NO_NVRAM) &&
14854 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14855 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14856 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14857 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14858 		}
14859 		/* Finally just fetch it out of the MAC control regs. */
14860 		else {
14861 			hi = tr32(MAC_ADDR_0_HIGH);
14862 			lo = tr32(MAC_ADDR_0_LOW);
14863 
14864 			dev->dev_addr[5] = lo & 0xff;
14865 			dev->dev_addr[4] = (lo >> 8) & 0xff;
14866 			dev->dev_addr[3] = (lo >> 16) & 0xff;
14867 			dev->dev_addr[2] = (lo >> 24) & 0xff;
14868 			dev->dev_addr[1] = hi & 0xff;
14869 			dev->dev_addr[0] = (hi >> 8) & 0xff;
14870 		}
14871 	}
14872 
14873 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14874 #ifdef CONFIG_SPARC
14875 		if (!tg3_get_default_macaddr_sparc(tp))
14876 			return 0;
14877 #endif
14878 		return -EINVAL;
14879 	}
14880 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14881 	return 0;
14882 }
14883 
14884 #define BOUNDARY_SINGLE_CACHELINE	1
14885 #define BOUNDARY_MULTI_CACHELINE	2
14886 
14887 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14888 {
14889 	int cacheline_size;
14890 	u8 byte;
14891 	int goal;
14892 
14893 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14894 	if (byte == 0)
14895 		cacheline_size = 1024;
14896 	else
14897 		cacheline_size = (int) byte * 4;
14898 
14899 	/* On 5703 and later chips, the boundary bits have no
14900 	 * effect.
14901 	 */
14902 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14903 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14904 	    !tg3_flag(tp, PCI_EXPRESS))
14905 		goto out;
14906 
14907 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14908 	goal = BOUNDARY_MULTI_CACHELINE;
14909 #else
14910 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14911 	goal = BOUNDARY_SINGLE_CACHELINE;
14912 #else
14913 	goal = 0;
14914 #endif
14915 #endif
14916 
14917 	if (tg3_flag(tp, 57765_PLUS)) {
14918 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14919 		goto out;
14920 	}
14921 
14922 	if (!goal)
14923 		goto out;
14924 
14925 	/* PCI controllers on most RISC systems tend to disconnect
14926 	 * when a device tries to burst across a cache-line boundary.
14927 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14928 	 *
14929 	 * Unfortunately, for PCI-E there are only limited
14930 	 * write-side controls for this, and thus for reads
14931 	 * we will still get the disconnects.  We'll also waste
14932 	 * these PCI cycles for both read and write for chips
14933 	 * other than 5700 and 5701 which do not implement the
14934 	 * boundary bits.
14935 	 */
14936 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14937 		switch (cacheline_size) {
14938 		case 16:
14939 		case 32:
14940 		case 64:
14941 		case 128:
14942 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14943 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14944 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14945 			} else {
14946 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14947 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14948 			}
14949 			break;
14950 
14951 		case 256:
14952 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14953 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14954 			break;
14955 
14956 		default:
14957 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14958 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14959 			break;
14960 		}
14961 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
14962 		switch (cacheline_size) {
14963 		case 16:
14964 		case 32:
14965 		case 64:
14966 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14967 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14968 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14969 				break;
14970 			}
14971 			/* fallthrough */
14972 		case 128:
14973 		default:
14974 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14975 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14976 			break;
14977 		}
14978 	} else {
14979 		switch (cacheline_size) {
14980 		case 16:
14981 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14982 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
14983 					DMA_RWCTRL_WRITE_BNDRY_16);
14984 				break;
14985 			}
14986 			/* fallthrough */
14987 		case 32:
14988 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14989 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
14990 					DMA_RWCTRL_WRITE_BNDRY_32);
14991 				break;
14992 			}
14993 			/* fallthrough */
14994 		case 64:
14995 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14996 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
14997 					DMA_RWCTRL_WRITE_BNDRY_64);
14998 				break;
14999 			}
15000 			/* fallthrough */
15001 		case 128:
15002 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15003 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
15004 					DMA_RWCTRL_WRITE_BNDRY_128);
15005 				break;
15006 			}
15007 			/* fallthrough */
15008 		case 256:
15009 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
15010 				DMA_RWCTRL_WRITE_BNDRY_256);
15011 			break;
15012 		case 512:
15013 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
15014 				DMA_RWCTRL_WRITE_BNDRY_512);
15015 			break;
15016 		case 1024:
15017 		default:
15018 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15019 				DMA_RWCTRL_WRITE_BNDRY_1024);
15020 			break;
15021 		}
15022 	}
15023 
15024 out:
15025 	return val;
15026 }
15027 
15028 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15029 {
15030 	struct tg3_internal_buffer_desc test_desc;
15031 	u32 sram_dma_descs;
15032 	int i, ret;
15033 
15034 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15035 
15036 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15037 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15038 	tw32(RDMAC_STATUS, 0);
15039 	tw32(WDMAC_STATUS, 0);
15040 
15041 	tw32(BUFMGR_MODE, 0);
15042 	tw32(FTQ_RESET, 0);
15043 
15044 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
15045 	test_desc.addr_lo = buf_dma & 0xffffffff;
15046 	test_desc.nic_mbuf = 0x00002100;
15047 	test_desc.len = size;
15048 
15049 	/*
15050 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15051 	 * the *second* time the tg3 driver was getting loaded after an
15052 	 * initial scan.
15053 	 *
15054 	 * Broadcom tells me:
15055 	 *   ...the DMA engine is connected to the GRC block and a DMA
15056 	 *   reset may affect the GRC block in some unpredictable way...
15057 	 *   The behavior of resets to individual blocks has not been tested.
15058 	 *
15059 	 * Broadcom noted the GRC reset will also reset all sub-components.
15060 	 */
15061 	if (to_device) {
15062 		test_desc.cqid_sqid = (13 << 8) | 2;
15063 
15064 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15065 		udelay(40);
15066 	} else {
15067 		test_desc.cqid_sqid = (16 << 8) | 7;
15068 
15069 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15070 		udelay(40);
15071 	}
15072 	test_desc.flags = 0x00000005;
15073 
15074 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15075 		u32 val;
15076 
15077 		val = *(((u32 *)&test_desc) + i);
15078 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15079 				       sram_dma_descs + (i * sizeof(u32)));
15080 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15081 	}
15082 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15083 
15084 	if (to_device)
15085 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15086 	else
15087 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15088 
15089 	ret = -ENODEV;
15090 	for (i = 0; i < 40; i++) {
15091 		u32 val;
15092 
15093 		if (to_device)
15094 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15095 		else
15096 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15097 		if ((val & 0xffff) == sram_dma_descs) {
15098 			ret = 0;
15099 			break;
15100 		}
15101 
15102 		udelay(100);
15103 	}
15104 
15105 	return ret;
15106 }
15107 
15108 #define TEST_BUFFER_SIZE	0x2000
15109 
15110 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15111 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15112 	{ },
15113 };
15114 
15115 static int __devinit tg3_test_dma(struct tg3 *tp)
15116 {
15117 	dma_addr_t buf_dma;
15118 	u32 *buf, saved_dma_rwctrl;
15119 	int ret = 0;
15120 
15121 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15122 				 &buf_dma, GFP_KERNEL);
15123 	if (!buf) {
15124 		ret = -ENOMEM;
15125 		goto out_nofree;
15126 	}
15127 
15128 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15129 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15130 
15131 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15132 
15133 	if (tg3_flag(tp, 57765_PLUS))
15134 		goto out;
15135 
15136 	if (tg3_flag(tp, PCI_EXPRESS)) {
15137 		/* DMA read watermark not used on PCIE */
15138 		tp->dma_rwctrl |= 0x00180000;
15139 	} else if (!tg3_flag(tp, PCIX_MODE)) {
15140 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15141 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15142 			tp->dma_rwctrl |= 0x003f0000;
15143 		else
15144 			tp->dma_rwctrl |= 0x003f000f;
15145 	} else {
15146 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15147 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15148 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15149 			u32 read_water = 0x7;
15150 
15151 			/* If the 5704 is behind the EPB bridge, we can
15152 			 * do the less restrictive ONE_DMA workaround for
15153 			 * better performance.
15154 			 */
15155 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15156 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15157 				tp->dma_rwctrl |= 0x8000;
15158 			else if (ccval == 0x6 || ccval == 0x7)
15159 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15160 
15161 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15162 				read_water = 4;
15163 			/* Set bit 23 to enable PCIX hw bug fix */
15164 			tp->dma_rwctrl |=
15165 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15166 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15167 				(1 << 23);
15168 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15169 			/* 5780 always in PCIX mode */
15170 			tp->dma_rwctrl |= 0x00144000;
15171 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15172 			/* 5714 always in PCIX mode */
15173 			tp->dma_rwctrl |= 0x00148000;
15174 		} else {
15175 			tp->dma_rwctrl |= 0x001b000f;
15176 		}
15177 	}
15178 
15179 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15180 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15181 		tp->dma_rwctrl &= 0xfffffff0;
15182 
15183 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15184 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15185 		/* Remove this if it causes problems for some boards. */
15186 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15187 
15188 		/* On 5700/5701 chips, we need to set this bit.
15189 		 * Otherwise the chip will issue cacheline transactions
15190 		 * to streamable DMA memory with not all the byte
15191 		 * enables turned on.  This is an error on several
15192 		 * RISC PCI controllers, in particular sparc64.
15193 		 *
15194 		 * On 5703/5704 chips, this bit has been reassigned
15195 		 * a different meaning.  In particular, it is used
15196 		 * on those chips to enable a PCI-X workaround.
15197 		 */
15198 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15199 	}
15200 
15201 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15202 
15203 #if 0
15204 	/* Unneeded, already done by tg3_get_invariants.  */
15205 	tg3_switch_clocks(tp);
15206 #endif
15207 
15208 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15209 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15210 		goto out;
15211 
15212 	/* It is best to perform DMA test with maximum write burst size
15213 	 * to expose the 5700/5701 write DMA bug.
15214 	 */
15215 	saved_dma_rwctrl = tp->dma_rwctrl;
15216 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15217 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15218 
15219 	while (1) {
15220 		u32 *p = buf, i;
15221 
15222 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15223 			p[i] = i;
15224 
15225 		/* Send the buffer to the chip. */
15226 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15227 		if (ret) {
15228 			dev_err(&tp->pdev->dev,
15229 				"%s: Buffer write failed. err = %d\n",
15230 				__func__, ret);
15231 			break;
15232 		}
15233 
15234 #if 0
15235 		/* validate data reached card RAM correctly. */
15236 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15237 			u32 val;
15238 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15239 			if (le32_to_cpu(val) != p[i]) {
15240 				dev_err(&tp->pdev->dev,
15241 					"%s: Buffer corrupted on device! "
15242 					"(%d != %d)\n", __func__, val, i);
15243 				/* ret = -ENODEV here? */
15244 			}
15245 			p[i] = 0;
15246 		}
15247 #endif
15248 		/* Now read it back. */
15249 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15250 		if (ret) {
15251 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15252 				"err = %d\n", __func__, ret);
15253 			break;
15254 		}
15255 
15256 		/* Verify it. */
15257 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15258 			if (p[i] == i)
15259 				continue;
15260 
15261 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15262 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15263 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15264 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15265 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15266 				break;
15267 			} else {
15268 				dev_err(&tp->pdev->dev,
15269 					"%s: Buffer corrupted on read back! "
15270 					"(%d != %d)\n", __func__, p[i], i);
15271 				ret = -ENODEV;
15272 				goto out;
15273 			}
15274 		}
15275 
15276 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15277 			/* Success. */
15278 			ret = 0;
15279 			break;
15280 		}
15281 	}
15282 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15283 	    DMA_RWCTRL_WRITE_BNDRY_16) {
15284 		/* DMA test passed without adjusting DMA boundary,
15285 		 * now look for chipsets that are known to expose the
15286 		 * DMA bug without failing the test.
15287 		 */
15288 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15289 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15290 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15291 		} else {
15292 			/* Safe to use the calculated DMA boundary. */
15293 			tp->dma_rwctrl = saved_dma_rwctrl;
15294 		}
15295 
15296 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15297 	}
15298 
15299 out:
15300 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15301 out_nofree:
15302 	return ret;
15303 }
15304 
15305 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15306 {
15307 	if (tg3_flag(tp, 57765_PLUS)) {
15308 		tp->bufmgr_config.mbuf_read_dma_low_water =
15309 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15310 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15311 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15312 		tp->bufmgr_config.mbuf_high_water =
15313 			DEFAULT_MB_HIGH_WATER_57765;
15314 
15315 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15316 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15317 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15318 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15319 		tp->bufmgr_config.mbuf_high_water_jumbo =
15320 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15321 	} else if (tg3_flag(tp, 5705_PLUS)) {
15322 		tp->bufmgr_config.mbuf_read_dma_low_water =
15323 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15324 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15325 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15326 		tp->bufmgr_config.mbuf_high_water =
15327 			DEFAULT_MB_HIGH_WATER_5705;
15328 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15329 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15330 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15331 			tp->bufmgr_config.mbuf_high_water =
15332 				DEFAULT_MB_HIGH_WATER_5906;
15333 		}
15334 
15335 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15336 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15337 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15338 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15339 		tp->bufmgr_config.mbuf_high_water_jumbo =
15340 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15341 	} else {
15342 		tp->bufmgr_config.mbuf_read_dma_low_water =
15343 			DEFAULT_MB_RDMA_LOW_WATER;
15344 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15345 			DEFAULT_MB_MACRX_LOW_WATER;
15346 		tp->bufmgr_config.mbuf_high_water =
15347 			DEFAULT_MB_HIGH_WATER;
15348 
15349 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15350 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15351 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15352 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15353 		tp->bufmgr_config.mbuf_high_water_jumbo =
15354 			DEFAULT_MB_HIGH_WATER_JUMBO;
15355 	}
15356 
15357 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15358 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15359 }
15360 
15361 static char * __devinit tg3_phy_string(struct tg3 *tp)
15362 {
15363 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15364 	case TG3_PHY_ID_BCM5400:	return "5400";
15365 	case TG3_PHY_ID_BCM5401:	return "5401";
15366 	case TG3_PHY_ID_BCM5411:	return "5411";
15367 	case TG3_PHY_ID_BCM5701:	return "5701";
15368 	case TG3_PHY_ID_BCM5703:	return "5703";
15369 	case TG3_PHY_ID_BCM5704:	return "5704";
15370 	case TG3_PHY_ID_BCM5705:	return "5705";
15371 	case TG3_PHY_ID_BCM5750:	return "5750";
15372 	case TG3_PHY_ID_BCM5752:	return "5752";
15373 	case TG3_PHY_ID_BCM5714:	return "5714";
15374 	case TG3_PHY_ID_BCM5780:	return "5780";
15375 	case TG3_PHY_ID_BCM5755:	return "5755";
15376 	case TG3_PHY_ID_BCM5787:	return "5787";
15377 	case TG3_PHY_ID_BCM5784:	return "5784";
15378 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15379 	case TG3_PHY_ID_BCM5906:	return "5906";
15380 	case TG3_PHY_ID_BCM5761:	return "5761";
15381 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15382 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15383 	case TG3_PHY_ID_BCM57765:	return "57765";
15384 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15385 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15386 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15387 	case 0:			return "serdes";
15388 	default:		return "unknown";
15389 	}
15390 }
15391 
15392 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15393 {
15394 	if (tg3_flag(tp, PCI_EXPRESS)) {
15395 		strcpy(str, "PCI Express");
15396 		return str;
15397 	} else if (tg3_flag(tp, PCIX_MODE)) {
15398 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15399 
15400 		strcpy(str, "PCIX:");
15401 
15402 		if ((clock_ctrl == 7) ||
15403 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15404 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15405 			strcat(str, "133MHz");
15406 		else if (clock_ctrl == 0)
15407 			strcat(str, "33MHz");
15408 		else if (clock_ctrl == 2)
15409 			strcat(str, "50MHz");
15410 		else if (clock_ctrl == 4)
15411 			strcat(str, "66MHz");
15412 		else if (clock_ctrl == 6)
15413 			strcat(str, "100MHz");
15414 	} else {
15415 		strcpy(str, "PCI:");
15416 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15417 			strcat(str, "66MHz");
15418 		else
15419 			strcat(str, "33MHz");
15420 	}
15421 	if (tg3_flag(tp, PCI_32BIT))
15422 		strcat(str, ":32-bit");
15423 	else
15424 		strcat(str, ":64-bit");
15425 	return str;
15426 }
15427 
15428 static void __devinit tg3_init_coal(struct tg3 *tp)
15429 {
15430 	struct ethtool_coalesce *ec = &tp->coal;
15431 
15432 	memset(ec, 0, sizeof(*ec));
15433 	ec->cmd = ETHTOOL_GCOALESCE;
15434 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15435 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15436 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15437 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15438 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15439 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15440 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15441 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15442 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15443 
15444 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15445 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15446 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15447 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15448 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15449 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15450 	}
15451 
15452 	if (tg3_flag(tp, 5705_PLUS)) {
15453 		ec->rx_coalesce_usecs_irq = 0;
15454 		ec->tx_coalesce_usecs_irq = 0;
15455 		ec->stats_block_coalesce_usecs = 0;
15456 	}
15457 }
15458 
15459 static int __devinit tg3_init_one(struct pci_dev *pdev,
15460 				  const struct pci_device_id *ent)
15461 {
15462 	struct net_device *dev;
15463 	struct tg3 *tp;
15464 	int i, err, pm_cap;
15465 	u32 sndmbx, rcvmbx, intmbx;
15466 	char str[40];
15467 	u64 dma_mask, persist_dma_mask;
15468 	netdev_features_t features = 0;
15469 
15470 	printk_once(KERN_INFO "%s\n", version);
15471 
15472 	err = pci_enable_device(pdev);
15473 	if (err) {
15474 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15475 		return err;
15476 	}
15477 
15478 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15479 	if (err) {
15480 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15481 		goto err_out_disable_pdev;
15482 	}
15483 
15484 	pci_set_master(pdev);
15485 
15486 	/* Find power-management capability. */
15487 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15488 	if (pm_cap == 0) {
15489 		dev_err(&pdev->dev,
15490 			"Cannot find Power Management capability, aborting\n");
15491 		err = -EIO;
15492 		goto err_out_free_res;
15493 	}
15494 
15495 	err = pci_set_power_state(pdev, PCI_D0);
15496 	if (err) {
15497 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15498 		goto err_out_free_res;
15499 	}
15500 
15501 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15502 	if (!dev) {
15503 		err = -ENOMEM;
15504 		goto err_out_power_down;
15505 	}
15506 
15507 	SET_NETDEV_DEV(dev, &pdev->dev);
15508 
15509 	tp = netdev_priv(dev);
15510 	tp->pdev = pdev;
15511 	tp->dev = dev;
15512 	tp->pm_cap = pm_cap;
15513 	tp->rx_mode = TG3_DEF_RX_MODE;
15514 	tp->tx_mode = TG3_DEF_TX_MODE;
15515 
15516 	if (tg3_debug > 0)
15517 		tp->msg_enable = tg3_debug;
15518 	else
15519 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15520 
15521 	/* The word/byte swap controls here control register access byte
15522 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15523 	 * setting below.
15524 	 */
15525 	tp->misc_host_ctrl =
15526 		MISC_HOST_CTRL_MASK_PCI_INT |
15527 		MISC_HOST_CTRL_WORD_SWAP |
15528 		MISC_HOST_CTRL_INDIR_ACCESS |
15529 		MISC_HOST_CTRL_PCISTATE_RW;
15530 
15531 	/* The NONFRM (non-frame) byte/word swap controls take effect
15532 	 * on descriptor entries, anything which isn't packet data.
15533 	 *
15534 	 * The StrongARM chips on the board (one for tx, one for rx)
15535 	 * are running in big-endian mode.
15536 	 */
15537 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15538 			GRC_MODE_WSWAP_NONFRM_DATA);
15539 #ifdef __BIG_ENDIAN
15540 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15541 #endif
15542 	spin_lock_init(&tp->lock);
15543 	spin_lock_init(&tp->indirect_lock);
15544 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15545 
15546 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15547 	if (!tp->regs) {
15548 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15549 		err = -ENOMEM;
15550 		goto err_out_free_dev;
15551 	}
15552 
15553 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15554 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15555 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15556 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15557 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15558 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15559 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15560 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15561 		tg3_flag_set(tp, ENABLE_APE);
15562 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15563 		if (!tp->aperegs) {
15564 			dev_err(&pdev->dev,
15565 				"Cannot map APE registers, aborting\n");
15566 			err = -ENOMEM;
15567 			goto err_out_iounmap;
15568 		}
15569 	}
15570 
15571 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15572 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15573 
15574 	dev->ethtool_ops = &tg3_ethtool_ops;
15575 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15576 	dev->netdev_ops = &tg3_netdev_ops;
15577 	dev->irq = pdev->irq;
15578 
15579 	err = tg3_get_invariants(tp);
15580 	if (err) {
15581 		dev_err(&pdev->dev,
15582 			"Problem fetching invariants of chip, aborting\n");
15583 		goto err_out_apeunmap;
15584 	}
15585 
15586 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15587 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15588 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15589 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15590 	 * do DMA address check in tg3_start_xmit().
15591 	 */
15592 	if (tg3_flag(tp, IS_5788))
15593 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15594 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15595 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15596 #ifdef CONFIG_HIGHMEM
15597 		dma_mask = DMA_BIT_MASK(64);
15598 #endif
15599 	} else
15600 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15601 
15602 	/* Configure DMA attributes. */
15603 	if (dma_mask > DMA_BIT_MASK(32)) {
15604 		err = pci_set_dma_mask(pdev, dma_mask);
15605 		if (!err) {
15606 			features |= NETIF_F_HIGHDMA;
15607 			err = pci_set_consistent_dma_mask(pdev,
15608 							  persist_dma_mask);
15609 			if (err < 0) {
15610 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15611 					"DMA for consistent allocations\n");
15612 				goto err_out_apeunmap;
15613 			}
15614 		}
15615 	}
15616 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15617 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15618 		if (err) {
15619 			dev_err(&pdev->dev,
15620 				"No usable DMA configuration, aborting\n");
15621 			goto err_out_apeunmap;
15622 		}
15623 	}
15624 
15625 	tg3_init_bufmgr_config(tp);
15626 
15627 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15628 
15629 	/* 5700 B0 chips do not support checksumming correctly due
15630 	 * to hardware bugs.
15631 	 */
15632 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15633 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15634 
15635 		if (tg3_flag(tp, 5755_PLUS))
15636 			features |= NETIF_F_IPV6_CSUM;
15637 	}
15638 
15639 	/* TSO is on by default on chips that support hardware TSO.
15640 	 * Firmware TSO on older chips gives lower performance, so it
15641 	 * is off by default, but can be enabled using ethtool.
15642 	 */
15643 	if ((tg3_flag(tp, HW_TSO_1) ||
15644 	     tg3_flag(tp, HW_TSO_2) ||
15645 	     tg3_flag(tp, HW_TSO_3)) &&
15646 	    (features & NETIF_F_IP_CSUM))
15647 		features |= NETIF_F_TSO;
15648 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15649 		if (features & NETIF_F_IPV6_CSUM)
15650 			features |= NETIF_F_TSO6;
15651 		if (tg3_flag(tp, HW_TSO_3) ||
15652 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15653 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15654 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15655 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15656 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15657 			features |= NETIF_F_TSO_ECN;
15658 	}
15659 
15660 	dev->features |= features;
15661 	dev->vlan_features |= features;
15662 
15663 	/*
15664 	 * Add loopback capability only for a subset of devices that support
15665 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15666 	 * loopback for the remaining devices.
15667 	 */
15668 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15669 	    !tg3_flag(tp, CPMU_PRESENT))
15670 		/* Add the loopback capability */
15671 		features |= NETIF_F_LOOPBACK;
15672 
15673 	dev->hw_features |= features;
15674 
15675 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15676 	    !tg3_flag(tp, TSO_CAPABLE) &&
15677 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15678 		tg3_flag_set(tp, MAX_RXPEND_64);
15679 		tp->rx_pending = 63;
15680 	}
15681 
15682 	err = tg3_get_device_address(tp);
15683 	if (err) {
15684 		dev_err(&pdev->dev,
15685 			"Could not obtain valid ethernet address, aborting\n");
15686 		goto err_out_apeunmap;
15687 	}
15688 
15689 	/*
15690 	 * Reset chip in case UNDI or EFI driver did not shutdown
15691 	 * DMA self test will enable WDMAC and we'll see (spurious)
15692 	 * pending DMA on the PCI bus at that point.
15693 	 */
15694 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15695 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15696 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15697 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15698 	}
15699 
15700 	err = tg3_test_dma(tp);
15701 	if (err) {
15702 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15703 		goto err_out_apeunmap;
15704 	}
15705 
15706 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15707 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15708 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15709 	for (i = 0; i < tp->irq_max; i++) {
15710 		struct tg3_napi *tnapi = &tp->napi[i];
15711 
15712 		tnapi->tp = tp;
15713 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15714 
15715 		tnapi->int_mbox = intmbx;
15716 		if (i <= 4)
15717 			intmbx += 0x8;
15718 		else
15719 			intmbx += 0x4;
15720 
15721 		tnapi->consmbox = rcvmbx;
15722 		tnapi->prodmbox = sndmbx;
15723 
15724 		if (i)
15725 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15726 		else
15727 			tnapi->coal_now = HOSTCC_MODE_NOW;
15728 
15729 		if (!tg3_flag(tp, SUPPORT_MSIX))
15730 			break;
15731 
15732 		/*
15733 		 * If we support MSIX, we'll be using RSS.  If we're using
15734 		 * RSS, the first vector only handles link interrupts and the
15735 		 * remaining vectors handle rx and tx interrupts.  Reuse the
15736 		 * mailbox values for the next iteration.  The values we setup
15737 		 * above are still useful for the single vectored mode.
15738 		 */
15739 		if (!i)
15740 			continue;
15741 
15742 		rcvmbx += 0x8;
15743 
15744 		if (sndmbx & 0x4)
15745 			sndmbx -= 0x4;
15746 		else
15747 			sndmbx += 0xc;
15748 	}
15749 
15750 	tg3_init_coal(tp);
15751 
15752 	pci_set_drvdata(pdev, dev);
15753 
15754 	if (tg3_flag(tp, 5717_PLUS)) {
15755 		/* Resume a low-power mode */
15756 		tg3_frob_aux_power(tp, false);
15757 	}
15758 
15759 	tg3_timer_init(tp);
15760 
15761 	err = register_netdev(dev);
15762 	if (err) {
15763 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15764 		goto err_out_apeunmap;
15765 	}
15766 
15767 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15768 		    tp->board_part_number,
15769 		    tp->pci_chip_rev_id,
15770 		    tg3_bus_string(tp, str),
15771 		    dev->dev_addr);
15772 
15773 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15774 		struct phy_device *phydev;
15775 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15776 		netdev_info(dev,
15777 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15778 			    phydev->drv->name, dev_name(&phydev->dev));
15779 	} else {
15780 		char *ethtype;
15781 
15782 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15783 			ethtype = "10/100Base-TX";
15784 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15785 			ethtype = "1000Base-SX";
15786 		else
15787 			ethtype = "10/100/1000Base-T";
15788 
15789 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15790 			    "(WireSpeed[%d], EEE[%d])\n",
15791 			    tg3_phy_string(tp), ethtype,
15792 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15793 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15794 	}
15795 
15796 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15797 		    (dev->features & NETIF_F_RXCSUM) != 0,
15798 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15799 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15800 		    tg3_flag(tp, ENABLE_ASF) != 0,
15801 		    tg3_flag(tp, TSO_CAPABLE) != 0);
15802 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15803 		    tp->dma_rwctrl,
15804 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15805 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15806 
15807 	pci_save_state(pdev);
15808 
15809 	return 0;
15810 
15811 err_out_apeunmap:
15812 	if (tp->aperegs) {
15813 		iounmap(tp->aperegs);
15814 		tp->aperegs = NULL;
15815 	}
15816 
15817 err_out_iounmap:
15818 	if (tp->regs) {
15819 		iounmap(tp->regs);
15820 		tp->regs = NULL;
15821 	}
15822 
15823 err_out_free_dev:
15824 	free_netdev(dev);
15825 
15826 err_out_power_down:
15827 	pci_set_power_state(pdev, PCI_D3hot);
15828 
15829 err_out_free_res:
15830 	pci_release_regions(pdev);
15831 
15832 err_out_disable_pdev:
15833 	pci_disable_device(pdev);
15834 	pci_set_drvdata(pdev, NULL);
15835 	return err;
15836 }
15837 
15838 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15839 {
15840 	struct net_device *dev = pci_get_drvdata(pdev);
15841 
15842 	if (dev) {
15843 		struct tg3 *tp = netdev_priv(dev);
15844 
15845 		if (tp->fw)
15846 			release_firmware(tp->fw);
15847 
15848 		tg3_reset_task_cancel(tp);
15849 
15850 		if (tg3_flag(tp, USE_PHYLIB)) {
15851 			tg3_phy_fini(tp);
15852 			tg3_mdio_fini(tp);
15853 		}
15854 
15855 		unregister_netdev(dev);
15856 		if (tp->aperegs) {
15857 			iounmap(tp->aperegs);
15858 			tp->aperegs = NULL;
15859 		}
15860 		if (tp->regs) {
15861 			iounmap(tp->regs);
15862 			tp->regs = NULL;
15863 		}
15864 		free_netdev(dev);
15865 		pci_release_regions(pdev);
15866 		pci_disable_device(pdev);
15867 		pci_set_drvdata(pdev, NULL);
15868 	}
15869 }
15870 
15871 #ifdef CONFIG_PM_SLEEP
15872 static int tg3_suspend(struct device *device)
15873 {
15874 	struct pci_dev *pdev = to_pci_dev(device);
15875 	struct net_device *dev = pci_get_drvdata(pdev);
15876 	struct tg3 *tp = netdev_priv(dev);
15877 	int err;
15878 
15879 	if (!netif_running(dev))
15880 		return 0;
15881 
15882 	tg3_reset_task_cancel(tp);
15883 	tg3_phy_stop(tp);
15884 	tg3_netif_stop(tp);
15885 
15886 	tg3_timer_stop(tp);
15887 
15888 	tg3_full_lock(tp, 1);
15889 	tg3_disable_ints(tp);
15890 	tg3_full_unlock(tp);
15891 
15892 	netif_device_detach(dev);
15893 
15894 	tg3_full_lock(tp, 0);
15895 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15896 	tg3_flag_clear(tp, INIT_COMPLETE);
15897 	tg3_full_unlock(tp);
15898 
15899 	err = tg3_power_down_prepare(tp);
15900 	if (err) {
15901 		int err2;
15902 
15903 		tg3_full_lock(tp, 0);
15904 
15905 		tg3_flag_set(tp, INIT_COMPLETE);
15906 		err2 = tg3_restart_hw(tp, 1);
15907 		if (err2)
15908 			goto out;
15909 
15910 		tg3_timer_start(tp);
15911 
15912 		netif_device_attach(dev);
15913 		tg3_netif_start(tp);
15914 
15915 out:
15916 		tg3_full_unlock(tp);
15917 
15918 		if (!err2)
15919 			tg3_phy_start(tp);
15920 	}
15921 
15922 	return err;
15923 }
15924 
15925 static int tg3_resume(struct device *device)
15926 {
15927 	struct pci_dev *pdev = to_pci_dev(device);
15928 	struct net_device *dev = pci_get_drvdata(pdev);
15929 	struct tg3 *tp = netdev_priv(dev);
15930 	int err;
15931 
15932 	if (!netif_running(dev))
15933 		return 0;
15934 
15935 	netif_device_attach(dev);
15936 
15937 	tg3_full_lock(tp, 0);
15938 
15939 	tg3_flag_set(tp, INIT_COMPLETE);
15940 	err = tg3_restart_hw(tp, 1);
15941 	if (err)
15942 		goto out;
15943 
15944 	tg3_timer_start(tp);
15945 
15946 	tg3_netif_start(tp);
15947 
15948 out:
15949 	tg3_full_unlock(tp);
15950 
15951 	if (!err)
15952 		tg3_phy_start(tp);
15953 
15954 	return err;
15955 }
15956 
15957 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15958 #define TG3_PM_OPS (&tg3_pm_ops)
15959 
15960 #else
15961 
15962 #define TG3_PM_OPS NULL
15963 
15964 #endif /* CONFIG_PM_SLEEP */
15965 
15966 /**
15967  * tg3_io_error_detected - called when PCI error is detected
15968  * @pdev: Pointer to PCI device
15969  * @state: The current pci connection state
15970  *
15971  * This function is called after a PCI bus error affecting
15972  * this device has been detected.
15973  */
15974 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15975 					      pci_channel_state_t state)
15976 {
15977 	struct net_device *netdev = pci_get_drvdata(pdev);
15978 	struct tg3 *tp = netdev_priv(netdev);
15979 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15980 
15981 	netdev_info(netdev, "PCI I/O error detected\n");
15982 
15983 	rtnl_lock();
15984 
15985 	if (!netif_running(netdev))
15986 		goto done;
15987 
15988 	tg3_phy_stop(tp);
15989 
15990 	tg3_netif_stop(tp);
15991 
15992 	tg3_timer_stop(tp);
15993 
15994 	/* Want to make sure that the reset task doesn't run */
15995 	tg3_reset_task_cancel(tp);
15996 
15997 	netif_device_detach(netdev);
15998 
15999 	/* Clean up software state, even if MMIO is blocked */
16000 	tg3_full_lock(tp, 0);
16001 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16002 	tg3_full_unlock(tp);
16003 
16004 done:
16005 	if (state == pci_channel_io_perm_failure)
16006 		err = PCI_ERS_RESULT_DISCONNECT;
16007 	else
16008 		pci_disable_device(pdev);
16009 
16010 	rtnl_unlock();
16011 
16012 	return err;
16013 }
16014 
16015 /**
16016  * tg3_io_slot_reset - called after the pci bus has been reset.
16017  * @pdev: Pointer to PCI device
16018  *
16019  * Restart the card from scratch, as if from a cold-boot.
16020  * At this point, the card has exprienced a hard reset,
16021  * followed by fixups by BIOS, and has its config space
16022  * set up identically to what it was at cold boot.
16023  */
16024 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16025 {
16026 	struct net_device *netdev = pci_get_drvdata(pdev);
16027 	struct tg3 *tp = netdev_priv(netdev);
16028 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16029 	int err;
16030 
16031 	rtnl_lock();
16032 
16033 	if (pci_enable_device(pdev)) {
16034 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16035 		goto done;
16036 	}
16037 
16038 	pci_set_master(pdev);
16039 	pci_restore_state(pdev);
16040 	pci_save_state(pdev);
16041 
16042 	if (!netif_running(netdev)) {
16043 		rc = PCI_ERS_RESULT_RECOVERED;
16044 		goto done;
16045 	}
16046 
16047 	err = tg3_power_up(tp);
16048 	if (err)
16049 		goto done;
16050 
16051 	rc = PCI_ERS_RESULT_RECOVERED;
16052 
16053 done:
16054 	rtnl_unlock();
16055 
16056 	return rc;
16057 }
16058 
16059 /**
16060  * tg3_io_resume - called when traffic can start flowing again.
16061  * @pdev: Pointer to PCI device
16062  *
16063  * This callback is called when the error recovery driver tells
16064  * us that its OK to resume normal operation.
16065  */
16066 static void tg3_io_resume(struct pci_dev *pdev)
16067 {
16068 	struct net_device *netdev = pci_get_drvdata(pdev);
16069 	struct tg3 *tp = netdev_priv(netdev);
16070 	int err;
16071 
16072 	rtnl_lock();
16073 
16074 	if (!netif_running(netdev))
16075 		goto done;
16076 
16077 	tg3_full_lock(tp, 0);
16078 	tg3_flag_set(tp, INIT_COMPLETE);
16079 	err = tg3_restart_hw(tp, 1);
16080 	tg3_full_unlock(tp);
16081 	if (err) {
16082 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16083 		goto done;
16084 	}
16085 
16086 	netif_device_attach(netdev);
16087 
16088 	tg3_timer_start(tp);
16089 
16090 	tg3_netif_start(tp);
16091 
16092 	tg3_phy_start(tp);
16093 
16094 done:
16095 	rtnl_unlock();
16096 }
16097 
16098 static struct pci_error_handlers tg3_err_handler = {
16099 	.error_detected	= tg3_io_error_detected,
16100 	.slot_reset	= tg3_io_slot_reset,
16101 	.resume		= tg3_io_resume
16102 };
16103 
16104 static struct pci_driver tg3_driver = {
16105 	.name		= DRV_MODULE_NAME,
16106 	.id_table	= tg3_pci_tbl,
16107 	.probe		= tg3_init_one,
16108 	.remove		= __devexit_p(tg3_remove_one),
16109 	.err_handler	= &tg3_err_handler,
16110 	.driver.pm	= TG3_PM_OPS,
16111 };
16112 
16113 static int __init tg3_init(void)
16114 {
16115 	return pci_register_driver(&tg3_driver);
16116 }
16117 
16118 static void __exit tg3_cleanup(void)
16119 {
16120 	pci_unregister_driver(&tg3_driver);
16121 }
16122 
16123 module_init(tg3_init);
16124 module_exit(tg3_cleanup);
16125