/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
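
/* Example (illustrative only): with the helpers above,
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * expands to atomic test_bit()/set_bit() operations on the
 * TG3_FLAG_ENABLE_APE and TG3_FLAG_APE_HAS_NCSI bits of tp->tg3_flags.
 */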

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			123
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo and
 * similar operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

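/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data moves
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two config
 * cycles so concurrent users cannot clobber the window.
 */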
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

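/* Mailbox writes without MMIO: the two RX producer-index mailboxes have
 * dedicated PCI config-space aliases; all other mailboxes are reached
 * through the indirect register window at offset + 0x5600.
 */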
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

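/* TX producer mailbox write.  On chips flagged TXD_MBOX_HWBUG the value is
 * written twice to work around a hardware erratum; where MMIO writes may be
 * reordered (MBOX_WRITE_REORDER) the mailbox is read back to flush the
 * posted write.
 */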
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

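/* The APE is the on-chip management coprocessor (used, e.g., for NCSI
 * management firmware).  The host driver and the APE arbitrate shared
 * resources through per-resource request/grant registers; on init,
 * release any grants a previous driver instance may have left behind.
 */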
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

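/* Acquire an APE hardware semaphore: post our request bit, then poll the
 * grant register for up to 1 ms (100 * 10 usec).  If the grant never
 * appears, withdraw the request and return -EBUSY.
 */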
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
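		/* fall through */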
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
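		/* fall through */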
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

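/* Wait up to timeout_us for the APE to finish servicing the previous
 * event.  On success, returns 0 with TG3_APE_LOCK_MEM held so the caller
 * can post the next event atomically.
 */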
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

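/* Clause 22 MII read through the MAC's MI interface: pause auto-polling,
 * shift out a read frame (PHY address, register, MI_COM_CMD_READ) via
 * MAC_MI_COM, then poll MI_COM_BUSY until the data field is valid.
 */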
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

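/* Clause 45 access tunneled over clause 22 (the MMD access method of IEEE
 * 802.3 Annex 22D): select the MMD and register address through
 * MII_TG3_MMD_CTRL and MII_TG3_MMD_ADDRESS, then transfer the data in
 * no-post-increment mode.
 */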
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

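/* The PHY DSP coefficients sit behind an address/data register pair: write
 * the target to MII_TG3_DSP_ADDRESS, then move the value through
 * MII_TG3_DSP_RW_PORT.
 */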
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

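/* MII_TG3_AUX_CTRL is a banked (shadowed) register: the low bits select a
 * shadow, and a read must first program the RDSEL field of the MISC shadow
 * to choose which bank the next read returns.
 */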
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

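/* Gather link state for the management firmware as four 32-bit words:
 * BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 (zeroed for MII serdes
 * PHYs), and the PHY address register, each packed high/low.
 */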
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

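/* Map the requested TX/RX pause configuration onto 1000BASE-X
 * advertisement bits (IEEE 802.3 Annex 28B): symmetric pause advertises
 * PAUSE, TX-only advertises ASYM_PAUSE, and RX-only advertises both.
 */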
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

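/* Resolve the negotiated pause state from the local and remote 1000BASE-X
 * advertisements, following the 802.3 Annex 28B priority table: symmetric
 * pause on both sides enables TX+RX; otherwise the asymmetric-pause bits
 * decide which single direction, if any, is enabled.
 */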
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

1925 	tp->old_link = phydev->link;
1926 	tp->link_config.active_speed = phydev->speed;
1927 	tp->link_config.active_duplex = phydev->duplex;
1928 
1929 	spin_unlock_bh(&tp->lock);
1930 
1931 	if (linkmesg)
1932 		tg3_link_report(tp);
1933 }
1934 
1935 static int tg3_phy_init(struct tg3 *tp)
1936 {
1937 	struct phy_device *phydev;
1938 
1939 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1940 		return 0;
1941 
1942 	/* Bring the PHY back to a known state. */
1943 	tg3_bmcr_reset(tp);
1944 
1945 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1946 
1947 	/* Attach the MAC to the PHY. */
1948 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1949 			     phydev->dev_flags, phydev->interface);
1950 	if (IS_ERR(phydev)) {
1951 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1952 		return PTR_ERR(phydev);
1953 	}
1954 
1955 	/* Mask with MAC supported features. */
1956 	switch (phydev->interface) {
1957 	case PHY_INTERFACE_MODE_GMII:
1958 	case PHY_INTERFACE_MODE_RGMII:
1959 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1960 			phydev->supported &= (PHY_GBIT_FEATURES |
1961 					      SUPPORTED_Pause |
1962 					      SUPPORTED_Asym_Pause);
1963 			break;
1964 		}
		/* fall through */
1966 	case PHY_INTERFACE_MODE_MII:
1967 		phydev->supported &= (PHY_BASIC_FEATURES |
1968 				      SUPPORTED_Pause |
1969 				      SUPPORTED_Asym_Pause);
1970 		break;
1971 	default:
1972 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1973 		return -EINVAL;
1974 	}
1975 
1976 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1977 
1978 	phydev->advertising = phydev->supported;
1979 
1980 	return 0;
1981 }
1982 
1983 static void tg3_phy_start(struct tg3 *tp)
1984 {
1985 	struct phy_device *phydev;
1986 
1987 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1988 		return;
1989 
1990 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1991 
1992 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1993 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1994 		phydev->speed = tp->link_config.speed;
1995 		phydev->duplex = tp->link_config.duplex;
1996 		phydev->autoneg = tp->link_config.autoneg;
1997 		phydev->advertising = tp->link_config.advertising;
1998 	}
1999 
2000 	phy_start(phydev);
2001 
2002 	phy_start_aneg(phydev);
2003 }
2004 
2005 static void tg3_phy_stop(struct tg3 *tp)
2006 {
2007 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008 		return;
2009 
2010 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2011 }
2012 
2013 static void tg3_phy_fini(struct tg3 *tp)
2014 {
2015 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2016 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2017 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2018 	}
2019 }
2020 
2021 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2022 {
2023 	int err;
2024 	u32 val;
2025 
2026 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2027 		return 0;
2028 
2029 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2030 		/* Cannot do read-modify-write on 5401 */
2031 		err = tg3_phy_auxctl_write(tp,
2032 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2033 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2034 					   0x4c20);
2035 		goto done;
2036 	}
2037 
2038 	err = tg3_phy_auxctl_read(tp,
2039 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2040 	if (err)
2041 		return err;
2042 
2043 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2044 	err = tg3_phy_auxctl_write(tp,
2045 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2046 
2047 done:
2048 	return err;
2049 }
2050 
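/* Enable or disable Auto Power Down (APD) on FET-style PHYs.  The
 * APD bit lives in a shadow register bank that is only reachable
 * while MII_TG3_FET_SHADOW_EN is set in the FET test register.
 */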
2051 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2052 {
2053 	u32 phytest;
2054 
2055 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2056 		u32 phy;
2057 
2058 		tg3_writephy(tp, MII_TG3_FET_TEST,
2059 			     phytest | MII_TG3_FET_SHADOW_EN);
2060 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2061 			if (enable)
2062 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2063 			else
2064 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2065 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2066 		}
2067 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2068 	}
2069 }
2070 
2071 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073 	u32 reg;
2074 
2075 	if (!tg3_flag(tp, 5705_PLUS) ||
2076 	    (tg3_flag(tp, 5717_PLUS) &&
2077 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2078 		return;
2079 
2080 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2081 		tg3_phy_fet_toggle_apd(tp, enable);
2082 		return;
2083 	}
2084 
2085 	reg = MII_TG3_MISC_SHDW_WREN |
2086 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2087 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2088 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2089 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2090 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2091 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2092 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2093 
	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

2097 	reg = MII_TG3_MISC_SHDW_WREN |
2098 	      MII_TG3_MISC_SHDW_APD_SEL |
2099 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2100 	if (enable)
2101 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2102 
2103 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2104 }
2105 
2106 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2107 {
2108 	u32 phy;
2109 
2110 	if (!tg3_flag(tp, 5705_PLUS) ||
2111 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2112 		return;
2113 
2114 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2115 		u32 ephy;
2116 
2117 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2118 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2119 
2120 			tg3_writephy(tp, MII_TG3_FET_TEST,
2121 				     ephy | MII_TG3_FET_SHADOW_EN);
2122 			if (!tg3_readphy(tp, reg, &phy)) {
2123 				if (enable)
2124 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2125 				else
2126 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2127 				tg3_writephy(tp, reg, phy);
2128 			}
2129 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2130 		}
2131 	} else {
2132 		int ret;
2133 
2134 		ret = tg3_phy_auxctl_read(tp,
2135 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2136 		if (!ret) {
2137 			if (enable)
2138 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2139 			else
2140 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2141 			tg3_phy_auxctl_write(tp,
2142 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2143 		}
2144 	}
2145 }
2146 
2147 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2148 {
2149 	int ret;
2150 	u32 val;
2151 
2152 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2153 		return;
2154 
2155 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2156 	if (!ret)
2157 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2158 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2159 }
2160 
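/* Copy the factory OTP (one-time programmable) calibration values
 * from tp->phy_otp into the PHY's DSP registers.  Each field is
 * masked and shifted out of the packed OTP word and written through
 * the DSP address/data port.
 */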
2161 static void tg3_phy_apply_otp(struct tg3 *tp)
2162 {
2163 	u32 otp, phy;
2164 
2165 	if (!tp->phy_otp)
2166 		return;
2167 
2168 	otp = tp->phy_otp;
2169 
2170 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2171 		return;
2172 
2173 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2174 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2175 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2176 
2177 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2178 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2179 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2180 
2181 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2182 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2183 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2184 
2185 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2186 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2187 
2188 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2189 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2190 
2191 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2192 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2193 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2194 
2195 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2196 }
2197 
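/* Decide whether EEE low power idle (LPI) may be used on this link.
 * tp->setlpicnt is left non-zero only when EEE was negotiated at 100
 * or 1000 Mbps full duplex; in all other cases LPI is forced off
 * here.
 */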
2198 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2199 {
2200 	u32 val;
2201 
2202 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2203 		return;
2204 
2205 	tp->setlpicnt = 0;
2206 
2207 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2208 	    current_link_up == 1 &&
2209 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2210 	    (tp->link_config.active_speed == SPEED_100 ||
2211 	     tp->link_config.active_speed == SPEED_1000)) {
2212 		u32 eeectl;
2213 
2214 		if (tp->link_config.active_speed == SPEED_1000)
2215 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2216 		else
2217 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2218 
2219 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2220 
2221 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2222 				  TG3_CL45_D7_EEERES_STAT, &val);
2223 
2224 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2225 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2226 			tp->setlpicnt = 2;
2227 	}
2228 
2229 	if (!tp->setlpicnt) {
2230 		if (current_link_up == 1 &&
2231 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2232 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2233 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2234 		}
2235 
2236 		val = tr32(TG3_CPMU_EEE_MODE);
2237 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2238 	}
2239 }
2240 
2241 static void tg3_phy_eee_enable(struct tg3 *tp)
2242 {
2243 	u32 val;
2244 
2245 	if (tp->link_config.active_speed == SPEED_1000 &&
2246 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2247 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2248 	     tg3_flag(tp, 57765_CLASS)) &&
2249 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2250 		val = MII_TG3_DSP_TAP26_ALNOKO |
2251 		      MII_TG3_DSP_TAP26_RMRXSTO;
2252 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2253 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254 	}
2255 
2256 	val = tr32(TG3_CPMU_EEE_MODE);
2257 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2258 }
2259 
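/* Poll the DSP control register until the macro busy bit (0x1000)
 * clears, giving up after 100 reads.
 */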
2260 static int tg3_wait_macro_done(struct tg3 *tp)
2261 {
2262 	int limit = 100;
2263 
2264 	while (limit--) {
2265 		u32 tmp32;
2266 
2267 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2268 			if ((tmp32 & 0x1000) == 0)
2269 				break;
2270 		}
2271 	}
2272 	if (limit < 0)
2273 		return -EBUSY;
2274 
2275 	return 0;
2276 }
2277 
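/* Write a known test pattern into each of the four DSP channels and
 * read it back for verification.  A macro timeout sets *resetp so
 * the caller can retry after a fresh PHY reset; a readback mismatch
 * programs a recovery sequence and returns -EBUSY.
 */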
2278 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2279 {
2280 	static const u32 test_pat[4][6] = {
2281 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2282 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2283 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2284 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2285 	};
2286 	int chan;
2287 
2288 	for (chan = 0; chan < 4; chan++) {
2289 		int i;
2290 
2291 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2292 			     (chan * 0x2000) | 0x0200);
2293 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2294 
2295 		for (i = 0; i < 6; i++)
2296 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2297 				     test_pat[chan][i]);
2298 
2299 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2300 		if (tg3_wait_macro_done(tp)) {
2301 			*resetp = 1;
2302 			return -EBUSY;
2303 		}
2304 
2305 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2306 			     (chan * 0x2000) | 0x0200);
2307 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2308 		if (tg3_wait_macro_done(tp)) {
2309 			*resetp = 1;
2310 			return -EBUSY;
2311 		}
2312 
2313 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2314 		if (tg3_wait_macro_done(tp)) {
2315 			*resetp = 1;
2316 			return -EBUSY;
2317 		}
2318 
2319 		for (i = 0; i < 6; i += 2) {
2320 			u32 low, high;
2321 
2322 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2323 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2324 			    tg3_wait_macro_done(tp)) {
2325 				*resetp = 1;
2326 				return -EBUSY;
2327 			}
2328 			low &= 0x7fff;
2329 			high &= 0x000f;
2330 			if (low != test_pat[chan][i] ||
2331 			    high != test_pat[chan][i+1]) {
2332 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2333 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2334 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2335 
2336 				return -EBUSY;
2337 			}
2338 		}
2339 	}
2340 
2341 	return 0;
2342 }
2343 
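/* Zero out the test pattern in all four DSP channels. */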
2344 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2345 {
2346 	int chan;
2347 
2348 	for (chan = 0; chan < 4; chan++) {
2349 		int i;
2350 
2351 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2352 			     (chan * 0x2000) | 0x0200);
2353 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2354 		for (i = 0; i < 6; i++)
2355 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2356 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2357 		if (tg3_wait_macro_done(tp))
2358 			return -EBUSY;
2359 	}
2360 
2361 	return 0;
2362 }
2363 
2364 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2365 {
2366 	u32 reg32, phy9_orig;
2367 	int retries, do_phy_reset, err;
2368 
2369 	retries = 10;
2370 	do_phy_reset = 1;
2371 	do {
2372 		if (do_phy_reset) {
2373 			err = tg3_bmcr_reset(tp);
2374 			if (err)
2375 				return err;
2376 			do_phy_reset = 0;
2377 		}
2378 
2379 		/* Disable transmitter and interrupt.  */
2380 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2381 			continue;
2382 
2383 		reg32 |= 0x3000;
2384 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2385 
2386 		/* Set full-duplex, 1000 mbps.  */
2387 		tg3_writephy(tp, MII_BMCR,
2388 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2389 
2390 		/* Set to master mode.  */
2391 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2392 			continue;
2393 
2394 		tg3_writephy(tp, MII_CTRL1000,
2395 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2396 
2397 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2398 		if (err)
2399 			return err;
2400 
2401 		/* Block the PHY control access.  */
2402 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2403 
2404 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2405 		if (!err)
2406 			break;
2407 	} while (--retries);
2408 
2409 	err = tg3_phy_reset_chanpat(tp);
2410 	if (err)
2411 		return err;
2412 
2413 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2414 
2415 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2416 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2417 
2418 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2419 
2420 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2421 
2422 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2423 		reg32 &= ~0x3000;
2424 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2425 	} else if (!err)
2426 		err = -EBUSY;
2427 
2428 	return err;
2429 }
2430 
/* Reset the tigon3 PHY and apply the chip-specific workarounds
 * needed to bring it back to a fully operational state.
 */
2434 static int tg3_phy_reset(struct tg3 *tp)
2435 {
2436 	u32 val, cpmuctrl;
2437 	int err;
2438 
2439 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2440 		val = tr32(GRC_MISC_CFG);
2441 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2442 		udelay(40);
2443 	}
2444 	err  = tg3_readphy(tp, MII_BMSR, &val);
2445 	err |= tg3_readphy(tp, MII_BMSR, &val);
2446 	if (err != 0)
2447 		return -EBUSY;
2448 
2449 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2450 		netif_carrier_off(tp->dev);
2451 		tg3_link_report(tp);
2452 	}
2453 
2454 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2455 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2456 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2457 		err = tg3_phy_reset_5703_4_5(tp);
2458 		if (err)
2459 			return err;
2460 		goto out;
2461 	}
2462 
2463 	cpmuctrl = 0;
2464 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2465 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2466 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2467 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2468 			tw32(TG3_CPMU_CTRL,
2469 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2470 	}
2471 
2472 	err = tg3_bmcr_reset(tp);
2473 	if (err)
2474 		return err;
2475 
2476 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2477 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2478 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2479 
2480 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2481 	}
2482 
2483 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2484 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2485 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2486 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2487 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2488 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2489 			udelay(40);
2490 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2491 		}
2492 	}
2493 
2494 	if (tg3_flag(tp, 5717_PLUS) &&
2495 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2496 		return 0;
2497 
2498 	tg3_phy_apply_otp(tp);
2499 
2500 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2501 		tg3_phy_toggle_apd(tp, true);
2502 	else
2503 		tg3_phy_toggle_apd(tp, false);
2504 
2505 out:
2506 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2507 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2508 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2509 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2510 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2511 	}
2512 
2513 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2514 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2515 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2516 	}
2517 
2518 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2519 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2520 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2521 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2522 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2523 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2524 		}
2525 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2526 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2527 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2528 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2529 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2530 				tg3_writephy(tp, MII_TG3_TEST1,
2531 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2532 			} else
2533 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2534 
2535 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2536 		}
2537 	}
2538 
	/* Set the extended packet length bit (bit 14) on all chips
	 * that support jumbo frames.
	 */
2541 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2542 		/* Cannot do read-modify-write on 5401 */
2543 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2544 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2545 		/* Set bit 14 with read-modify-write to preserve other bits */
2546 		err = tg3_phy_auxctl_read(tp,
2547 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2548 		if (!err)
2549 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2550 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2551 	}
2552 
	/* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
	 * transmission of jumbo frames.
	 */
2556 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2557 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2558 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2559 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2560 	}
2561 
2562 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2563 		/* adjust output voltage */
2564 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2565 	}
2566 
2567 	tg3_phy_toggle_automdix(tp, 1);
2568 	tg3_phy_set_wirespeed(tp);
2569 	return 0;
2570 }
2571 
2572 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2573 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2574 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2575 					  TG3_GPIO_MSG_NEED_VAUX)
2576 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2577 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2578 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2579 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2580 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2581 
2582 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2583 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2584 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2585 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2586 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2587 
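/* Each PCI function owns a 4-bit nibble of the shared GPIO message
 * word (shift = 4 * function number).  Replace this function's
 * nibble with newstat and return the updated word so the caller can
 * inspect the state of its sibling functions.
 */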
2588 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2589 {
2590 	u32 status, shift;
2591 
2592 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2593 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2594 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2595 	else
2596 		status = tr32(TG3_CPMU_DRV_STATUS);
2597 
2598 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2599 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2600 	status |= (newstat << shift);
2601 
2602 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2603 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2604 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2605 	else
2606 		tw32(TG3_CPMU_DRV_STATUS, status);
2607 
2608 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2609 }
2610 
2611 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2612 {
2613 	if (!tg3_flag(tp, IS_NIC))
2614 		return 0;
2615 
2616 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2617 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2618 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2619 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2620 			return -EIO;
2621 
2622 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2623 
2624 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2625 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 
2627 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2628 	} else {
2629 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2630 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2631 	}
2632 
2633 	return 0;
2634 }
2635 
2636 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2637 {
2638 	u32 grc_local_ctrl;
2639 
2640 	if (!tg3_flag(tp, IS_NIC) ||
2641 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2642 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2643 		return;
2644 
2645 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2646 
2647 	tw32_wait_f(GRC_LOCAL_CTRL,
2648 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2649 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2650 
2651 	tw32_wait_f(GRC_LOCAL_CTRL,
2652 		    grc_local_ctrl,
2653 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2654 
2655 	tw32_wait_f(GRC_LOCAL_CTRL,
2656 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2657 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2658 }
2659 
2660 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2661 {
2662 	if (!tg3_flag(tp, IS_NIC))
2663 		return;
2664 
2665 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2666 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2667 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2668 			    (GRC_LCLCTRL_GPIO_OE0 |
2669 			     GRC_LCLCTRL_GPIO_OE1 |
2670 			     GRC_LCLCTRL_GPIO_OE2 |
2671 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2672 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2673 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2674 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2675 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2676 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2677 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2678 				     GRC_LCLCTRL_GPIO_OE1 |
2679 				     GRC_LCLCTRL_GPIO_OE2 |
2680 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2681 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2682 				     tp->grc_local_ctrl;
2683 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2684 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2685 
2686 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2687 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2688 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2689 
2690 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2691 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2692 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2693 	} else {
2694 		u32 no_gpio2;
2695 		u32 grc_local_ctrl = 0;
2696 
		/* Workaround to prevent overdrawing current. */
2698 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2699 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2700 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2701 				    grc_local_ctrl,
2702 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2703 		}
2704 
2705 		/* On 5753 and variants, GPIO2 cannot be used. */
2706 		no_gpio2 = tp->nic_sram_data_cfg &
2707 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2708 
2709 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2710 				  GRC_LCLCTRL_GPIO_OE1 |
2711 				  GRC_LCLCTRL_GPIO_OE2 |
2712 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2713 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2714 		if (no_gpio2) {
2715 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2716 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2717 		}
2718 		tw32_wait_f(GRC_LOCAL_CTRL,
2719 			    tp->grc_local_ctrl | grc_local_ctrl,
2720 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2721 
2722 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2723 
2724 		tw32_wait_f(GRC_LOCAL_CTRL,
2725 			    tp->grc_local_ctrl | grc_local_ctrl,
2726 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2727 
2728 		if (!no_gpio2) {
2729 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2730 			tw32_wait_f(GRC_LOCAL_CTRL,
2731 				    tp->grc_local_ctrl | grc_local_ctrl,
2732 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2733 		}
2734 	}
2735 }
2736 
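/* Arbitrate the auxiliary power source among the PCI functions of a
 * 5717-class device.  If another function's driver is still present,
 * leave the decision to it; otherwise switch to Vaux if any function
 * needs it, and fall back to Vmain if none do.
 */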
2737 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2738 {
2739 	u32 msg = 0;
2740 
2741 	/* Serialize power state transitions */
2742 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2743 		return;
2744 
2745 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2746 		msg = TG3_GPIO_MSG_NEED_VAUX;
2747 
2748 	msg = tg3_set_function_status(tp, msg);
2749 
2750 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2751 		goto done;
2752 
2753 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2754 		tg3_pwrsrc_switch_to_vaux(tp);
2755 	else
2756 		tg3_pwrsrc_die_with_vmain(tp);
2757 
2758 done:
2759 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2760 }
2761 
2762 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2763 {
2764 	bool need_vaux = false;
2765 
2766 	/* The GPIOs do something completely different on 57765. */
2767 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2768 		return;
2769 
2770 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2771 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2772 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2773 		tg3_frob_aux_power_5717(tp, include_wol ?
2774 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2775 		return;
2776 	}
2777 
2778 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2779 		struct net_device *dev_peer;
2780 
2781 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2782 
2783 		/* remove_one() may have been run on the peer. */
2784 		if (dev_peer) {
2785 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2786 
2787 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2788 				return;
2789 
2790 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2791 			    tg3_flag(tp_peer, ENABLE_ASF))
2792 				need_vaux = true;
2793 		}
2794 	}
2795 
2796 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2797 	    tg3_flag(tp, ENABLE_ASF))
2798 		need_vaux = true;
2799 
2800 	if (need_vaux)
2801 		tg3_pwrsrc_switch_to_vaux(tp);
2802 	else
2803 		tg3_pwrsrc_die_with_vmain(tp);
2804 }
2805 
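/* Decide whether MAC_MODE_LINK_POLARITY should be set on a 5700 for
 * the given link speed.  The required sense of the bit depends on
 * the LED mode and on whether a BCM5411 PHY is fitted.
 */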
2806 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2807 {
2808 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2809 		return 1;
2810 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2811 		if (speed != SPEED_10)
2812 			return 1;
2813 	} else if (speed == SPEED_10)
2814 		return 1;
2815 
2816 	return 0;
2817 }
2818 
2819 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2820 {
2821 	u32 val;
2822 
2823 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2824 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2825 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2826 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2827 
2828 			sg_dig_ctrl |=
2829 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2830 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2831 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2832 		}
2833 		return;
2834 	}
2835 
2836 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2837 		tg3_bmcr_reset(tp);
2838 		val = tr32(GRC_MISC_CFG);
2839 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2840 		udelay(40);
2841 		return;
2842 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2843 		u32 phytest;
2844 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2845 			u32 phy;
2846 
2847 			tg3_writephy(tp, MII_ADVERTISE, 0);
2848 			tg3_writephy(tp, MII_BMCR,
2849 				     BMCR_ANENABLE | BMCR_ANRESTART);
2850 
2851 			tg3_writephy(tp, MII_TG3_FET_TEST,
2852 				     phytest | MII_TG3_FET_SHADOW_EN);
2853 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2854 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2855 				tg3_writephy(tp,
2856 					     MII_TG3_FET_SHDW_AUXMODE4,
2857 					     phy);
2858 			}
2859 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2860 		}
2861 		return;
2862 	} else if (do_low_power) {
2863 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2864 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2865 
2866 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2867 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2868 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2869 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2870 	}
2871 
2872 	/* The PHY should not be powered down on some chips because
2873 	 * of bugs.
2874 	 */
2875 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2876 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2877 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2878 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2879 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2880 	     !tp->pci_fn))
2881 		return;
2882 
2883 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2884 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2885 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2886 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2887 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2888 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2889 	}
2890 
2891 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2892 }
2893 
2894 /* tp->lock is held. */
2895 static int tg3_nvram_lock(struct tg3 *tp)
2896 {
2897 	if (tg3_flag(tp, NVRAM)) {
2898 		int i;
2899 
2900 		if (tp->nvram_lock_cnt == 0) {
2901 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2902 			for (i = 0; i < 8000; i++) {
2903 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2904 					break;
2905 				udelay(20);
2906 			}
2907 			if (i == 8000) {
2908 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2909 				return -ENODEV;
2910 			}
2911 		}
2912 		tp->nvram_lock_cnt++;
2913 	}
2914 	return 0;
2915 }
2916 
2917 /* tp->lock is held. */
2918 static void tg3_nvram_unlock(struct tg3 *tp)
2919 {
2920 	if (tg3_flag(tp, NVRAM)) {
2921 		if (tp->nvram_lock_cnt > 0)
2922 			tp->nvram_lock_cnt--;
2923 		if (tp->nvram_lock_cnt == 0)
2924 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2925 	}
2926 }
2927 
2928 /* tp->lock is held. */
2929 static void tg3_enable_nvram_access(struct tg3 *tp)
2930 {
2931 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2932 		u32 nvaccess = tr32(NVRAM_ACCESS);
2933 
2934 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2935 	}
2936 }
2937 
2938 /* tp->lock is held. */
2939 static void tg3_disable_nvram_access(struct tg3 *tp)
2940 {
2941 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2942 		u32 nvaccess = tr32(NVRAM_ACCESS);
2943 
2944 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2945 	}
2946 }
2947 
2948 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2949 					u32 offset, u32 *val)
2950 {
2951 	u32 tmp;
2952 	int i;
2953 
2954 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2955 		return -EINVAL;
2956 
2957 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2958 					EEPROM_ADDR_DEVID_MASK |
2959 					EEPROM_ADDR_READ);
2960 	tw32(GRC_EEPROM_ADDR,
2961 	     tmp |
2962 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2963 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2964 	      EEPROM_ADDR_ADDR_MASK) |
2965 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2966 
2967 	for (i = 0; i < 1000; i++) {
2968 		tmp = tr32(GRC_EEPROM_ADDR);
2969 
2970 		if (tmp & EEPROM_ADDR_COMPLETE)
2971 			break;
2972 		msleep(1);
2973 	}
2974 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2975 		return -EBUSY;
2976 
2977 	tmp = tr32(GRC_EEPROM_DATA);
2978 
2979 	/*
2980 	 * The data will always be opposite the native endian
2981 	 * format.  Perform a blind byteswap to compensate.
2982 	 */
2983 	*val = swab32(tmp);
2984 
2985 	return 0;
2986 }
2987 
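/* The poll loop in tg3_nvram_exec_cmd() waits 10 usec per iteration,
 * so this bounds a single NVRAM command at roughly 100 msec.
 */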
2988 #define NVRAM_CMD_TIMEOUT 10000
2989 
2990 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2991 {
2992 	int i;
2993 
2994 	tw32(NVRAM_CMD, nvram_cmd);
2995 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2996 		udelay(10);
2997 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2998 			udelay(10);
2999 			break;
3000 		}
3001 	}
3002 
3003 	if (i == NVRAM_CMD_TIMEOUT)
3004 		return -EBUSY;
3005 
3006 	return 0;
3007 }
3008 
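/* Translate a linear NVRAM offset into the Atmel AT45DB0x1B native
 * form, where the page number sits at bit ATMEL_AT45DB0X1B_PAGE_POS
 * and the byte offset within the page occupies the low bits.  For
 * example, with a 264-byte page, offset 300 becomes page 1, byte 36.
 */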
3009 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3010 {
3011 	if (tg3_flag(tp, NVRAM) &&
3012 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3013 	    tg3_flag(tp, FLASH) &&
3014 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3015 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3016 
3017 		addr = ((addr / tp->nvram_pagesize) <<
3018 			ATMEL_AT45DB0X1B_PAGE_POS) +
3019 		       (addr % tp->nvram_pagesize);
3020 
3021 	return addr;
3022 }
3023 
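/* Inverse of tg3_nvram_phys_addr(): convert an Atmel page/byte
 * address back into a linear NVRAM offset.
 */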
3024 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3025 {
3026 	if (tg3_flag(tp, NVRAM) &&
3027 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3028 	    tg3_flag(tp, FLASH) &&
3029 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3030 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3031 
3032 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3033 			tp->nvram_pagesize) +
3034 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3035 
3036 	return addr;
3037 }
3038 
3039 /* NOTE: Data read in from NVRAM is byteswapped according to
3040  * the byteswapping settings for all other register accesses.
3041  * tg3 devices are BE devices, so on a BE machine, the data
3042  * returned will be exactly as it is seen in NVRAM.  On a LE
3043  * machine, the 32-bit value will be byteswapped.
3044  */
3045 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3046 {
3047 	int ret;
3048 
3049 	if (!tg3_flag(tp, NVRAM))
3050 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3051 
3052 	offset = tg3_nvram_phys_addr(tp, offset);
3053 
3054 	if (offset > NVRAM_ADDR_MSK)
3055 		return -EINVAL;
3056 
3057 	ret = tg3_nvram_lock(tp);
3058 	if (ret)
3059 		return ret;
3060 
3061 	tg3_enable_nvram_access(tp);
3062 
3063 	tw32(NVRAM_ADDR, offset);
3064 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3065 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3066 
3067 	if (ret == 0)
3068 		*val = tr32(NVRAM_RDDATA);
3069 
3070 	tg3_disable_nvram_access(tp);
3071 
3072 	tg3_nvram_unlock(tp);
3073 
3074 	return ret;
3075 }
3076 
3077 /* Ensures NVRAM data is in bytestream format. */
3078 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3079 {
3080 	u32 v;
3081 	int res = tg3_nvram_read(tp, offset, &v);
3082 	if (!res)
3083 		*val = cpu_to_be32(v);
3084 	return res;
3085 }
3086 
3087 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3088 				    u32 offset, u32 len, u8 *buf)
3089 {
3090 	int i, j, rc = 0;
3091 	u32 val;
3092 
3093 	for (i = 0; i < len; i += 4) {
3094 		u32 addr;
3095 		__be32 data;
3096 
3097 		addr = offset + i;
3098 
3099 		memcpy(&data, buf + i, 4);
3100 
3101 		/*
3102 		 * The SEEPROM interface expects the data to always be opposite
3103 		 * the native endian format.  We accomplish this by reversing
3104 		 * all the operations that would have been performed on the
3105 		 * data from a call to tg3_nvram_read_be32().
3106 		 */
3107 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3108 
3109 		val = tr32(GRC_EEPROM_ADDR);
3110 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3111 
3112 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3113 			EEPROM_ADDR_READ);
3114 		tw32(GRC_EEPROM_ADDR, val |
3115 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3116 			(addr & EEPROM_ADDR_ADDR_MASK) |
3117 			EEPROM_ADDR_START |
3118 			EEPROM_ADDR_WRITE);
3119 
3120 		for (j = 0; j < 1000; j++) {
3121 			val = tr32(GRC_EEPROM_ADDR);
3122 
3123 			if (val & EEPROM_ADDR_COMPLETE)
3124 				break;
3125 			msleep(1);
3126 		}
3127 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3128 			rc = -EBUSY;
3129 			break;
3130 		}
3131 	}
3132 
3133 	return rc;
3134 }
3135 
3136 /* offset and length are dword aligned */
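/* Unbuffered flash parts can only be erased a page at a time, so the
 * write is done read-modify-write: read each affected page into a
 * bounce buffer, merge in the new data, erase the page, then program
 * the whole page back.
 */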
3137 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3138 		u8 *buf)
3139 {
3140 	int ret = 0;
3141 	u32 pagesize = tp->nvram_pagesize;
3142 	u32 pagemask = pagesize - 1;
3143 	u32 nvram_cmd;
3144 	u8 *tmp;
3145 
3146 	tmp = kmalloc(pagesize, GFP_KERNEL);
3147 	if (tmp == NULL)
3148 		return -ENOMEM;
3149 
3150 	while (len) {
3151 		int j;
3152 		u32 phy_addr, page_off, size;
3153 
3154 		phy_addr = offset & ~pagemask;
3155 
3156 		for (j = 0; j < pagesize; j += 4) {
3157 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3158 						  (__be32 *) (tmp + j));
3159 			if (ret)
3160 				break;
3161 		}
3162 		if (ret)
3163 			break;
3164 
3165 		page_off = offset & pagemask;
3166 		size = pagesize;
3167 		if (len < size)
3168 			size = len;
3169 
3170 		len -= size;
3171 
3172 		memcpy(tmp + page_off, buf, size);
3173 
3174 		offset = offset + (pagesize - page_off);
3175 
3176 		tg3_enable_nvram_access(tp);
3177 
3178 		/*
3179 		 * Before we can erase the flash page, we need
3180 		 * to issue a special "write enable" command.
3181 		 */
3182 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3183 
3184 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3185 			break;
3186 
3187 		/* Erase the target page */
3188 		tw32(NVRAM_ADDR, phy_addr);
3189 
3190 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3191 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3192 
3193 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3194 			break;
3195 
3196 		/* Issue another write enable to start the write. */
3197 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3198 
3199 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3200 			break;
3201 
3202 		for (j = 0; j < pagesize; j += 4) {
3203 			__be32 data;
3204 
3205 			data = *((__be32 *) (tmp + j));
3206 
3207 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3208 
3209 			tw32(NVRAM_ADDR, phy_addr + j);
3210 
3211 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3212 				NVRAM_CMD_WR;
3213 
3214 			if (j == 0)
3215 				nvram_cmd |= NVRAM_CMD_FIRST;
3216 			else if (j == (pagesize - 4))
3217 				nvram_cmd |= NVRAM_CMD_LAST;
3218 
3219 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3220 			if (ret)
3221 				break;
3222 		}
3223 		if (ret)
3224 			break;
3225 	}
3226 
3227 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3228 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3229 
3230 	kfree(tmp);
3231 
3232 	return ret;
3233 }
3234 
3235 /* offset and length are dword aligned */
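/* Buffered flash and EEPROM parts handle erasure internally, so each
 * dword is streamed out directly, with FIRST/LAST command framing at
 * page and buffer boundaries.
 */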
3236 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3237 		u8 *buf)
3238 {
3239 	int i, ret = 0;
3240 
3241 	for (i = 0; i < len; i += 4, offset += 4) {
3242 		u32 page_off, phy_addr, nvram_cmd;
3243 		__be32 data;
3244 
3245 		memcpy(&data, buf + i, 4);
3246 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3247 
3248 		page_off = offset % tp->nvram_pagesize;
3249 
3250 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3251 
3252 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3253 
3254 		if (page_off == 0 || i == 0)
3255 			nvram_cmd |= NVRAM_CMD_FIRST;
3256 		if (page_off == (tp->nvram_pagesize - 4))
3257 			nvram_cmd |= NVRAM_CMD_LAST;
3258 
3259 		if (i == (len - 4))
3260 			nvram_cmd |= NVRAM_CMD_LAST;
3261 
3262 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3263 		    !tg3_flag(tp, FLASH) ||
3264 		    !tg3_flag(tp, 57765_PLUS))
3265 			tw32(NVRAM_ADDR, phy_addr);
3266 
3267 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3268 		    !tg3_flag(tp, 5755_PLUS) &&
3269 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3270 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3271 			u32 cmd;
3272 
3273 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3274 			ret = tg3_nvram_exec_cmd(tp, cmd);
3275 			if (ret)
3276 				break;
3277 		}
3278 		if (!tg3_flag(tp, FLASH)) {
3279 			/* We always do complete word writes to eeprom. */
3280 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3281 		}
3282 
3283 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3284 		if (ret)
3285 			break;
3286 	}
3287 	return ret;
3288 }
3289 
3290 /* offset and length are dword aligned */
3291 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3292 {
3293 	int ret;
3294 
3295 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3296 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3297 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3298 		udelay(40);
3299 	}
3300 
3301 	if (!tg3_flag(tp, NVRAM)) {
3302 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3303 	} else {
3304 		u32 grc_mode;
3305 
3306 		ret = tg3_nvram_lock(tp);
3307 		if (ret)
3308 			return ret;
3309 
3310 		tg3_enable_nvram_access(tp);
3311 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3312 			tw32(NVRAM_WRITE1, 0x406);
3313 
3314 		grc_mode = tr32(GRC_MODE);
3315 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3316 
3317 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3318 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3319 				buf);
3320 		} else {
3321 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3322 				buf);
3323 		}
3324 
3325 		grc_mode = tr32(GRC_MODE);
3326 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3327 
3328 		tg3_disable_nvram_access(tp);
3329 		tg3_nvram_unlock(tp);
3330 	}
3331 
3332 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3333 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3334 		udelay(40);
3335 	}
3336 
3337 	return ret;
3338 }
3339 
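/* On-chip scratch memory (16KB each) used to stage firmware images
 * for the RX and TX RISC processors.
 */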
3340 #define RX_CPU_SCRATCH_BASE	0x30000
3341 #define RX_CPU_SCRATCH_SIZE	0x04000
3342 #define TX_CPU_SCRATCH_BASE	0x34000
3343 #define TX_CPU_SCRATCH_SIZE	0x04000
3344 
3345 /* tp->lock is held. */
3346 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3347 {
3348 	int i;
3349 
3350 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3351 
3352 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3353 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3354 
3355 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3356 		return 0;
3357 	}
3358 	if (offset == RX_CPU_BASE) {
3359 		for (i = 0; i < 10000; i++) {
3360 			tw32(offset + CPU_STATE, 0xffffffff);
3361 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3362 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3363 				break;
3364 		}
3365 
3366 		tw32(offset + CPU_STATE, 0xffffffff);
3367 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3368 		udelay(10);
3369 	} else {
3370 		for (i = 0; i < 10000; i++) {
3371 			tw32(offset + CPU_STATE, 0xffffffff);
3372 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3373 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3374 				break;
3375 		}
3376 	}
3377 
3378 	if (i >= 10000) {
3379 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3380 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3381 		return -ENODEV;
3382 	}
3383 
3384 	/* Clear firmware's nvram arbitration. */
3385 	if (tg3_flag(tp, NVRAM))
3386 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3387 	return 0;
3388 }
3389 
3390 struct fw_info {
3391 	unsigned int fw_base;
3392 	unsigned int fw_len;
3393 	const __be32 *fw_data;
3394 };
3395 
3396 /* tp->lock is held. */
3397 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3398 				 u32 cpu_scratch_base, int cpu_scratch_size,
3399 				 struct fw_info *info)
3400 {
3401 	int err, lock_err, i;
3402 	void (*write_op)(struct tg3 *, u32, u32);
3403 
3404 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: attempt to load TX cpu firmware on a 5705 or later chip\n",
			   __func__);
3408 		return -EINVAL;
3409 	}
3410 
3411 	if (tg3_flag(tp, 5705_PLUS))
3412 		write_op = tg3_write_mem;
3413 	else
3414 		write_op = tg3_write_indirect_reg32;
3415 
	/* Bootcode may still be loading at this point.  Acquire the
	 * NVRAM lock before halting the CPU.
	 */
3419 	lock_err = tg3_nvram_lock(tp);
3420 	err = tg3_halt_cpu(tp, cpu_base);
3421 	if (!lock_err)
3422 		tg3_nvram_unlock(tp);
3423 	if (err)
3424 		goto out;
3425 
3426 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3427 		write_op(tp, cpu_scratch_base + i, 0);
3428 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3429 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3430 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3431 		write_op(tp, (cpu_scratch_base +
3432 			      (info->fw_base & 0xffff) +
3433 			      (i * sizeof(u32))),
3434 			      be32_to_cpu(info->fw_data[i]));
3435 
3436 	err = 0;
3437 
3438 out:
3439 	return err;
3440 }
3441 
3442 /* tp->lock is held. */
3443 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3444 {
3445 	struct fw_info info;
3446 	const __be32 *fw_data;
3447 	int err, i;
3448 
3449 	fw_data = (void *)tp->fw->data;
3450 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously from the
	 * start address.
	 */
3456 
3457 	info.fw_base = be32_to_cpu(fw_data[1]);
3458 	info.fw_len = tp->fw->size - 12;
3459 	info.fw_data = &fw_data[3];
3460 
3461 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3462 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3463 				    &info);
3464 	if (err)
3465 		return err;
3466 
3467 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3468 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3469 				    &info);
3470 	if (err)
3471 		return err;
3472 
	/* Now start up only the RX CPU. */
3474 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3475 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3476 
3477 	for (i = 0; i < 5; i++) {
3478 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3479 			break;
3480 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3481 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3482 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3483 		udelay(1000);
3484 	}
3485 	if (i >= 5) {
		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3489 		return -ENODEV;
3490 	}
3491 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3492 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3493 
3494 	return 0;
3495 }
3496 
3497 /* tp->lock is held. */
3498 static int tg3_load_tso_firmware(struct tg3 *tp)
3499 {
3500 	struct fw_info info;
3501 	const __be32 *fw_data;
3502 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3503 	int err, i;
3504 
3505 	if (tg3_flag(tp, HW_TSO_1) ||
3506 	    tg3_flag(tp, HW_TSO_2) ||
3507 	    tg3_flag(tp, HW_TSO_3))
3508 		return 0;
3509 
3510 	fw_data = (void *)tp->fw->data;
3511 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously from the
	 * start address.
	 */
3517 
3518 	info.fw_base = be32_to_cpu(fw_data[1]);
3519 	cpu_scratch_size = tp->fw_len;
3520 	info.fw_len = tp->fw->size - 12;
3521 	info.fw_data = &fw_data[3];
3522 
3523 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3524 		cpu_base = RX_CPU_BASE;
3525 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3526 	} else {
3527 		cpu_base = TX_CPU_BASE;
3528 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3529 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3530 	}
3531 
3532 	err = tg3_load_firmware_cpu(tp, cpu_base,
3533 				    cpu_scratch_base, cpu_scratch_size,
3534 				    &info);
3535 	if (err)
3536 		return err;
3537 
	/* Now start up the CPU. */
3539 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3540 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3541 
3542 	for (i = 0; i < 5; i++) {
3543 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3544 			break;
3545 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3546 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3547 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3548 		udelay(1000);
3549 	}
3550 	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s failed to set CPU PC: is %08x, should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3554 		return -ENODEV;
3555 	}
3556 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3557 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3558 	return 0;
}

3562 /* tp->lock is held. */
3563 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3564 {
3565 	u32 addr_high, addr_low;
3566 	int i;
3567 
3568 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3569 		     tp->dev->dev_addr[1]);
3570 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3571 		    (tp->dev->dev_addr[3] << 16) |
3572 		    (tp->dev->dev_addr[4] <<  8) |
3573 		    (tp->dev->dev_addr[5] <<  0));
3574 	for (i = 0; i < 4; i++) {
3575 		if (i == 1 && skip_mac_1)
3576 			continue;
3577 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3578 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3579 	}
3580 
3581 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3582 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3583 		for (i = 0; i < 12; i++) {
3584 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3585 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3586 		}
3587 	}
3588 
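	/* Seed the transmit backoff algorithm with a value derived
	 * from the MAC address, so that different NICs on the same
	 * segment tend to choose different backoff slots.
	 */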
3589 	addr_high = (tp->dev->dev_addr[0] +
3590 		     tp->dev->dev_addr[1] +
3591 		     tp->dev->dev_addr[2] +
3592 		     tp->dev->dev_addr[3] +
3593 		     tp->dev->dev_addr[4] +
3594 		     tp->dev->dev_addr[5]) &
3595 		TX_BACKOFF_SEED_MASK;
3596 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3597 }
3598 
3599 static void tg3_enable_register_access(struct tg3 *tp)
3600 {
3601 	/*
3602 	 * Make sure register accesses (indirect or otherwise) will function
3603 	 * correctly.
3604 	 */
3605 	pci_write_config_dword(tp->pdev,
3606 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3607 }
3608 
3609 static int tg3_power_up(struct tg3 *tp)
3610 {
3611 	int err;
3612 
3613 	tg3_enable_register_access(tp);
3614 
3615 	err = pci_set_power_state(tp->pdev, PCI_D0);
3616 	if (!err) {
3617 		/* Switch out of Vaux if it is a NIC */
3618 		tg3_pwrsrc_switch_to_vmain(tp);
3619 	} else {
3620 		netdev_err(tp->dev, "Transition to D0 failed\n");
3621 	}
3622 
3623 	return err;
3624 }
3625 
3626 static int tg3_setup_phy(struct tg3 *, int);
3627 
3628 static int tg3_power_down_prepare(struct tg3 *tp)
3629 {
3630 	u32 misc_host_ctrl;
3631 	bool device_should_wake, do_low_power;
3632 
3633 	tg3_enable_register_access(tp);
3634 
3635 	/* Restore the CLKREQ setting. */
3636 	if (tg3_flag(tp, CLKREQ_BUG)) {
3637 		u16 lnkctl;
3638 
3639 		pci_read_config_word(tp->pdev,
3640 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3641 				     &lnkctl);
3642 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3643 		pci_write_config_word(tp->pdev,
3644 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3645 				      lnkctl);
3646 	}
3647 
3648 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3649 	tw32(TG3PCI_MISC_HOST_CTRL,
3650 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3651 
3652 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3653 			     tg3_flag(tp, WOL_ENABLE);
3654 
3655 	if (tg3_flag(tp, USE_PHYLIB)) {
3656 		do_low_power = false;
3657 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3658 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3659 			struct phy_device *phydev;
3660 			u32 phyid, advertising;
3661 
3662 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3663 
3664 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3665 
3666 			tp->link_config.speed = phydev->speed;
3667 			tp->link_config.duplex = phydev->duplex;
3668 			tp->link_config.autoneg = phydev->autoneg;
3669 			tp->link_config.advertising = phydev->advertising;
3670 
3671 			advertising = ADVERTISED_TP |
3672 				      ADVERTISED_Pause |
3673 				      ADVERTISED_Autoneg |
3674 				      ADVERTISED_10baseT_Half;
3675 
3676 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3677 				if (tg3_flag(tp, WOL_SPEED_100MB))
3678 					advertising |=
3679 						ADVERTISED_100baseT_Half |
3680 						ADVERTISED_100baseT_Full |
3681 						ADVERTISED_10baseT_Full;
3682 				else
3683 					advertising |= ADVERTISED_10baseT_Full;
3684 			}
3685 
3686 			phydev->advertising = advertising;
3687 
3688 			phy_start_aneg(phydev);
3689 
3690 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3691 			if (phyid != PHY_ID_BCMAC131) {
3692 				phyid &= PHY_BCM_OUI_MASK;
3693 				if (phyid == PHY_BCM_OUI_1 ||
3694 				    phyid == PHY_BCM_OUI_2 ||
3695 				    phyid == PHY_BCM_OUI_3)
3696 					do_low_power = true;
3697 			}
3698 		}
3699 	} else {
3700 		do_low_power = true;
3701 
3702 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3703 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3704 
3705 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3706 			tg3_setup_phy(tp, 0);
3707 	}
3708 
3709 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3710 		u32 val;
3711 
3712 		val = tr32(GRC_VCPU_EXT_CTRL);
3713 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3714 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3715 		int i;
3716 		u32 val;
3717 
3718 		for (i = 0; i < 200; i++) {
3719 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3720 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3721 				break;
3722 			msleep(1);
3723 		}
3724 	}
3725 	if (tg3_flag(tp, WOL_CAP))
3726 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3727 						     WOL_DRV_STATE_SHUTDOWN |
3728 						     WOL_DRV_WOL |
3729 						     WOL_SET_MAGIC_PKT);
3730 
3731 	if (device_should_wake) {
3732 		u32 mac_mode;
3733 
3734 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3735 			if (do_low_power &&
3736 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3737 				tg3_phy_auxctl_write(tp,
3738 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3739 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3740 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3741 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3742 				udelay(40);
3743 			}
3744 
3745 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3746 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3747 			else
3748 				mac_mode = MAC_MODE_PORT_MODE_MII;
3749 
3750 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3751 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3752 			    ASIC_REV_5700) {
3753 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3754 					     SPEED_100 : SPEED_10;
3755 				if (tg3_5700_link_polarity(tp, speed))
3756 					mac_mode |= MAC_MODE_LINK_POLARITY;
3757 				else
3758 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3759 			}
3760 		} else {
3761 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3762 		}
3763 
3764 		if (!tg3_flag(tp, 5750_PLUS))
3765 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3766 
3767 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3768 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3769 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3770 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3771 
3772 		if (tg3_flag(tp, ENABLE_APE))
3773 			mac_mode |= MAC_MODE_APE_TX_EN |
3774 				    MAC_MODE_APE_RX_EN |
3775 				    MAC_MODE_TDE_ENABLE;
3776 
3777 		tw32_f(MAC_MODE, mac_mode);
3778 		udelay(100);
3779 
3780 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3781 		udelay(10);
3782 	}
3783 
3784 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3785 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3786 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3787 		u32 base_val;
3788 
3789 		base_val = tp->pci_clock_ctrl;
3790 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3791 			     CLOCK_CTRL_TXCLK_DISABLE);
3792 
3793 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3794 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3795 	} else if (tg3_flag(tp, 5780_CLASS) ||
3796 		   tg3_flag(tp, CPMU_PRESENT) ||
3797 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3798 		/* do nothing */
3799 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3800 		u32 newbits1, newbits2;
3801 
3802 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3803 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3804 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3805 				    CLOCK_CTRL_TXCLK_DISABLE |
3806 				    CLOCK_CTRL_ALTCLK);
3807 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3808 		} else if (tg3_flag(tp, 5705_PLUS)) {
3809 			newbits1 = CLOCK_CTRL_625_CORE;
3810 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3811 		} else {
3812 			newbits1 = CLOCK_CTRL_ALTCLK;
3813 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3814 		}
3815 
3816 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3817 			    40);
3818 
3819 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3820 			    40);
3821 
3822 		if (!tg3_flag(tp, 5705_PLUS)) {
3823 			u32 newbits3;
3824 
3825 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3826 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3827 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3828 					    CLOCK_CTRL_TXCLK_DISABLE |
3829 					    CLOCK_CTRL_44MHZ_CORE);
3830 			} else {
3831 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3832 			}
3833 
3834 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3835 				    tp->pci_clock_ctrl | newbits3, 40);
3836 		}
3837 	}
3838 
3839 	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3840 		tg3_power_down_phy(tp, do_low_power);
3841 
3842 	tg3_frob_aux_power(tp, true);
3843 
3844 	/* Workaround for unstable PLL clock */
3845 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3846 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3847 		u32 val = tr32(0x7d00);
3848 
3849 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3850 		tw32(0x7d00, val);
3851 		if (!tg3_flag(tp, ENABLE_ASF)) {
3852 			int err;
3853 
3854 			err = tg3_nvram_lock(tp);
3855 			tg3_halt_cpu(tp, RX_CPU_BASE);
3856 			if (!err)
3857 				tg3_nvram_unlock(tp);
3858 		}
3859 	}
3860 
3861 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3862 
3863 	return 0;
3864 }
3865 
3866 static void tg3_power_down(struct tg3 *tp)
3867 {
3868 	tg3_power_down_prepare(tp);
3869 
3870 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3871 	pci_set_power_state(tp->pdev, PCI_D3hot);
3872 }
3873 
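/* Decode the speed and duplex fields of the PHY AUX status register.
 * FET-style PHYs use a different encoding, handled in the default
 * case.
 */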
3874 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3875 {
3876 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3877 	case MII_TG3_AUX_STAT_10HALF:
3878 		*speed = SPEED_10;
3879 		*duplex = DUPLEX_HALF;
3880 		break;
3881 
3882 	case MII_TG3_AUX_STAT_10FULL:
3883 		*speed = SPEED_10;
3884 		*duplex = DUPLEX_FULL;
3885 		break;
3886 
3887 	case MII_TG3_AUX_STAT_100HALF:
3888 		*speed = SPEED_100;
3889 		*duplex = DUPLEX_HALF;
3890 		break;
3891 
3892 	case MII_TG3_AUX_STAT_100FULL:
3893 		*speed = SPEED_100;
3894 		*duplex = DUPLEX_FULL;
3895 		break;
3896 
3897 	case MII_TG3_AUX_STAT_1000HALF:
3898 		*speed = SPEED_1000;
3899 		*duplex = DUPLEX_HALF;
3900 		break;
3901 
3902 	case MII_TG3_AUX_STAT_1000FULL:
3903 		*speed = SPEED_1000;
3904 		*duplex = DUPLEX_FULL;
3905 		break;
3906 
3907 	default:
3908 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3909 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3910 				 SPEED_10;
3911 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3912 				  DUPLEX_HALF;
3913 			break;
3914 		}
3915 		*speed = SPEED_UNKNOWN;
3916 		*duplex = DUPLEX_UNKNOWN;
3917 		break;
3918 	}
3919 }
3920 
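/* Program the PHY advertisement registers (MII advertise, 1000BASE-T
 * control and, on EEE-capable devices, the clause 45 EEE
 * advertisement) from ethtool-style advertise and flow-control masks.
 */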
3921 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3922 {
3923 	int err = 0;
3924 	u32 val, new_adv;
3925 
3926 	new_adv = ADVERTISE_CSMA;
3927 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3928 	new_adv |= mii_advertise_flowctrl(flowctrl);
3929 
3930 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3931 	if (err)
3932 		goto done;
3933 
3934 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3935 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3936 
3937 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3938 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3939 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3940 
3941 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3942 		if (err)
3943 			goto done;
3944 	}
3945 
3946 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3947 		goto done;
3948 
3949 	tw32(TG3_CPMU_EEE_MODE,
3950 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3951 
3952 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3953 	if (!err) {
3954 		u32 err2;
3955 
3956 		val = 0;
3957 		/* Advertise 100-BaseTX EEE ability */
3958 		if (advertise & ADVERTISED_100baseT_Full)
3959 			val |= MDIO_AN_EEE_ADV_100TX;
3960 		/* Advertise 1000-BaseT EEE ability */
3961 		if (advertise & ADVERTISED_1000baseT_Full)
3962 			val |= MDIO_AN_EEE_ADV_1000T;
3963 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3964 		if (err)
3965 			val = 0;
3966 
3967 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3968 		case ASIC_REV_5717:
3969 		case ASIC_REV_57765:
3970 		case ASIC_REV_57766:
3971 		case ASIC_REV_5719:
3972 			/* If we advertised any EEE modes above... */
3973 			if (val)
3974 				val = MII_TG3_DSP_TAP26_ALNOKO |
3975 				      MII_TG3_DSP_TAP26_RMRXSTO |
3976 				      MII_TG3_DSP_TAP26_OPCSINPT;
3977 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3978 			/* Fall through */
3979 		case ASIC_REV_5720:
3980 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3981 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3982 						 MII_TG3_DSP_CH34TP2_HIBW01);
3983 		}
3984 
3985 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3986 		if (!err)
3987 			err = err2;
3988 	}
3989 
3990 done:
3991 	return err;
3992 }
3993 
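/* Begin link negotiation on a copper PHY: restart autoneg with the
 * configured (or, in low-power mode, a WoL-safe) advertisement, or
 * force the requested speed and duplex when autoneg is disabled.
 */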
3994 static void tg3_phy_copper_begin(struct tg3 *tp)
3995 {
3996 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3997 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3998 		u32 adv, fc;
3999 
4000 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4001 			adv = ADVERTISED_10baseT_Half |
4002 			      ADVERTISED_10baseT_Full;
4003 			if (tg3_flag(tp, WOL_SPEED_100MB))
4004 				adv |= ADVERTISED_100baseT_Half |
4005 				       ADVERTISED_100baseT_Full;
4006 
4007 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4008 		} else {
4009 			adv = tp->link_config.advertising;
4010 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4011 				adv &= ~(ADVERTISED_1000baseT_Half |
4012 					 ADVERTISED_1000baseT_Full);
4013 
4014 			fc = tp->link_config.flowctrl;
4015 		}
4016 
4017 		tg3_phy_autoneg_cfg(tp, adv, fc);
4018 
4019 		tg3_writephy(tp, MII_BMCR,
4020 			     BMCR_ANENABLE | BMCR_ANRESTART);
4021 	} else {
4022 		int i;
4023 		u32 bmcr, orig_bmcr;
4024 
4025 		tp->link_config.active_speed = tp->link_config.speed;
4026 		tp->link_config.active_duplex = tp->link_config.duplex;
4027 
4028 		bmcr = 0;
4029 		switch (tp->link_config.speed) {
4030 		default:
4031 		case SPEED_10:
4032 			break;
4033 
4034 		case SPEED_100:
4035 			bmcr |= BMCR_SPEED100;
4036 			break;
4037 
4038 		case SPEED_1000:
4039 			bmcr |= BMCR_SPEED1000;
4040 			break;
4041 		}
4042 
4043 		if (tp->link_config.duplex == DUPLEX_FULL)
4044 			bmcr |= BMCR_FULLDPLX;
4045 
4046 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4047 		    (bmcr != orig_bmcr)) {
4048 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4049 			for (i = 0; i < 1500; i++) {
4050 				u32 tmp;
4051 
4052 				udelay(10);
4053 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4054 				    tg3_readphy(tp, MII_BMSR, &tmp))
4055 					continue;
4056 				if (!(tmp & BMSR_LSTATUS)) {
4057 					udelay(40);
4058 					break;
4059 				}
4060 			}
4061 			tg3_writephy(tp, MII_BMCR, bmcr);
4062 			udelay(40);
4063 		}
4064 	}
4065 }
4066 
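/* Apply the DSP register fixups required by the BCM5401 PHY. */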
4067 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4068 {
4069 	int err;
4070 
4071 	/* Turn off tap power management. */
4072 	/* Set the extended packet length bit. */
4073 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4074 
4075 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4076 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4077 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4078 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4079 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4080 
4081 	udelay(40);
4082 
4083 	return err;
4084 }
4085 
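/* Check that the PHY advertisement registers still match the
 * requested link configuration; a false return causes the caller to
 * reconfigure and restart autoneg.
 */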
4086 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4087 {
4088 	u32 advmsk, tgtadv, advertising;
4089 
4090 	advertising = tp->link_config.advertising;
4091 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4092 
4093 	advmsk = ADVERTISE_ALL;
4094 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4095 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4096 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4097 	}
4098 
4099 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4100 		return false;
4101 
4102 	if ((*lcladv & advmsk) != tgtadv)
4103 		return false;
4104 
4105 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4106 		u32 tg3_ctrl;
4107 
4108 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4109 
4110 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4111 			return false;
4112 
4113 		if (tgtadv &&
4114 		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4115 		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4116 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4117 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4118 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4119 		} else {
4120 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4121 		}
4122 
4123 		if (tg3_ctrl != tgtadv)
4124 			return false;
4125 	}
4126 
4127 	return true;
4128 }
4129 
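/* Fetch the link partner's advertisement (including 1000BASE-T
 * status when applicable) and cache the ethtool translation in
 * tp->link_config.rmt_adv.
 */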
4130 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4131 {
4132 	u32 lpeth = 0;
4133 
4134 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4135 		u32 val;
4136 
4137 		if (tg3_readphy(tp, MII_STAT1000, &val))
4138 			return false;
4139 
4140 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4141 	}
4142 
4143 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4144 		return false;
4145 
4146 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4147 	tp->link_config.rmt_adv = lpeth;
4148 
4149 	return true;
4150 }
4151 
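/* Main link bring-up path for copper PHYs: apply chip-specific PHY
 * workarounds, wait for link, derive speed, duplex and flow control
 * from the negotiation results, and program the MAC to match.
 */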
4152 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4153 {
4154 	int current_link_up;
4155 	u32 bmsr, val;
4156 	u32 lcl_adv, rmt_adv;
4157 	u16 current_speed;
4158 	u8 current_duplex;
4159 	int i, err;
4160 
4161 	tw32(MAC_EVENT, 0);
4162 
4163 	tw32_f(MAC_STATUS,
4164 	     (MAC_STATUS_SYNC_CHANGED |
4165 	      MAC_STATUS_CFG_CHANGED |
4166 	      MAC_STATUS_MI_COMPLETION |
4167 	      MAC_STATUS_LNKSTATE_CHANGED));
4168 	udelay(40);
4169 
4170 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4171 		tw32_f(MAC_MI_MODE,
4172 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4173 		udelay(80);
4174 	}
4175 
4176 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4177 
4178 	/* Some third-party PHYs need to be reset when the link
4179 	 * goes down.
4180 	 */
4181 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4182 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4183 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4184 	    netif_carrier_ok(tp->dev)) {
4185 		tg3_readphy(tp, MII_BMSR, &bmsr);
4186 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4187 		    !(bmsr & BMSR_LSTATUS))
4188 			force_reset = 1;
4189 	}
4190 	if (force_reset)
4191 		tg3_phy_reset(tp);
4192 
4193 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4194 		tg3_readphy(tp, MII_BMSR, &bmsr);
4195 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4196 		    !tg3_flag(tp, INIT_COMPLETE))
4197 			bmsr = 0;
4198 
4199 		if (!(bmsr & BMSR_LSTATUS)) {
4200 			err = tg3_init_5401phy_dsp(tp);
4201 			if (err)
4202 				return err;
4203 
4204 			tg3_readphy(tp, MII_BMSR, &bmsr);
4205 			for (i = 0; i < 1000; i++) {
4206 				udelay(10);
4207 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4208 				    (bmsr & BMSR_LSTATUS)) {
4209 					udelay(40);
4210 					break;
4211 				}
4212 			}
4213 
4214 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4215 			    TG3_PHY_REV_BCM5401_B0 &&
4216 			    !(bmsr & BMSR_LSTATUS) &&
4217 			    tp->link_config.active_speed == SPEED_1000) {
4218 				err = tg3_phy_reset(tp);
4219 				if (!err)
4220 					err = tg3_init_5401phy_dsp(tp);
4221 				if (err)
4222 					return err;
4223 			}
4224 		}
4225 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4226 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4227 		/* 5701 {A0,B0} CRC bug workaround */
4228 		tg3_writephy(tp, 0x15, 0x0a75);
4229 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4230 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4231 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4232 	}
4233 
4234 	/* Clear pending interrupts... */
4235 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4236 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4237 
4238 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4239 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4240 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4241 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4242 
4243 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4244 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4245 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4246 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4247 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4248 		else
4249 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4250 	}
4251 
4252 	current_link_up = 0;
4253 	current_speed = SPEED_UNKNOWN;
4254 	current_duplex = DUPLEX_UNKNOWN;
4255 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4256 	tp->link_config.rmt_adv = 0;
4257 
4258 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4259 		err = tg3_phy_auxctl_read(tp,
4260 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4261 					  &val);
4262 		if (!err && !(val & (1 << 10))) {
4263 			tg3_phy_auxctl_write(tp,
4264 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4265 					     val | (1 << 10));
4266 			goto relink;
4267 		}
4268 	}
4269 
4270 	bmsr = 0;
4271 	for (i = 0; i < 100; i++) {
4272 		tg3_readphy(tp, MII_BMSR, &bmsr);
4273 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4274 		    (bmsr & BMSR_LSTATUS))
4275 			break;
4276 		udelay(40);
4277 	}
4278 
4279 	if (bmsr & BMSR_LSTATUS) {
4280 		u32 aux_stat, bmcr;
4281 
4282 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4283 		for (i = 0; i < 2000; i++) {
4284 			udelay(10);
4285 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4286 			    aux_stat)
4287 				break;
4288 		}
4289 
4290 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4291 					     &current_speed,
4292 					     &current_duplex);
4293 
4294 		bmcr = 0;
4295 		for (i = 0; i < 200; i++) {
4296 			tg3_readphy(tp, MII_BMCR, &bmcr);
4297 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4298 				continue;
4299 			if (bmcr && bmcr != 0x7fff)
4300 				break;
4301 			udelay(10);
4302 		}
4303 
4304 		lcl_adv = 0;
4305 		rmt_adv = 0;
4306 
4307 		tp->link_config.active_speed = current_speed;
4308 		tp->link_config.active_duplex = current_duplex;
4309 
4310 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4311 			if ((bmcr & BMCR_ANENABLE) &&
4312 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4313 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4314 				current_link_up = 1;
4315 		} else {
4316 			if (!(bmcr & BMCR_ANENABLE) &&
4317 			    tp->link_config.speed == current_speed &&
4318 			    tp->link_config.duplex == current_duplex &&
4319 			    tp->link_config.flowctrl ==
4320 			    tp->link_config.active_flowctrl) {
4321 				current_link_up = 1;
4322 			}
4323 		}
4324 
4325 		if (current_link_up == 1 &&
4326 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4327 			u32 reg, bit;
4328 
4329 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4330 				reg = MII_TG3_FET_GEN_STAT;
4331 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4332 			} else {
4333 				reg = MII_TG3_EXT_STAT;
4334 				bit = MII_TG3_EXT_STAT_MDIX;
4335 			}
4336 
4337 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4338 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4339 
4340 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4341 		}
4342 	}
4343 
4344 relink:
4345 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4346 		tg3_phy_copper_begin(tp);
4347 
4348 		tg3_readphy(tp, MII_BMSR, &bmsr);
4349 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4350 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4351 			current_link_up = 1;
4352 	}
4353 
4354 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4355 	if (current_link_up == 1) {
4356 		if (tp->link_config.active_speed == SPEED_100 ||
4357 		    tp->link_config.active_speed == SPEED_10)
4358 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4359 		else
4360 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4361 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4362 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4363 	else
4364 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4365 
4366 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4367 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4368 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4369 
4370 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4371 		if (current_link_up == 1 &&
4372 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4373 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4374 		else
4375 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4376 	}
4377 
4378 	/* For reasons that remain unclear, the Netgear GA302T PHY
4379 	 * does not send or receive packets without this setting.
4380 	 */
4381 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4382 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4383 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4384 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4385 		udelay(80);
4386 	}
4387 
4388 	tw32_f(MAC_MODE, tp->mac_mode);
4389 	udelay(40);
4390 
4391 	tg3_phy_eee_adjust(tp, current_link_up);
4392 
4393 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4394 		/* Link changes are polled via timer, so disable the event. */
4395 		tw32_f(MAC_EVENT, 0);
4396 	} else {
4397 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4398 	}
4399 	udelay(40);
4400 
4401 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4402 	    current_link_up == 1 &&
4403 	    tp->link_config.active_speed == SPEED_1000 &&
4404 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4405 		udelay(120);
4406 		tw32_f(MAC_STATUS,
4407 		     (MAC_STATUS_SYNC_CHANGED |
4408 		      MAC_STATUS_CFG_CHANGED));
4409 		udelay(40);
4410 		tg3_write_mem(tp,
4411 			      NIC_SRAM_FIRMWARE_MBOX,
4412 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4413 	}
4414 
4415 	/* Prevent send BD corruption by disabling CLKREQ at 10/100 speeds. */
4416 	if (tg3_flag(tp, CLKREQ_BUG)) {
4417 		u16 oldlnkctl, newlnkctl;
4418 
4419 		pci_read_config_word(tp->pdev,
4420 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4421 				     &oldlnkctl);
4422 		if (tp->link_config.active_speed == SPEED_100 ||
4423 		    tp->link_config.active_speed == SPEED_10)
4424 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4425 		else
4426 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4427 		if (newlnkctl != oldlnkctl)
4428 			pci_write_config_word(tp->pdev,
4429 					      pci_pcie_cap(tp->pdev) +
4430 					      PCI_EXP_LNKCTL, newlnkctl);
4431 	}
4432 
4433 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4434 		if (current_link_up)
4435 			netif_carrier_on(tp->dev);
4436 		else
4437 			netif_carrier_off(tp->dev);
4438 		tg3_link_report(tp);
4439 	}
4440 
4441 	return 0;
4442 }
4443 
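/* Software 1000BASE-X autonegotiation state machine, apparently
 * modeled on the IEEE 802.3 clause 37 arbitration state diagram; the
 * MR_* flags below mirror the corresponding management variables.
 */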
4444 struct tg3_fiber_aneginfo {
4445 	int state;
4446 #define ANEG_STATE_UNKNOWN		0
4447 #define ANEG_STATE_AN_ENABLE		1
4448 #define ANEG_STATE_RESTART_INIT		2
4449 #define ANEG_STATE_RESTART		3
4450 #define ANEG_STATE_DISABLE_LINK_OK	4
4451 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4452 #define ANEG_STATE_ABILITY_DETECT	6
4453 #define ANEG_STATE_ACK_DETECT_INIT	7
4454 #define ANEG_STATE_ACK_DETECT		8
4455 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4456 #define ANEG_STATE_COMPLETE_ACK		10
4457 #define ANEG_STATE_IDLE_DETECT_INIT	11
4458 #define ANEG_STATE_IDLE_DETECT		12
4459 #define ANEG_STATE_LINK_OK		13
4460 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4461 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4462 
4463 	u32 flags;
4464 #define MR_AN_ENABLE		0x00000001
4465 #define MR_RESTART_AN		0x00000002
4466 #define MR_AN_COMPLETE		0x00000004
4467 #define MR_PAGE_RX		0x00000008
4468 #define MR_NP_LOADED		0x00000010
4469 #define MR_TOGGLE_TX		0x00000020
4470 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4471 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4472 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4473 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4474 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4475 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4476 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4477 #define MR_TOGGLE_RX		0x00002000
4478 #define MR_NP_RX		0x00004000
4479 
4480 #define MR_LINK_OK		0x80000000
4481 
4482 	unsigned long link_time, cur_time;
4483 
4484 	u32 ability_match_cfg;
4485 	int ability_match_count;
4486 
4487 	char ability_match, idle_match, ack_match;
4488 
4489 	u32 txconfig, rxconfig;
4490 #define ANEG_CFG_NP		0x00000080
4491 #define ANEG_CFG_ACK		0x00000040
4492 #define ANEG_CFG_RF2		0x00000020
4493 #define ANEG_CFG_RF1		0x00000010
4494 #define ANEG_CFG_PS2		0x00000001
4495 #define ANEG_CFG_PS1		0x00008000
4496 #define ANEG_CFG_HD		0x00004000
4497 #define ANEG_CFG_FD		0x00002000
4498 #define ANEG_CFG_INVAL		0x00001f06
4499 
4500 };
4501 #define ANEG_OK		0
4502 #define ANEG_DONE	1
4503 #define ANEG_TIMER_ENAB	2
4504 #define ANEG_FAILED	-1
4505 
4506 #define ANEG_STATE_SETTLE_TIME	10000
4507 
4508 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4509 				   struct tg3_fiber_aneginfo *ap)
4510 {
4511 	u16 flowctrl;
4512 	unsigned long delta;
4513 	u32 rx_cfg_reg;
4514 	int ret;
4515 
4516 	if (ap->state == ANEG_STATE_UNKNOWN) {
4517 		ap->rxconfig = 0;
4518 		ap->link_time = 0;
4519 		ap->cur_time = 0;
4520 		ap->ability_match_cfg = 0;
4521 		ap->ability_match_count = 0;
4522 		ap->ability_match = 0;
4523 		ap->idle_match = 0;
4524 		ap->ack_match = 0;
4525 	}
4526 	ap->cur_time++;
4527 
4528 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4529 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4530 
4531 		if (rx_cfg_reg != ap->ability_match_cfg) {
4532 			ap->ability_match_cfg = rx_cfg_reg;
4533 			ap->ability_match = 0;
4534 			ap->ability_match_count = 0;
4535 		} else {
4536 			if (++ap->ability_match_count > 1) {
4537 				ap->ability_match = 1;
4538 				ap->ability_match_cfg = rx_cfg_reg;
4539 			}
4540 		}
4541 		if (rx_cfg_reg & ANEG_CFG_ACK)
4542 			ap->ack_match = 1;
4543 		else
4544 			ap->ack_match = 0;
4545 
4546 		ap->idle_match = 0;
4547 	} else {
4548 		ap->idle_match = 1;
4549 		ap->ability_match_cfg = 0;
4550 		ap->ability_match_count = 0;
4551 		ap->ability_match = 0;
4552 		ap->ack_match = 0;
4553 
4554 		rx_cfg_reg = 0;
4555 	}
4556 
4557 	ap->rxconfig = rx_cfg_reg;
4558 	ret = ANEG_OK;
4559 
4560 	switch (ap->state) {
4561 	case ANEG_STATE_UNKNOWN:
4562 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4563 			ap->state = ANEG_STATE_AN_ENABLE;
4564 
4565 		/* fall through */
4566 	case ANEG_STATE_AN_ENABLE:
4567 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4568 		if (ap->flags & MR_AN_ENABLE) {
4569 			ap->link_time = 0;
4570 			ap->cur_time = 0;
4571 			ap->ability_match_cfg = 0;
4572 			ap->ability_match_count = 0;
4573 			ap->ability_match = 0;
4574 			ap->idle_match = 0;
4575 			ap->ack_match = 0;
4576 
4577 			ap->state = ANEG_STATE_RESTART_INIT;
4578 		} else {
4579 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4580 		}
4581 		break;
4582 
4583 	case ANEG_STATE_RESTART_INIT:
4584 		ap->link_time = ap->cur_time;
4585 		ap->flags &= ~(MR_NP_LOADED);
4586 		ap->txconfig = 0;
4587 		tw32(MAC_TX_AUTO_NEG, 0);
4588 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4589 		tw32_f(MAC_MODE, tp->mac_mode);
4590 		udelay(40);
4591 
4592 		ret = ANEG_TIMER_ENAB;
4593 		ap->state = ANEG_STATE_RESTART;
4594 
4595 		/* fall through */
4596 	case ANEG_STATE_RESTART:
4597 		delta = ap->cur_time - ap->link_time;
4598 		if (delta > ANEG_STATE_SETTLE_TIME)
4599 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4600 		else
4601 			ret = ANEG_TIMER_ENAB;
4602 		break;
4603 
4604 	case ANEG_STATE_DISABLE_LINK_OK:
4605 		ret = ANEG_DONE;
4606 		break;
4607 
4608 	case ANEG_STATE_ABILITY_DETECT_INIT:
4609 		ap->flags &= ~(MR_TOGGLE_TX);
4610 		ap->txconfig = ANEG_CFG_FD;
4611 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4612 		if (flowctrl & ADVERTISE_1000XPAUSE)
4613 			ap->txconfig |= ANEG_CFG_PS1;
4614 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4615 			ap->txconfig |= ANEG_CFG_PS2;
4616 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4617 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4618 		tw32_f(MAC_MODE, tp->mac_mode);
4619 		udelay(40);
4620 
4621 		ap->state = ANEG_STATE_ABILITY_DETECT;
4622 		break;
4623 
4624 	case ANEG_STATE_ABILITY_DETECT:
4625 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4626 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4627 		break;
4628 
4629 	case ANEG_STATE_ACK_DETECT_INIT:
4630 		ap->txconfig |= ANEG_CFG_ACK;
4631 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4632 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4633 		tw32_f(MAC_MODE, tp->mac_mode);
4634 		udelay(40);
4635 
4636 		ap->state = ANEG_STATE_ACK_DETECT;
4637 
4638 		/* fall through */
4639 	case ANEG_STATE_ACK_DETECT:
4640 		if (ap->ack_match != 0) {
4641 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4642 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4643 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4644 			} else {
4645 				ap->state = ANEG_STATE_AN_ENABLE;
4646 			}
4647 		} else if (ap->ability_match != 0 &&
4648 			   ap->rxconfig == 0) {
4649 			ap->state = ANEG_STATE_AN_ENABLE;
4650 		}
4651 		break;
4652 
4653 	case ANEG_STATE_COMPLETE_ACK_INIT:
4654 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4655 			ret = ANEG_FAILED;
4656 			break;
4657 		}
4658 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4659 			       MR_LP_ADV_HALF_DUPLEX |
4660 			       MR_LP_ADV_SYM_PAUSE |
4661 			       MR_LP_ADV_ASYM_PAUSE |
4662 			       MR_LP_ADV_REMOTE_FAULT1 |
4663 			       MR_LP_ADV_REMOTE_FAULT2 |
4664 			       MR_LP_ADV_NEXT_PAGE |
4665 			       MR_TOGGLE_RX |
4666 			       MR_NP_RX);
4667 		if (ap->rxconfig & ANEG_CFG_FD)
4668 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4669 		if (ap->rxconfig & ANEG_CFG_HD)
4670 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4671 		if (ap->rxconfig & ANEG_CFG_PS1)
4672 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4673 		if (ap->rxconfig & ANEG_CFG_PS2)
4674 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4675 		if (ap->rxconfig & ANEG_CFG_RF1)
4676 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4677 		if (ap->rxconfig & ANEG_CFG_RF2)
4678 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4679 		if (ap->rxconfig & ANEG_CFG_NP)
4680 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4681 
4682 		ap->link_time = ap->cur_time;
4683 
4684 		ap->flags ^= (MR_TOGGLE_TX);
4685 		if (ap->rxconfig & 0x0008)
4686 			ap->flags |= MR_TOGGLE_RX;
4687 		if (ap->rxconfig & ANEG_CFG_NP)
4688 			ap->flags |= MR_NP_RX;
4689 		ap->flags |= MR_PAGE_RX;
4690 
4691 		ap->state = ANEG_STATE_COMPLETE_ACK;
4692 		ret = ANEG_TIMER_ENAB;
4693 		break;
4694 
4695 	case ANEG_STATE_COMPLETE_ACK:
4696 		if (ap->ability_match != 0 &&
4697 		    ap->rxconfig == 0) {
4698 			ap->state = ANEG_STATE_AN_ENABLE;
4699 			break;
4700 		}
4701 		delta = ap->cur_time - ap->link_time;
4702 		if (delta > ANEG_STATE_SETTLE_TIME) {
4703 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4704 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4705 			} else {
4706 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4707 				    !(ap->flags & MR_NP_RX)) {
4708 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4709 				} else {
4710 					ret = ANEG_FAILED;
4711 				}
4712 			}
4713 		}
4714 		break;
4715 
4716 	case ANEG_STATE_IDLE_DETECT_INIT:
4717 		ap->link_time = ap->cur_time;
4718 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4719 		tw32_f(MAC_MODE, tp->mac_mode);
4720 		udelay(40);
4721 
4722 		ap->state = ANEG_STATE_IDLE_DETECT;
4723 		ret = ANEG_TIMER_ENAB;
4724 		break;
4725 
4726 	case ANEG_STATE_IDLE_DETECT:
4727 		if (ap->ability_match != 0 &&
4728 		    ap->rxconfig == 0) {
4729 			ap->state = ANEG_STATE_AN_ENABLE;
4730 			break;
4731 		}
4732 		delta = ap->cur_time - ap->link_time;
4733 		if (delta > ANEG_STATE_SETTLE_TIME) {
4734 			/* XXX Another undocumented quirk from the Broadcom driver. */
4735 			ap->state = ANEG_STATE_LINK_OK;
4736 		}
4737 		break;
4738 
4739 	case ANEG_STATE_LINK_OK:
4740 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4741 		ret = ANEG_DONE;
4742 		break;
4743 
4744 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4745 		/* Next-page exchange is not implemented. */
4746 		break;
4747 
4748 	case ANEG_STATE_NEXT_PAGE_WAIT:
4749 		/* Next-page exchange is not implemented. */
4750 		break;
4751 
4752 	default:
4753 		ret = ANEG_FAILED;
4754 		break;
4755 	}
4756 
4757 	return ret;
4758 }
4759 
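/* Run the software autoneg state machine to completion, stepping it
 * roughly once per microsecond with a budget of about 195 ms.
 * Returns nonzero on success.
 */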
4760 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4761 {
4762 	int res = 0;
4763 	struct tg3_fiber_aneginfo aninfo;
4764 	int status = ANEG_FAILED;
4765 	unsigned int tick;
4766 	u32 tmp;
4767 
4768 	tw32_f(MAC_TX_AUTO_NEG, 0);
4769 
4770 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4771 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4772 	udelay(40);
4773 
4774 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4775 	udelay(40);
4776 
4777 	memset(&aninfo, 0, sizeof(aninfo));
4778 	aninfo.flags |= MR_AN_ENABLE;
4779 	aninfo.state = ANEG_STATE_UNKNOWN;
4780 	aninfo.cur_time = 0;
4781 	tick = 0;
4782 	while (++tick < 195000) {
4783 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4784 		if (status == ANEG_DONE || status == ANEG_FAILED)
4785 			break;
4786 
4787 		udelay(1);
4788 	}
4789 
4790 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4791 	tw32_f(MAC_MODE, tp->mac_mode);
4792 	udelay(40);
4793 
4794 	*txflags = aninfo.txconfig;
4795 	*rxflags = aninfo.flags;
4796 
4797 	if (status == ANEG_DONE &&
4798 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4799 			     MR_LP_ADV_FULL_DUPLEX)))
4800 		res = 1;
4801 
4802 	return res;
4803 }
4804 
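/* Magic-number initialization sequence for the BCM8002 SerDes PHY,
 * run on first-time init or while a link is present.
 */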
4805 static void tg3_init_bcm8002(struct tg3 *tp)
4806 {
4807 	u32 mac_status = tr32(MAC_STATUS);
4808 	int i;
4809 
4810 	/* Reset when initializing for the first time or when we have a link. */
4811 	if (tg3_flag(tp, INIT_COMPLETE) &&
4812 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4813 		return;
4814 
4815 	/* Set PLL lock range. */
4816 	tg3_writephy(tp, 0x16, 0x8007);
4817 
4818 	/* SW reset */
4819 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4820 
4821 	/* Wait for reset to complete. */
4822 	/* XXX schedule_timeout() ... */
4823 	for (i = 0; i < 500; i++)
4824 		udelay(10);
4825 
4826 	/* Config mode; select PMA/Ch 1 regs. */
4827 	tg3_writephy(tp, 0x10, 0x8411);
4828 
4829 	/* Enable auto-lock and comdet, select txclk for tx. */
4830 	tg3_writephy(tp, 0x11, 0x0a10);
4831 
4832 	tg3_writephy(tp, 0x18, 0x00a0);
4833 	tg3_writephy(tp, 0x16, 0x41ff);
4834 
4835 	/* Assert and deassert POR. */
4836 	tg3_writephy(tp, 0x13, 0x0400);
4837 	udelay(40);
4838 	tg3_writephy(tp, 0x13, 0x0000);
4839 
4840 	tg3_writephy(tp, 0x11, 0x0a50);
4841 	udelay(40);
4842 	tg3_writephy(tp, 0x11, 0x0a10);
4843 
4844 	/* Wait for signal to stabilize */
4845 	/* XXX schedule_timeout() ... */
4846 	for (i = 0; i < 15000; i++)
4847 		udelay(10);
4848 
4849 	/* Deselect the channel register so we can read the PHYID
4850 	 * later.
4851 	 */
4852 	tg3_writephy(tp, 0x10, 0x8011);
4853 }
4854 
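/* Fiber link bring-up using the hardware SG-DIG autoneg block, with
 * a parallel-detection fallback for link partners that do not send
 * config code words.
 */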
4855 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4856 {
4857 	u16 flowctrl;
4858 	u32 sg_dig_ctrl, sg_dig_status;
4859 	u32 serdes_cfg, expected_sg_dig_ctrl;
4860 	int workaround, port_a;
4861 	int current_link_up;
4862 
4863 	serdes_cfg = 0;
4864 	expected_sg_dig_ctrl = 0;
4865 	workaround = 0;
4866 	port_a = 1;
4867 	current_link_up = 0;
4868 
4869 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4870 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4871 		workaround = 1;
4872 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4873 			port_a = 0;
4874 
4875 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4876 		/* preserve bits 20-23 for voltage regulator */
4877 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4878 	}
4879 
4880 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4881 
4882 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4883 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4884 			if (workaround) {
4885 				u32 val = serdes_cfg;
4886 
4887 				if (port_a)
4888 					val |= 0xc010000;
4889 				else
4890 					val |= 0x4010000;
4891 				tw32_f(MAC_SERDES_CFG, val);
4892 			}
4893 
4894 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4895 		}
4896 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4897 			tg3_setup_flow_control(tp, 0, 0);
4898 			current_link_up = 1;
4899 		}
4900 		goto out;
4901 	}
4902 
4903 	/* We want auto-negotiation. */
4904 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4905 
4906 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4907 	if (flowctrl & ADVERTISE_1000XPAUSE)
4908 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4909 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4910 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4911 
4912 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4913 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4914 		    tp->serdes_counter &&
4915 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4916 				    MAC_STATUS_RCVD_CFG)) ==
4917 		     MAC_STATUS_PCS_SYNCED)) {
4918 			tp->serdes_counter--;
4919 			current_link_up = 1;
4920 			goto out;
4921 		}
4922 restart_autoneg:
4923 		if (workaround)
4924 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4925 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4926 		udelay(5);
4927 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4928 
4929 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4930 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4931 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4932 				 MAC_STATUS_SIGNAL_DET)) {
4933 		sg_dig_status = tr32(SG_DIG_STATUS);
4934 		mac_status = tr32(MAC_STATUS);
4935 
4936 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4937 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4938 			u32 local_adv = 0, remote_adv = 0;
4939 
4940 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4941 				local_adv |= ADVERTISE_1000XPAUSE;
4942 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4943 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4944 
4945 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4946 				remote_adv |= LPA_1000XPAUSE;
4947 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4948 				remote_adv |= LPA_1000XPAUSE_ASYM;
4949 
4950 			tp->link_config.rmt_adv =
4951 					   mii_adv_to_ethtool_adv_x(remote_adv);
4952 
4953 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4954 			current_link_up = 1;
4955 			tp->serdes_counter = 0;
4956 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4957 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4958 			if (tp->serdes_counter)
4959 				tp->serdes_counter--;
4960 			else {
4961 				if (workaround) {
4962 					u32 val = serdes_cfg;
4963 
4964 					if (port_a)
4965 						val |= 0xc010000;
4966 					else
4967 						val |= 0x4010000;
4968 
4969 					tw32_f(MAC_SERDES_CFG, val);
4970 				}
4971 
4972 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4973 				udelay(40);
4974 
4975 				/* Parallel detection: the link is up only
4976 				 * if we have PCS_SYNC and are not
4977 				 * receiving config code words. */
4978 				mac_status = tr32(MAC_STATUS);
4979 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4980 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4981 					tg3_setup_flow_control(tp, 0, 0);
4982 					current_link_up = 1;
4983 					tp->phy_flags |=
4984 						TG3_PHYFLG_PARALLEL_DETECT;
4985 					tp->serdes_counter =
4986 						SERDES_PARALLEL_DET_TIMEOUT;
4987 				} else
4988 					goto restart_autoneg;
4989 			}
4990 		}
4991 	} else {
4992 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4993 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4994 	}
4995 
4996 out:
4997 	return current_link_up;
4998 }
4999 
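/* Fiber link bring-up without the hardware autoneg block: run the
 * software autoneg state machine, or simply force a 1000FD link when
 * autoneg is disabled.
 */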
5000 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5001 {
5002 	int current_link_up = 0;
5003 
5004 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5005 		goto out;
5006 
5007 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5008 		u32 txflags, rxflags;
5009 		int i;
5010 
5011 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5012 			u32 local_adv = 0, remote_adv = 0;
5013 
5014 			if (txflags & ANEG_CFG_PS1)
5015 				local_adv |= ADVERTISE_1000XPAUSE;
5016 			if (txflags & ANEG_CFG_PS2)
5017 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5018 
5019 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5020 				remote_adv |= LPA_1000XPAUSE;
5021 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5022 				remote_adv |= LPA_1000XPAUSE_ASYM;
5023 
5024 			tp->link_config.rmt_adv =
5025 					   mii_adv_to_ethtool_adv_x(remote_adv);
5026 
5027 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5028 
5029 			current_link_up = 1;
5030 		}
5031 		for (i = 0; i < 30; i++) {
5032 			udelay(20);
5033 			tw32_f(MAC_STATUS,
5034 			       (MAC_STATUS_SYNC_CHANGED |
5035 				MAC_STATUS_CFG_CHANGED));
5036 			udelay(40);
5037 			if ((tr32(MAC_STATUS) &
5038 			     (MAC_STATUS_SYNC_CHANGED |
5039 			      MAC_STATUS_CFG_CHANGED)) == 0)
5040 				break;
5041 		}
5042 
5043 		mac_status = tr32(MAC_STATUS);
5044 		if (current_link_up == 0 &&
5045 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5046 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5047 			current_link_up = 1;
5048 	} else {
5049 		tg3_setup_flow_control(tp, 0, 0);
5050 
5051 		/* Force a 1000 Mb/s full-duplex link up. */
5052 		current_link_up = 1;
5053 
5054 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5055 		udelay(40);
5056 
5057 		tw32_f(MAC_MODE, tp->mac_mode);
5058 		udelay(40);
5059 	}
5060 
5061 out:
5062 	return current_link_up;
5063 }
5064 
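/* Top-level link setup for TBI (fiber) ports.  A fiber link is
 * always 1000 Mb/s full duplex; only flow control is negotiated.
 */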
5065 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5066 {
5067 	u32 orig_pause_cfg;
5068 	u16 orig_active_speed;
5069 	u8 orig_active_duplex;
5070 	u32 mac_status;
5071 	int current_link_up;
5072 	int i;
5073 
5074 	orig_pause_cfg = tp->link_config.active_flowctrl;
5075 	orig_active_speed = tp->link_config.active_speed;
5076 	orig_active_duplex = tp->link_config.active_duplex;
5077 
5078 	if (!tg3_flag(tp, HW_AUTONEG) &&
5079 	    netif_carrier_ok(tp->dev) &&
5080 	    tg3_flag(tp, INIT_COMPLETE)) {
5081 		mac_status = tr32(MAC_STATUS);
5082 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5083 			       MAC_STATUS_SIGNAL_DET |
5084 			       MAC_STATUS_CFG_CHANGED |
5085 			       MAC_STATUS_RCVD_CFG);
5086 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5087 				   MAC_STATUS_SIGNAL_DET)) {
5088 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5089 					    MAC_STATUS_CFG_CHANGED));
5090 			return 0;
5091 		}
5092 	}
5093 
5094 	tw32_f(MAC_TX_AUTO_NEG, 0);
5095 
5096 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5097 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5098 	tw32_f(MAC_MODE, tp->mac_mode);
5099 	udelay(40);
5100 
5101 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5102 		tg3_init_bcm8002(tp);
5103 
5104 	/* Enable link change events even when polling the serdes. */
5105 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5106 	udelay(40);
5107 
5108 	current_link_up = 0;
5109 	tp->link_config.rmt_adv = 0;
5110 	mac_status = tr32(MAC_STATUS);
5111 
5112 	if (tg3_flag(tp, HW_AUTONEG))
5113 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5114 	else
5115 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5116 
5117 	tp->napi[0].hw_status->status =
5118 		(SD_STATUS_UPDATED |
5119 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5120 
5121 	for (i = 0; i < 100; i++) {
5122 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5123 				    MAC_STATUS_CFG_CHANGED));
5124 		udelay(5);
5125 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5126 					 MAC_STATUS_CFG_CHANGED |
5127 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5128 			break;
5129 	}
5130 
5131 	mac_status = tr32(MAC_STATUS);
5132 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5133 		current_link_up = 0;
5134 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5135 		    tp->serdes_counter == 0) {
5136 			tw32_f(MAC_MODE, (tp->mac_mode |
5137 					  MAC_MODE_SEND_CONFIGS));
5138 			udelay(1);
5139 			tw32_f(MAC_MODE, tp->mac_mode);
5140 		}
5141 	}
5142 
5143 	if (current_link_up == 1) {
5144 		tp->link_config.active_speed = SPEED_1000;
5145 		tp->link_config.active_duplex = DUPLEX_FULL;
5146 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5147 				    LED_CTRL_LNKLED_OVERRIDE |
5148 				    LED_CTRL_1000MBPS_ON));
5149 	} else {
5150 		tp->link_config.active_speed = SPEED_UNKNOWN;
5151 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5152 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5153 				    LED_CTRL_LNKLED_OVERRIDE |
5154 				    LED_CTRL_TRAFFIC_OVERRIDE));
5155 	}
5156 
5157 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5158 		if (current_link_up)
5159 			netif_carrier_on(tp->dev);
5160 		else
5161 			netif_carrier_off(tp->dev);
5162 		tg3_link_report(tp);
5163 	} else {
5164 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5165 		if (orig_pause_cfg != now_pause_cfg ||
5166 		    orig_active_speed != tp->link_config.active_speed ||
5167 		    orig_active_duplex != tp->link_config.active_duplex)
5168 			tg3_link_report(tp);
5169 	}
5170 
5171 	return 0;
5172 }
5173 
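/* Link setup for serdes ports driven through an MII-style register
 * interface (e.g. the 5714 and 5780-class parts), including
 * forced-speed and parallel-detection handling.
 */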
5174 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5175 {
5176 	int current_link_up, err = 0;
5177 	u32 bmsr, bmcr;
5178 	u16 current_speed;
5179 	u8 current_duplex;
5180 	u32 local_adv, remote_adv;
5181 
5182 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5183 	tw32_f(MAC_MODE, tp->mac_mode);
5184 	udelay(40);
5185 
5186 	tw32(MAC_EVENT, 0);
5187 
5188 	tw32_f(MAC_STATUS,
5189 	     (MAC_STATUS_SYNC_CHANGED |
5190 	      MAC_STATUS_CFG_CHANGED |
5191 	      MAC_STATUS_MI_COMPLETION |
5192 	      MAC_STATUS_LNKSTATE_CHANGED));
5193 	udelay(40);
5194 
5195 	if (force_reset)
5196 		tg3_phy_reset(tp);
5197 
5198 	current_link_up = 0;
5199 	current_speed = SPEED_UNKNOWN;
5200 	current_duplex = DUPLEX_UNKNOWN;
5201 	tp->link_config.rmt_adv = 0;
5202 
5203 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5204 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5205 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5206 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5207 			bmsr |= BMSR_LSTATUS;
5208 		else
5209 			bmsr &= ~BMSR_LSTATUS;
5210 	}
5211 
5212 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5213 
5214 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5215 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5216 		/* Do nothing; just check for link up at the end. */
5217 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5218 		u32 adv, newadv;
5219 
5220 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5221 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5222 				 ADVERTISE_1000XPAUSE |
5223 				 ADVERTISE_1000XPSE_ASYM |
5224 				 ADVERTISE_SLCT);
5225 
5226 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5227 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5228 
5229 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5230 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5231 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5232 			tg3_writephy(tp, MII_BMCR, bmcr);
5233 
5234 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5235 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5236 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5237 
5238 			return err;
5239 		}
5240 	} else {
5241 		u32 new_bmcr;
5242 
5243 		bmcr &= ~BMCR_SPEED1000;
5244 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5245 
5246 		if (tp->link_config.duplex == DUPLEX_FULL)
5247 			new_bmcr |= BMCR_FULLDPLX;
5248 
5249 		if (new_bmcr != bmcr) {
5250 			/* BMCR_SPEED1000 is a reserved bit that needs
5251 			 * to be set on write.
5252 			 */
5253 			new_bmcr |= BMCR_SPEED1000;
5254 
5255 			/* Force a linkdown */
5256 			if (netif_carrier_ok(tp->dev)) {
5257 				u32 adv;
5258 
5259 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5260 				adv &= ~(ADVERTISE_1000XFULL |
5261 					 ADVERTISE_1000XHALF |
5262 					 ADVERTISE_SLCT);
5263 				tg3_writephy(tp, MII_ADVERTISE, adv);
5264 				tg3_writephy(tp, MII_BMCR, bmcr |
5265 							   BMCR_ANRESTART |
5266 							   BMCR_ANENABLE);
5267 				udelay(10);
5268 				netif_carrier_off(tp->dev);
5269 			}
5270 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5271 			bmcr = new_bmcr;
5272 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5273 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5274 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5275 			    ASIC_REV_5714) {
5276 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5277 					bmsr |= BMSR_LSTATUS;
5278 				else
5279 					bmsr &= ~BMSR_LSTATUS;
5280 			}
5281 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5282 		}
5283 	}
5284 
5285 	if (bmsr & BMSR_LSTATUS) {
5286 		current_speed = SPEED_1000;
5287 		current_link_up = 1;
5288 		if (bmcr & BMCR_FULLDPLX)
5289 			current_duplex = DUPLEX_FULL;
5290 		else
5291 			current_duplex = DUPLEX_HALF;
5292 
5293 		local_adv = 0;
5294 		remote_adv = 0;
5295 
5296 		if (bmcr & BMCR_ANENABLE) {
5297 			u32 common;
5298 
5299 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5300 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5301 			common = local_adv & remote_adv;
5302 			if (common & (ADVERTISE_1000XHALF |
5303 				      ADVERTISE_1000XFULL)) {
5304 				if (common & ADVERTISE_1000XFULL)
5305 					current_duplex = DUPLEX_FULL;
5306 				else
5307 					current_duplex = DUPLEX_HALF;
5308 
5309 				tp->link_config.rmt_adv =
5310 					   mii_adv_to_ethtool_adv_x(remote_adv);
5311 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5312 				/* Link is up via parallel detect */
5313 			} else {
5314 				current_link_up = 0;
5315 			}
5316 		}
5317 	}
5318 
5319 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5320 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5321 
5322 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5323 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5324 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5325 
5326 	tw32_f(MAC_MODE, tp->mac_mode);
5327 	udelay(40);
5328 
5329 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5330 
5331 	tp->link_config.active_speed = current_speed;
5332 	tp->link_config.active_duplex = current_duplex;
5333 
5334 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5335 		if (current_link_up)
5336 			netif_carrier_on(tp->dev);
5337 		else {
5338 			netif_carrier_off(tp->dev);
5339 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5340 		}
5341 		tg3_link_report(tp);
5342 	}
5343 	return err;
5344 }
5345 
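/* Periodic parallel-detection check for serdes ports: force the link
 * up when we see signal detect without config code words, and
 * re-enable autoneg once config code words reappear.
 */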
5346 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5347 {
5348 	if (tp->serdes_counter) {
5349 		/* Give autoneg time to complete. */
5350 		tp->serdes_counter--;
5351 		return;
5352 	}
5353 
5354 	if (!netif_carrier_ok(tp->dev) &&
5355 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5356 		u32 bmcr;
5357 
5358 		tg3_readphy(tp, MII_BMCR, &bmcr);
5359 		if (bmcr & BMCR_ANENABLE) {
5360 			u32 phy1, phy2;
5361 
5362 			/* Select shadow register 0x1f */
5363 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5364 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5365 
5366 			/* Select expansion interrupt status register */
5367 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5368 					 MII_TG3_DSP_EXP1_INT_STAT);
5369 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5370 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5371 
5372 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5373 				/* We have signal detect and not receiving
5374 				 * config code words, link is up by parallel
5375 				 * detection.
5376 				 */
5377 
5378 				bmcr &= ~BMCR_ANENABLE;
5379 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5380 				tg3_writephy(tp, MII_BMCR, bmcr);
5381 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5382 			}
5383 		}
5384 	} else if (netif_carrier_ok(tp->dev) &&
5385 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5386 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5387 		u32 phy2;
5388 
5389 		/* Select expansion interrupt status register */
5390 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5391 				 MII_TG3_DSP_EXP1_INT_STAT);
5392 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5393 		if (phy2 & 0x20) {
5394 			u32 bmcr;
5395 
5396 			/* Config code words received, turn on autoneg. */
5397 			tg3_readphy(tp, MII_BMCR, &bmcr);
5398 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5399 
5400 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5401 
5402 		}
5403 	}
5404 }
5405 
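/* Dispatch link setup to the PHY-type-specific handler, then refresh
 * the MAC state that depends on the result: the GRC clock prescaler
 * on 5784_AX, transmit slot time and IPG, statistics coalescing, and
 * the ASPM L1 threshold workaround.
 */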
5406 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5407 {
5408 	u32 val;
5409 	int err;
5410 
5411 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5412 		err = tg3_setup_fiber_phy(tp, force_reset);
5413 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5414 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5415 	else
5416 		err = tg3_setup_copper_phy(tp, force_reset);
5417 
5418 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5419 		u32 scale;
5420 
5421 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5422 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5423 			scale = 65;
5424 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5425 			scale = 6;
5426 		else
5427 			scale = 12;
5428 
5429 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5430 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5431 		tw32(GRC_MISC_CFG, val);
5432 	}
5433 
5434 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5435 	      (6 << TX_LENGTHS_IPG_SHIFT);
5436 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5437 		val |= tr32(MAC_TX_LENGTHS) &
5438 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5439 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5440 
5441 	if (tp->link_config.active_speed == SPEED_1000 &&
5442 	    tp->link_config.active_duplex == DUPLEX_HALF)
5443 		tw32(MAC_TX_LENGTHS, val |
5444 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5445 	else
5446 		tw32(MAC_TX_LENGTHS, val |
5447 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5448 
5449 	if (!tg3_flag(tp, 5705_PLUS)) {
5450 		if (netif_carrier_ok(tp->dev)) {
5451 			tw32(HOSTCC_STAT_COAL_TICKS,
5452 			     tp->coal.stats_block_coalesce_usecs);
5453 		} else {
5454 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5455 		}
5456 	}
5457 
5458 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5459 		val = tr32(PCIE_PWR_MGMT_THRESH);
5460 		if (!netif_carrier_ok(tp->dev))
5461 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5462 			      tp->pwrmgmt_thresh;
5463 		else
5464 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5465 		tw32(PCIE_PWR_MGMT_THRESH, val);
5466 	}
5467 
5468 	return err;
5469 }
5470 
5471 static inline int tg3_irq_sync(struct tg3 *tp)
5472 {
5473 	return tp->irq_sync;
5474 }
5475 
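/* Copy a block of registers into the dump buffer.  @off is both the
 * starting register offset and the block's offset within the buffer,
 * so every register lands at its architectural offset in the dump.
 */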
5476 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5477 {
5478 	int i;
5479 
5480 	dst = (u32 *)((u8 *)dst + off);
5481 	for (i = 0; i < len; i += sizeof(u32))
5482 		*dst++ = tr32(off + i);
5483 }
5484 
5485 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5486 {
5487 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5488 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5489 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5490 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5491 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5492 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5493 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5494 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5495 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5496 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5497 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5498 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5499 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5500 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5501 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5502 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5503 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5504 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5505 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5506 
5507 	if (tg3_flag(tp, SUPPORT_MSIX))
5508 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5509 
5510 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5511 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5512 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5513 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5514 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5515 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5516 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5517 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5518 
5519 	if (!tg3_flag(tp, 5705_PLUS)) {
5520 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5521 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5522 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5523 	}
5524 
5525 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5526 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5527 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5528 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5529 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5530 
5531 	if (tg3_flag(tp, NVRAM))
5532 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5533 }
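/* Dump the register block plus the per-vector status block and NAPI
 * state to the kernel log; register quads that read all-zero are
 * skipped.
 */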
5534 
5535 static void tg3_dump_state(struct tg3 *tp)
5536 {
5537 	int i;
5538 	u32 *regs;
5539 
5540 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5541 	if (!regs) {
5542 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5543 		return;
5544 	}
5545 
5546 	if (tg3_flag(tp, PCI_EXPRESS)) {
5547 		/* Read up to but not including private PCI registers */
5548 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5549 			regs[i / sizeof(u32)] = tr32(i);
5550 	} else
5551 		tg3_dump_legacy_regs(tp, regs);
5552 
5553 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5554 		if (!regs[i + 0] && !regs[i + 1] &&
5555 		    !regs[i + 2] && !regs[i + 3])
5556 			continue;
5557 
5558 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5559 			   i * 4,
5560 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5561 	}
5562 
5563 	kfree(regs);
5564 
5565 	for (i = 0; i < tp->irq_cnt; i++) {
5566 		struct tg3_napi *tnapi = &tp->napi[i];
5567 
5568 		/* SW status block */
5569 		netdev_err(tp->dev,
5570 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5571 			   i,
5572 			   tnapi->hw_status->status,
5573 			   tnapi->hw_status->status_tag,
5574 			   tnapi->hw_status->rx_jumbo_consumer,
5575 			   tnapi->hw_status->rx_consumer,
5576 			   tnapi->hw_status->rx_mini_consumer,
5577 			   tnapi->hw_status->idx[0].rx_producer,
5578 			   tnapi->hw_status->idx[0].tx_consumer);
5579 
5580 		netdev_err(tp->dev,
5581 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5582 			   i,
5583 			   tnapi->last_tag, tnapi->last_irq_tag,
5584 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5585 			   tnapi->rx_rcb_ptr,
5586 			   tnapi->prodring.rx_std_prod_idx,
5587 			   tnapi->prodring.rx_std_cons_idx,
5588 			   tnapi->prodring.rx_jmb_prod_idx,
5589 			   tnapi->prodring.rx_jmb_cons_idx);
5590 	}
5591 }
5592 
5593 /* This is called whenever we suspect that the system chipset is re-
5594  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5595  * is bogus tx completions. We try to recover by setting the
5596  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5597  * in the workqueue.
5598  */
5599 static void tg3_tx_recover(struct tg3 *tp)
5600 {
5601 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5602 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5603 
5604 	netdev_warn(tp->dev,
5605 		    "The system may be re-ordering memory-mapped I/O "
5606 		    "cycles to the network device, attempting to recover. "
5607 		    "Please report the problem to the driver maintainer "
5608 		    "and include system chipset information.\n");
5609 
5610 	spin_lock(&tp->lock);
5611 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5612 	spin_unlock(&tp->lock);
5613 }
5614 
5615 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5616 {
5617 	/* Tell compiler to fetch tx indices from memory. */
5618 	barrier();
5619 	return tnapi->tx_pending -
5620 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5621 }
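
/* A worked example of the ring arithmetic above (illustrative only,
 * assuming the default 512-entry TX ring): with tx_prod = 10 and
 * tx_cons = 500 the in-flight count is (10 - 500) & 511 = 22, so a
 * tx_pending of 511 leaves 489 descriptors available.
 */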
5622 
5623 /* Tigon3 never reports partial packet sends.  So we do not
5624  * need special logic to handle SKBs that have not had all
5625  * of their frags sent yet, like SunGEM does.
5626  */
5627 static void tg3_tx(struct tg3_napi *tnapi)
5628 {
5629 	struct tg3 *tp = tnapi->tp;
5630 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5631 	u32 sw_idx = tnapi->tx_cons;
5632 	struct netdev_queue *txq;
5633 	int index = tnapi - tp->napi;
5634 	unsigned int pkts_compl = 0, bytes_compl = 0;
5635 
5636 	if (tg3_flag(tp, ENABLE_TSS))
5637 		index--;
5638 
5639 	txq = netdev_get_tx_queue(tp->dev, index);
5640 
5641 	while (sw_idx != hw_idx) {
5642 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5643 		struct sk_buff *skb = ri->skb;
5644 		int i, tx_bug = 0;
5645 
5646 		if (unlikely(skb == NULL)) {
5647 			tg3_tx_recover(tp);
5648 			return;
5649 		}
5650 
5651 		pci_unmap_single(tp->pdev,
5652 				 dma_unmap_addr(ri, mapping),
5653 				 skb_headlen(skb),
5654 				 PCI_DMA_TODEVICE);
5655 
5656 		ri->skb = NULL;
5657 
5658 		while (ri->fragmented) {
5659 			ri->fragmented = false;
5660 			sw_idx = NEXT_TX(sw_idx);
5661 			ri = &tnapi->tx_buffers[sw_idx];
5662 		}
5663 
5664 		sw_idx = NEXT_TX(sw_idx);
5665 
5666 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5667 			ri = &tnapi->tx_buffers[sw_idx];
5668 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5669 				tx_bug = 1;
5670 
5671 			pci_unmap_page(tp->pdev,
5672 				       dma_unmap_addr(ri, mapping),
5673 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5674 				       PCI_DMA_TODEVICE);
5675 
5676 			while (ri->fragmented) {
5677 				ri->fragmented = false;
5678 				sw_idx = NEXT_TX(sw_idx);
5679 				ri = &tnapi->tx_buffers[sw_idx];
5680 			}
5681 
5682 			sw_idx = NEXT_TX(sw_idx);
5683 		}
5684 
5685 		pkts_compl++;
5686 		bytes_compl += skb->len;
5687 
5688 		dev_kfree_skb(skb);
5689 
5690 		if (unlikely(tx_bug)) {
5691 			tg3_tx_recover(tp);
5692 			return;
5693 		}
5694 	}
5695 
5696 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5697 
5698 	tnapi->tx_cons = sw_idx;
5699 
5700 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5701 	 * before checking for netif_queue_stopped().  Without the
5702 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5703 	 * will miss it and cause the queue to be stopped forever.
5704 	 */
5705 	smp_mb();
5706 
5707 	if (unlikely(netif_tx_queue_stopped(txq) &&
5708 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5709 		__netif_tx_lock(txq, smp_processor_id());
5710 		if (netif_tx_queue_stopped(txq) &&
5711 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5712 			netif_tx_wake_queue(txq);
5713 		__netif_tx_unlock(txq);
5714 	}
5715 }
5716 
5717 static void tg3_frag_free(bool is_frag, void *data)
5718 {
5719 	if (is_frag)
5720 		put_page(virt_to_head_page(data));
5721 	else
5722 		kfree(data);
5723 }
5724 
5725 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5726 {
5727 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5728 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5729 
5730 	if (!ri->data)
5731 		return;
5732 
5733 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5734 			 map_sz, PCI_DMA_FROMDEVICE);
5735 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5736 	ri->data = NULL;
5737 }
5738 
5739 
5740 /* Returns the size of the rx data buffer allocated, or < 0 on error.
5741  *
5742  * We only need to fill in the address because the other members
5743  * of the RX descriptor are invariant, see tg3_init_rings.
5744  *
5745  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5746  * posting buffers we only dirty the first cache line of the RX
5747  * descriptor (containing the address), whereas for the RX status
5748  * buffers the cpu only reads the last cacheline of the RX descriptor
5749  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5750  */
5751 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5752 			     u32 opaque_key, u32 dest_idx_unmasked,
5753 			     unsigned int *frag_size)
5754 {
5755 	struct tg3_rx_buffer_desc *desc;
5756 	struct ring_info *map;
5757 	u8 *data;
5758 	dma_addr_t mapping;
5759 	int skb_size, data_size, dest_idx;
5760 
5761 	switch (opaque_key) {
5762 	case RXD_OPAQUE_RING_STD:
5763 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5764 		desc = &tpr->rx_std[dest_idx];
5765 		map = &tpr->rx_std_buffers[dest_idx];
5766 		data_size = tp->rx_pkt_map_sz;
5767 		break;
5768 
5769 	case RXD_OPAQUE_RING_JUMBO:
5770 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5771 		desc = &tpr->rx_jmb[dest_idx].std;
5772 		map = &tpr->rx_jmb_buffers[dest_idx];
5773 		data_size = TG3_RX_JMB_MAP_SZ;
5774 		break;
5775 
5776 	default:
5777 		return -EINVAL;
5778 	}
5779 
5780 	/* Do not overwrite any of the map or descriptor information
5781 	 * until we are sure we can commit to a new buffer.
5782 	 *
5783 	 * Callers depend upon this behavior and assume that
5784 	 * we leave everything unchanged if we fail.
5785 	 */
5786 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5787 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5788 	if (skb_size <= PAGE_SIZE) {
5789 		data = netdev_alloc_frag(skb_size);
5790 		*frag_size = skb_size;
5791 	} else {
5792 		data = kmalloc(skb_size, GFP_ATOMIC);
5793 		*frag_size = 0;
5794 	}
5795 	if (!data)
5796 		return -ENOMEM;
5797 
5798 	mapping = pci_map_single(tp->pdev,
5799 				 data + TG3_RX_OFFSET(tp),
5800 				 data_size,
5801 				 PCI_DMA_FROMDEVICE);
5802 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5803 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
5804 		return -EIO;
5805 	}
5806 
5807 	map->data = data;
5808 	dma_unmap_addr_set(map, mapping, mapping);
5809 
5810 	desc->addr_hi = ((u64)mapping >> 32);
5811 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5812 
5813 	return data_size;
5814 }
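
/* The allocator split above mirrors tg3_rx_data_free(): when the data
 * area plus the skb_shared_info footer fits within a page, the cheap
 * per-cpu page-fragment allocator is used and build_skb() can later
 * wrap the buffer without a copy; larger (e.g. jumbo) buffers fall
 * back to kmalloc().  As a rough illustration, a standard 1536-byte
 * DMA buffer plus the offset and shared info stays under a 4096-byte
 * page, while a jumbo buffer does not.
 */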
5815 
5816 /* We only need to move the address over because the other
5817  * members of the RX descriptor are invariant.  See notes above
5818  * tg3_alloc_rx_data for full details.
5819  */
5820 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5821 			   struct tg3_rx_prodring_set *dpr,
5822 			   u32 opaque_key, int src_idx,
5823 			   u32 dest_idx_unmasked)
5824 {
5825 	struct tg3 *tp = tnapi->tp;
5826 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5827 	struct ring_info *src_map, *dest_map;
5828 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5829 	int dest_idx;
5830 
5831 	switch (opaque_key) {
5832 	case RXD_OPAQUE_RING_STD:
5833 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5834 		dest_desc = &dpr->rx_std[dest_idx];
5835 		dest_map = &dpr->rx_std_buffers[dest_idx];
5836 		src_desc = &spr->rx_std[src_idx];
5837 		src_map = &spr->rx_std_buffers[src_idx];
5838 		break;
5839 
5840 	case RXD_OPAQUE_RING_JUMBO:
5841 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5842 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5843 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5844 		src_desc = &spr->rx_jmb[src_idx].std;
5845 		src_map = &spr->rx_jmb_buffers[src_idx];
5846 		break;
5847 
5848 	default:
5849 		return;
5850 	}
5851 
5852 	dest_map->data = src_map->data;
5853 	dma_unmap_addr_set(dest_map, mapping,
5854 			   dma_unmap_addr(src_map, mapping));
5855 	dest_desc->addr_hi = src_desc->addr_hi;
5856 	dest_desc->addr_lo = src_desc->addr_lo;
5857 
5858 	/* Ensure that the update to the skb happens after the physical
5859 	 * addresses have been transferred to the new BD location.
5860 	 */
5861 	smp_wmb();
5862 
5863 	src_map->data = NULL;
5864 }
5865 
5866 /* The RX ring scheme is composed of multiple rings which post fresh
5867  * buffers to the chip, and one special ring the chip uses to report
5868  * status back to the host.
5869  *
5870  * The special ring reports the status of received packets to the
5871  * host.  The chip does not write into the original descriptor the
5872  * RX buffer was obtained from.  The chip simply takes the original
5873  * descriptor as provided by the host, updates the status and length
5874  * field, then writes this into the next status ring entry.
5875  *
5876  * Each ring the host uses to post buffers to the chip is described
5877  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5878  * it is first placed into the on-chip ram.  When the packet's length
5879  * is known, it walks down the TG3_BDINFO entries to select the ring.
5880  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5881  * whose MAXLEN covers the new packet's length is chosen.
5882  *
5883  * The "separate ring for rx status" scheme may sound queer, but it makes
5884  * sense from a cache coherency perspective.  If only the host writes
5885  * to the buffer post rings, and only the chip writes to the rx status
5886  * rings, then cache lines never move beyond shared-modified state.
5887  * If both the host and chip were to write into the same ring, cache line
5888  * eviction could occur since both entities want it in an exclusive state.
5889  */
5890 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5891 {
5892 	struct tg3 *tp = tnapi->tp;
5893 	u32 work_mask, rx_std_posted = 0;
5894 	u32 std_prod_idx, jmb_prod_idx;
5895 	u32 sw_idx = tnapi->rx_rcb_ptr;
5896 	u16 hw_idx;
5897 	int received;
5898 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5899 
5900 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5901 	/*
5902 	 * We need to order the read of hw_idx and the read of
5903 	 * the opaque cookie.
5904 	 */
5905 	rmb();
5906 	work_mask = 0;
5907 	received = 0;
5908 	std_prod_idx = tpr->rx_std_prod_idx;
5909 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5910 	while (sw_idx != hw_idx && budget > 0) {
5911 		struct ring_info *ri;
5912 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5913 		unsigned int len;
5914 		struct sk_buff *skb;
5915 		dma_addr_t dma_addr;
5916 		u32 opaque_key, desc_idx, *post_ptr;
5917 		u8 *data;
5918 
5919 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5920 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5921 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5922 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5923 			dma_addr = dma_unmap_addr(ri, mapping);
5924 			data = ri->data;
5925 			post_ptr = &std_prod_idx;
5926 			rx_std_posted++;
5927 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5928 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5929 			dma_addr = dma_unmap_addr(ri, mapping);
5930 			data = ri->data;
5931 			post_ptr = &jmb_prod_idx;
5932 		} else
5933 			goto next_pkt_nopost;
5934 
5935 		work_mask |= opaque_key;
5936 
5937 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5938 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5939 		drop_it:
5940 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5941 				       desc_idx, *post_ptr);
5942 		drop_it_no_recycle:
5943 			/* The other statistics are kept by the card. */
5944 			tp->rx_dropped++;
5945 			goto next_pkt;
5946 		}
5947 
5948 		prefetch(data + TG3_RX_OFFSET(tp));
5949 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5950 		      ETH_FCS_LEN;
5951 
5952 		if (len > TG3_RX_COPY_THRESH(tp)) {
5953 			int skb_size;
5954 			unsigned int frag_size;
5955 
5956 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5957 						    *post_ptr, &frag_size);
5958 			if (skb_size < 0)
5959 				goto drop_it;
5960 
5961 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5962 					 PCI_DMA_FROMDEVICE);
5963 
5964 			skb = build_skb(data, frag_size);
5965 			if (!skb) {
5966 				tg3_frag_free(frag_size != 0, data);
5967 				goto drop_it_no_recycle;
5968 			}
5969 			skb_reserve(skb, TG3_RX_OFFSET(tp));
5970 			/* Ensure that the update to the data happens
5971 			 * after the usage of the old DMA mapping.
5972 			 */
5973 			smp_wmb();
5974 
5975 			ri->data = NULL;
5976 
5977 		} else {
5978 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5979 				       desc_idx, *post_ptr);
5980 
5981 			skb = netdev_alloc_skb(tp->dev,
5982 					       len + TG3_RAW_IP_ALIGN);
5983 			if (skb == NULL)
5984 				goto drop_it_no_recycle;
5985 
5986 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
5987 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5988 			memcpy(skb->data,
5989 			       data + TG3_RX_OFFSET(tp),
5990 			       len);
5991 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5992 		}
5993 
5994 		skb_put(skb, len);
5995 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5996 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5997 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5998 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5999 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6000 		else
6001 			skb_checksum_none_assert(skb);
6002 
6003 		skb->protocol = eth_type_trans(skb, tp->dev);
6004 
6005 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6006 		    skb->protocol != htons(ETH_P_8021Q)) {
6007 			dev_kfree_skb(skb);
6008 			goto drop_it_no_recycle;
6009 		}
6010 
6011 		if (desc->type_flags & RXD_FLAG_VLAN &&
6012 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6013 			__vlan_hwaccel_put_tag(skb,
6014 					       desc->err_vlan & RXD_VLAN_MASK);
6015 
6016 		napi_gro_receive(&tnapi->napi, skb);
6017 
6018 		received++;
6019 		budget--;
6020 
6021 next_pkt:
6022 		(*post_ptr)++;
6023 
6024 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6025 			tpr->rx_std_prod_idx = std_prod_idx &
6026 					       tp->rx_std_ring_mask;
6027 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6028 				     tpr->rx_std_prod_idx);
6029 			work_mask &= ~RXD_OPAQUE_RING_STD;
6030 			rx_std_posted = 0;
6031 		}
6032 next_pkt_nopost:
6033 		sw_idx++;
6034 		sw_idx &= tp->rx_ret_ring_mask;
6035 
6036 		/* Refresh hw_idx to see if there is new work */
6037 		if (sw_idx == hw_idx) {
6038 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6039 			rmb();
6040 		}
6041 	}
6042 
6043 	/* ACK the status ring. */
6044 	tnapi->rx_rcb_ptr = sw_idx;
6045 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6046 
6047 	/* Refill RX ring(s). */
6048 	if (!tg3_flag(tp, ENABLE_RSS)) {
6049 		/* Sync BD data before updating mailbox */
6050 		wmb();
6051 
6052 		if (work_mask & RXD_OPAQUE_RING_STD) {
6053 			tpr->rx_std_prod_idx = std_prod_idx &
6054 					       tp->rx_std_ring_mask;
6055 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6056 				     tpr->rx_std_prod_idx);
6057 		}
6058 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6059 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6060 					       tp->rx_jmb_ring_mask;
6061 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6062 				     tpr->rx_jmb_prod_idx);
6063 		}
6064 		mmiowb();
6065 	} else if (work_mask) {
6066 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6067 		 * updated before the producer indices can be updated.
6068 		 */
6069 		smp_wmb();
6070 
6071 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6072 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6073 
6074 		if (tnapi != &tp->napi[1]) {
6075 			tp->rx_refill = true;
6076 			napi_schedule(&tp->napi[1].napi);
6077 		}
6078 	}
6079 
6080 	return received;
6081 }
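
/* The opaque cookie consumed at the top of tg3_rx() is the value the
 * driver itself planted in each buffer descriptor when the producer
 * rings were initialized (see tg3_rx_prodring_alloc() below): the
 * ring type in the RXD_OPAQUE_RING_MASK bits plus the producer-ring
 * index in the RXD_OPAQUE_INDEX_MASK bits.  That is what lets a
 * status ring entry point back at the ring_info holding the buffer's
 * virtual address and DMA mapping.
 */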
6082 
6083 static void tg3_poll_link(struct tg3 *tp)
6084 {
6085 	/* handle link change and other phy events */
6086 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6087 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6088 
6089 		if (sblk->status & SD_STATUS_LINK_CHG) {
6090 			sblk->status = SD_STATUS_UPDATED |
6091 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6092 			spin_lock(&tp->lock);
6093 			if (tg3_flag(tp, USE_PHYLIB)) {
6094 				tw32_f(MAC_STATUS,
6095 				     (MAC_STATUS_SYNC_CHANGED |
6096 				      MAC_STATUS_CFG_CHANGED |
6097 				      MAC_STATUS_MI_COMPLETION |
6098 				      MAC_STATUS_LNKSTATE_CHANGED));
6099 				udelay(40);
6100 			} else
6101 				tg3_setup_phy(tp, 0);
6102 			spin_unlock(&tp->lock);
6103 		}
6104 	}
6105 }
6106 
6107 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6108 				struct tg3_rx_prodring_set *dpr,
6109 				struct tg3_rx_prodring_set *spr)
6110 {
6111 	u32 si, di, cpycnt, src_prod_idx;
6112 	int i, err = 0;
6113 
6114 	while (1) {
6115 		src_prod_idx = spr->rx_std_prod_idx;
6116 
6117 		/* Make sure updates to the rx_std_buffers[] entries and the
6118 		 * standard producer index are seen in the correct order.
6119 		 */
6120 		smp_rmb();
6121 
6122 		if (spr->rx_std_cons_idx == src_prod_idx)
6123 			break;
6124 
6125 		if (spr->rx_std_cons_idx < src_prod_idx)
6126 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6127 		else
6128 			cpycnt = tp->rx_std_ring_mask + 1 -
6129 				 spr->rx_std_cons_idx;
6130 
6131 		cpycnt = min(cpycnt,
6132 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6133 
6134 		si = spr->rx_std_cons_idx;
6135 		di = dpr->rx_std_prod_idx;
6136 
6137 		for (i = di; i < di + cpycnt; i++) {
6138 			if (dpr->rx_std_buffers[i].data) {
6139 				cpycnt = i - di;
6140 				err = -ENOSPC;
6141 				break;
6142 			}
6143 		}
6144 
6145 		if (!cpycnt)
6146 			break;
6147 
6148 		/* Ensure that updates to the rx_std_buffers ring and the
6149 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6150 		 * ordered correctly WRT the skb check above.
6151 		 */
6152 		smp_rmb();
6153 
6154 		memcpy(&dpr->rx_std_buffers[di],
6155 		       &spr->rx_std_buffers[si],
6156 		       cpycnt * sizeof(struct ring_info));
6157 
6158 		for (i = 0; i < cpycnt; i++, di++, si++) {
6159 			struct tg3_rx_buffer_desc *sbd, *dbd;
6160 			sbd = &spr->rx_std[si];
6161 			dbd = &dpr->rx_std[di];
6162 			dbd->addr_hi = sbd->addr_hi;
6163 			dbd->addr_lo = sbd->addr_lo;
6164 		}
6165 
6166 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6167 				       tp->rx_std_ring_mask;
6168 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6169 				       tp->rx_std_ring_mask;
6170 	}
6171 
6172 	while (1) {
6173 		src_prod_idx = spr->rx_jmb_prod_idx;
6174 
6175 		/* Make sure updates to the rx_jmb_buffers[] entries and
6176 		 * the jumbo producer index are seen in the correct order.
6177 		 */
6178 		smp_rmb();
6179 
6180 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6181 			break;
6182 
6183 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6184 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6185 		else
6186 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6187 				 spr->rx_jmb_cons_idx;
6188 
6189 		cpycnt = min(cpycnt,
6190 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6191 
6192 		si = spr->rx_jmb_cons_idx;
6193 		di = dpr->rx_jmb_prod_idx;
6194 
6195 		for (i = di; i < di + cpycnt; i++) {
6196 			if (dpr->rx_jmb_buffers[i].data) {
6197 				cpycnt = i - di;
6198 				err = -ENOSPC;
6199 				break;
6200 			}
6201 		}
6202 
6203 		if (!cpycnt)
6204 			break;
6205 
6206 		/* Ensure that updates to the rx_jmb_buffers ring and the
6207 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6208 		 * ordered correctly WRT the skb check above.
6209 		 */
6210 		smp_rmb();
6211 
6212 		memcpy(&dpr->rx_jmb_buffers[di],
6213 		       &spr->rx_jmb_buffers[si],
6214 		       cpycnt * sizeof(struct ring_info));
6215 
6216 		for (i = 0; i < cpycnt; i++, di++, si++) {
6217 			struct tg3_rx_buffer_desc *sbd, *dbd;
6218 			sbd = &spr->rx_jmb[si].std;
6219 			dbd = &dpr->rx_jmb[di].std;
6220 			dbd->addr_hi = sbd->addr_hi;
6221 			dbd->addr_lo = sbd->addr_lo;
6222 		}
6223 
6224 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6225 				       tp->rx_jmb_ring_mask;
6226 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6227 				       tp->rx_jmb_ring_mask;
6228 	}
6229 
6230 	return err;
6231 }
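
/* A worked example of the cpycnt clamping above (illustrative only,
 * assuming a 512-entry ring, mask 511): with rx_std_cons_idx = 500
 * and rx_std_prod_idx = 20 the consumer is numerically ahead, so the
 * first pass copies 512 - 500 = 12 entries up to the end of the ring,
 * wraps the consumer index to 0, and the next iteration transfers the
 * remaining 20.  An occupied destination slot shrinks cpycnt further
 * and flags -ENOSPC so the caller can kick the coalescing engine.
 */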
6232 
6233 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6234 {
6235 	struct tg3 *tp = tnapi->tp;
6236 
6237 	/* run TX completion thread */
6238 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6239 		tg3_tx(tnapi);
6240 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6241 			return work_done;
6242 	}
6243 
6244 	if (!tnapi->rx_rcb_prod_idx)
6245 		return work_done;
6246 
6247 	/* run RX thread, within the bounds set by NAPI.
6248 	 * All RX "locking" is done by ensuring outside
6249 	 * code synchronizes with tg3->napi.poll()
6250 	 */
6251 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6252 		work_done += tg3_rx(tnapi, budget - work_done);
6253 
6254 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6255 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6256 		int i, err = 0;
6257 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6258 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6259 
6260 		tp->rx_refill = false;
6261 		for (i = 1; i < tp->irq_cnt; i++)
6262 			err |= tg3_rx_prodring_xfer(tp, dpr,
6263 						    &tp->napi[i].prodring);
6264 
6265 		wmb();
6266 
6267 		if (std_prod_idx != dpr->rx_std_prod_idx)
6268 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6269 				     dpr->rx_std_prod_idx);
6270 
6271 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6272 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6273 				     dpr->rx_jmb_prod_idx);
6274 
6275 		mmiowb();
6276 
6277 		if (err)
6278 			tw32_f(HOSTCC_MODE, tp->coal_now);
6279 	}
6280 
6281 	return work_done;
6282 }
6283 
6284 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6285 {
6286 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6287 		schedule_work(&tp->reset_task);
6288 }
6289 
6290 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6291 {
6292 	cancel_work_sync(&tp->reset_task);
6293 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6294 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6295 }
6296 
6297 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6298 {
6299 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6300 	struct tg3 *tp = tnapi->tp;
6301 	int work_done = 0;
6302 	struct tg3_hw_status *sblk = tnapi->hw_status;
6303 
6304 	while (1) {
6305 		work_done = tg3_poll_work(tnapi, work_done, budget);
6306 
6307 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6308 			goto tx_recovery;
6309 
6310 		if (unlikely(work_done >= budget))
6311 			break;
6312 
6313 		/* tnapi->last_tag is written to the interrupt mailbox
6314 		 * below to tell the hw how much work has been processed,
6315 		 * so we must read it before checking for more work.
6316 		 */
6317 		tnapi->last_tag = sblk->status_tag;
6318 		tnapi->last_irq_tag = tnapi->last_tag;
6319 		rmb();
6320 
6321 		/* check for RX/TX work to do */
6322 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6323 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6324 
6325 			/* This test is not race-free, but looping
6326 			 * again here reduces the number of interrupts.
6327 			 */
6328 			if (tnapi == &tp->napi[1] && tp->rx_refill)
6329 				continue;
6330 
6331 			napi_complete(napi);
6332 			/* Reenable interrupts. */
6333 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6334 
6335 			/* This test here is synchronized by napi_schedule()
6336 			 * and napi_complete() to close the race condition.
6337 			 */
6338 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6339 				tw32(HOSTCC_MODE, tp->coalesce_mode |
6340 						  HOSTCC_MODE_ENABLE |
6341 						  tnapi->coal_now);
6342 			}
6343 			mmiowb();
6344 			break;
6345 		}
6346 	}
6347 
6348 	return work_done;
6349 
6350 tx_recovery:
6351 	/* work_done is guaranteed to be less than budget. */
6352 	napi_complete(napi);
6353 	tg3_reset_task_schedule(tp);
6354 	return work_done;
6355 }
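
/* With tagged status blocks, writing (last_tag << 24) to the interrupt
 * mailbox above tells the hardware how far the driver has processed.
 * If the chip posted a newer status block (a different status_tag) in
 * the meantime, it re-raises the interrupt, closing the window between
 * the final work check and napi_complete().
 */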
6356 
6357 static void tg3_process_error(struct tg3 *tp)
6358 {
6359 	u32 val;
6360 	bool real_error = false;
6361 
6362 	if (tg3_flag(tp, ERROR_PROCESSED))
6363 		return;
6364 
6365 	/* Check Flow Attention register */
6366 	val = tr32(HOSTCC_FLOW_ATTN);
6367 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6368 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6369 		real_error = true;
6370 	}
6371 
6372 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6373 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6374 		real_error = true;
6375 	}
6376 
6377 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6378 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6379 		real_error = true;
6380 	}
6381 
6382 	if (!real_error)
6383 		return;
6384 
6385 	tg3_dump_state(tp);
6386 
6387 	tg3_flag_set(tp, ERROR_PROCESSED);
6388 	tg3_reset_task_schedule(tp);
6389 }
6390 
6391 static int tg3_poll(struct napi_struct *napi, int budget)
6392 {
6393 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6394 	struct tg3 *tp = tnapi->tp;
6395 	int work_done = 0;
6396 	struct tg3_hw_status *sblk = tnapi->hw_status;
6397 
6398 	while (1) {
6399 		if (sblk->status & SD_STATUS_ERROR)
6400 			tg3_process_error(tp);
6401 
6402 		tg3_poll_link(tp);
6403 
6404 		work_done = tg3_poll_work(tnapi, work_done, budget);
6405 
6406 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6407 			goto tx_recovery;
6408 
6409 		if (unlikely(work_done >= budget))
6410 			break;
6411 
6412 		if (tg3_flag(tp, TAGGED_STATUS)) {
6413 			/* tnapi->last_tag is used in tg3_int_reenable() below
6414 			 * to tell the hw how much work has been processed,
6415 			 * so we must read it before checking for more work.
6416 			 */
6417 			tnapi->last_tag = sblk->status_tag;
6418 			tnapi->last_irq_tag = tnapi->last_tag;
6419 			rmb();
6420 		} else
6421 			sblk->status &= ~SD_STATUS_UPDATED;
6422 
6423 		if (likely(!tg3_has_work(tnapi))) {
6424 			napi_complete(napi);
6425 			tg3_int_reenable(tnapi);
6426 			break;
6427 		}
6428 	}
6429 
6430 	return work_done;
6431 
6432 tx_recovery:
6433 	/* work_done is guaranteed to be less than budget. */
6434 	napi_complete(napi);
6435 	tg3_reset_task_schedule(tp);
6436 	return work_done;
6437 }
6438 
6439 static void tg3_napi_disable(struct tg3 *tp)
6440 {
6441 	int i;
6442 
6443 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6444 		napi_disable(&tp->napi[i].napi);
6445 }
6446 
6447 static void tg3_napi_enable(struct tg3 *tp)
6448 {
6449 	int i;
6450 
6451 	for (i = 0; i < tp->irq_cnt; i++)
6452 		napi_enable(&tp->napi[i].napi);
6453 }
6454 
6455 static void tg3_napi_init(struct tg3 *tp)
6456 {
6457 	int i;
6458 
6459 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6460 	for (i = 1; i < tp->irq_cnt; i++)
6461 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6462 }
6463 
6464 static void tg3_napi_fini(struct tg3 *tp)
6465 {
6466 	int i;
6467 
6468 	for (i = 0; i < tp->irq_cnt; i++)
6469 		netif_napi_del(&tp->napi[i].napi);
6470 }
6471 
6472 static inline void tg3_netif_stop(struct tg3 *tp)
6473 {
6474 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6475 	tg3_napi_disable(tp);
6476 	netif_tx_disable(tp->dev);
6477 }
6478 
6479 static inline void tg3_netif_start(struct tg3 *tp)
6480 {
6481 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6482 	 * appropriate so long as all callers are assured to
6483 	 * have free tx slots (such as after tg3_init_hw)
6484 	 */
6485 	netif_tx_wake_all_queues(tp->dev);
6486 
6487 	tg3_napi_enable(tp);
6488 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6489 	tg3_enable_ints(tp);
6490 }
6491 
6492 static void tg3_irq_quiesce(struct tg3 *tp)
6493 {
6494 	int i;
6495 
6496 	BUG_ON(tp->irq_sync);
6497 
6498 	tp->irq_sync = 1;
6499 	smp_mb();
6500 
6501 	for (i = 0; i < tp->irq_cnt; i++)
6502 		synchronize_irq(tp->napi[i].irq_vec);
6503 }
6504 
6505 /* Fully shut down all tg3 driver activity elsewhere in the system.
6506  * If irq_sync is non-zero, the IRQ handlers must be synchronized
6507  * with as well.  Most of the time this is only necessary when
6508  * shutting down the device.
6509  */
6510 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6511 {
6512 	spin_lock_bh(&tp->lock);
6513 	if (irq_sync)
6514 		tg3_irq_quiesce(tp);
6515 }
6516 
6517 static inline void tg3_full_unlock(struct tg3 *tp)
6518 {
6519 	spin_unlock_bh(&tp->lock);
6520 }
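
/* A sketch of how the pair above is typically used around device
 * reconfiguration.  tg3_hypothetical_reconfig() is a made-up
 * placeholder; real callers re-enable interrupts (which clears
 * irq_sync) before the device is put back to work.
 */
#if 0
static void tg3_full_lock_example(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* also waits out in-flight IRQ handlers */
	tg3_hypothetical_reconfig(tp);
	tg3_full_unlock(tp);
}
#endif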
6521 
6522 /* One-shot MSI handler - the chip automatically disables the interrupt
6523  * after sending the MSI, so the driver doesn't have to do it.
6524  */
6525 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6526 {
6527 	struct tg3_napi *tnapi = dev_id;
6528 	struct tg3 *tp = tnapi->tp;
6529 
6530 	prefetch(tnapi->hw_status);
6531 	if (tnapi->rx_rcb)
6532 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6533 
6534 	if (likely(!tg3_irq_sync(tp)))
6535 		napi_schedule(&tnapi->napi);
6536 
6537 	return IRQ_HANDLED;
6538 }
6539 
6540 /* MSI ISR - No need to check for interrupt sharing and no need to
6541  * flush status block and interrupt mailbox. PCI ordering rules
6542  * guarantee that MSI will arrive after the status block.
6543  */
6544 static irqreturn_t tg3_msi(int irq, void *dev_id)
6545 {
6546 	struct tg3_napi *tnapi = dev_id;
6547 	struct tg3 *tp = tnapi->tp;
6548 
6549 	prefetch(tnapi->hw_status);
6550 	if (tnapi->rx_rcb)
6551 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6552 	/*
6553 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6554 	 * chip-internal interrupt pending events.
6555 	 * Writing non-zero to intr-mbox-0 additionally tells the
6556 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6557 	 * event coalescing.
6558 	 */
6559 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6560 	if (likely(!tg3_irq_sync(tp)))
6561 		napi_schedule(&tnapi->napi);
6562 
6563 	return IRQ_RETVAL(1);
6564 }
6565 
6566 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6567 {
6568 	struct tg3_napi *tnapi = dev_id;
6569 	struct tg3 *tp = tnapi->tp;
6570 	struct tg3_hw_status *sblk = tnapi->hw_status;
6571 	unsigned int handled = 1;
6572 
6573 	/* In INTx mode, it is possible for the interrupt to arrive at
6574 	 * the CPU before the status block write that preceded it lands.
6575 	 * Reading the PCI State register will confirm whether the
6576 	 * interrupt is ours and will flush the status block.
6577 	 */
6578 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6579 		if (tg3_flag(tp, CHIP_RESETTING) ||
6580 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6581 			handled = 0;
6582 			goto out;
6583 		}
6584 	}
6585 
6586 	/*
6587 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6588 	 * chip-internal interrupt pending events.
6589 	 * Writing non-zero to intr-mbox-0 additionally tells the
6590 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6591 	 * event coalescing.
6592 	 *
6593 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6594 	 * spurious interrupts.  The flush impacts performance but
6595 	 * excessive spurious interrupts can be worse in some cases.
6596 	 */
6597 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6598 	if (tg3_irq_sync(tp))
6599 		goto out;
6600 	sblk->status &= ~SD_STATUS_UPDATED;
6601 	if (likely(tg3_has_work(tnapi))) {
6602 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6603 		napi_schedule(&tnapi->napi);
6604 	} else {
6605 		/* No work, shared interrupt perhaps?  re-enable
6606 		 * interrupts, and flush that PCI write
6607 		 */
6608 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6609 			       0x00000000);
6610 	}
6611 out:
6612 	return IRQ_RETVAL(handled);
6613 }
6614 
6615 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6616 {
6617 	struct tg3_napi *tnapi = dev_id;
6618 	struct tg3 *tp = tnapi->tp;
6619 	struct tg3_hw_status *sblk = tnapi->hw_status;
6620 	unsigned int handled = 1;
6621 
6622 	/* In INTx mode, it is possible for the interrupt to arrive at
6623 	 * the CPU before the status block write that preceded it lands.
6624 	 * Reading the PCI State register will confirm whether the
6625 	 * interrupt is ours and will flush the status block.
6626 	 */
6627 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6628 		if (tg3_flag(tp, CHIP_RESETTING) ||
6629 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6630 			handled = 0;
6631 			goto out;
6632 		}
6633 	}
6634 
6635 	/*
6636 	 * writing any value to intr-mbox-0 clears PCI INTA# and
6637 	 * chip-internal interrupt pending events.
6638 	 * writing non-zero to intr-mbox-0 additionally tells the
6639 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6640 	 * event coalescing.
6641 	 *
6642 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6643 	 * spurious interrupts.  The flush impacts performance but
6644 	 * excessive spurious interrupts can be worse in some cases.
6645 	 */
6646 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6647 
6648 	/*
6649 	 * In a shared interrupt configuration, sometimes other devices'
6650 	 * interrupts will scream.  We record the current status tag here
6651 	 * so that the above check can report that the screaming interrupts
6652 	 * are unhandled.  Eventually they will be silenced.
6653 	 */
6654 	tnapi->last_irq_tag = sblk->status_tag;
6655 
6656 	if (tg3_irq_sync(tp))
6657 		goto out;
6658 
6659 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6660 
6661 	napi_schedule(&tnapi->napi);
6662 
6663 out:
6664 	return IRQ_RETVAL(handled);
6665 }
6666 
6667 /* ISR for interrupt test */
6668 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6669 {
6670 	struct tg3_napi *tnapi = dev_id;
6671 	struct tg3 *tp = tnapi->tp;
6672 	struct tg3_hw_status *sblk = tnapi->hw_status;
6673 
6674 	if ((sblk->status & SD_STATUS_UPDATED) ||
6675 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6676 		tg3_disable_ints(tp);
6677 		return IRQ_RETVAL(1);
6678 	}
6679 	return IRQ_RETVAL(0);
6680 }
6681 
6682 #ifdef CONFIG_NET_POLL_CONTROLLER
6683 static void tg3_poll_controller(struct net_device *dev)
6684 {
6685 	int i;
6686 	struct tg3 *tp = netdev_priv(dev);
6687 
6688 	for (i = 0; i < tp->irq_cnt; i++)
6689 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6690 }
6691 #endif
6692 
6693 static void tg3_tx_timeout(struct net_device *dev)
6694 {
6695 	struct tg3 *tp = netdev_priv(dev);
6696 
6697 	if (netif_msg_tx_err(tp)) {
6698 		netdev_err(dev, "transmit timed out, resetting\n");
6699 		tg3_dump_state(tp);
6700 	}
6701 
6702 	tg3_reset_task_schedule(tp);
6703 }
6704 
6705 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
6706 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6707 {
6708 	u32 base = (u32) mapping & 0xffffffff;
6709 
6710 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6711 }
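
/* Illustrative numbers for the test above: with mapping = 0xfffff000
 * and len = 0x2000, base + len + 8 wraps to 0x1008 < base, so the
 * buffer straddles a 4GB boundary and needs the workaround.  The
 * base > 0xffffdcc0 guard is a cheap prefilter: given the driver's
 * maximum buffer sizes, only buffers starting within 0x2340 (9024)
 * bytes of a boundary can possibly wrap.
 */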
6712 
6713 /* Test for DMA addresses > 40-bit */
6714 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6715 					  int len)
6716 {
6717 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6718 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6719 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6720 	return 0;
6721 #else
6722 	return 0;
6723 #endif
6724 }
6725 
6726 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6727 				 dma_addr_t mapping, u32 len, u32 flags,
6728 				 u32 mss, u32 vlan)
6729 {
6730 	txbd->addr_hi = ((u64) mapping >> 32);
6731 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6732 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6733 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6734 }
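
/* The packing above splits the 64-bit DMA address across two 32-bit
 * words and shares the remaining two words between length/flags and
 * mss/vlan.  For example, a 1514-byte frame with TXD_FLAG_END and no
 * TSO or VLAN yields len_flags = (1514 << TXD_LEN_SHIFT) |
 * TXD_FLAG_END and vlan_tag = 0.
 */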
6735 
6736 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6737 			    dma_addr_t map, u32 len, u32 flags,
6738 			    u32 mss, u32 vlan)
6739 {
6740 	struct tg3 *tp = tnapi->tp;
6741 	bool hwbug = false;
6742 
6743 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6744 		hwbug = true;
6745 
6746 	if (tg3_4g_overflow_test(map, len))
6747 		hwbug = true;
6748 
6749 	if (tg3_40bit_overflow_test(tp, map, len))
6750 		hwbug = true;
6751 
6752 	if (tp->dma_limit) {
6753 		u32 prvidx = *entry;
6754 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6755 		while (len > tp->dma_limit && *budget) {
6756 			u32 frag_len = tp->dma_limit;
6757 			len -= tp->dma_limit;
6758 
6759 			/* Avoid the 8-byte DMA problem */
6760 			if (len <= 8) {
6761 				len += tp->dma_limit / 2;
6762 				frag_len = tp->dma_limit / 2;
6763 			}
6764 
6765 			tnapi->tx_buffers[*entry].fragmented = true;
6766 
6767 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6768 				      frag_len, tmp_flag, mss, vlan);
6769 			*budget -= 1;
6770 			prvidx = *entry;
6771 			*entry = NEXT_TX(*entry);
6772 
6773 			map += frag_len;
6774 		}
6775 
6776 		if (len) {
6777 			if (*budget) {
6778 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6779 					      len, flags, mss, vlan);
6780 				*budget -= 1;
6781 				*entry = NEXT_TX(*entry);
6782 			} else {
6783 				hwbug = true;
6784 				tnapi->tx_buffers[prvidx].fragmented = false;
6785 			}
6786 		}
6787 	} else {
6788 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6789 			      len, flags, mss, vlan);
6790 		*entry = NEXT_TX(*entry);
6791 	}
6792 
6793 	return hwbug;
6794 }
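
/* A worked example of the dma_limit splitting above (illustrative
 * only, with dma_limit = 4096): a 4100-byte fragment would naively
 * split into 4096 + 4 bytes, tripping the short-DMA errata on the
 * 4-byte tail.  The len <= 8 rebalancing instead emits a 2048-byte
 * BD and leaves 2052 bytes for the final one, so no segment is ever
 * 8 bytes or shorter.
 */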
6795 
6796 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6797 {
6798 	int i;
6799 	struct sk_buff *skb;
6800 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6801 
6802 	skb = txb->skb;
6803 	txb->skb = NULL;
6804 
6805 	pci_unmap_single(tnapi->tp->pdev,
6806 			 dma_unmap_addr(txb, mapping),
6807 			 skb_headlen(skb),
6808 			 PCI_DMA_TODEVICE);
6809 
6810 	while (txb->fragmented) {
6811 		txb->fragmented = false;
6812 		entry = NEXT_TX(entry);
6813 		txb = &tnapi->tx_buffers[entry];
6814 	}
6815 
6816 	for (i = 0; i <= last; i++) {
6817 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6818 
6819 		entry = NEXT_TX(entry);
6820 		txb = &tnapi->tx_buffers[entry];
6821 
6822 		pci_unmap_page(tnapi->tp->pdev,
6823 			       dma_unmap_addr(txb, mapping),
6824 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6825 
6826 		while (txb->fragmented) {
6827 			txb->fragmented = false;
6828 			entry = NEXT_TX(entry);
6829 			txb = &tnapi->tx_buffers[entry];
6830 		}
6831 	}
6832 }
6833 
6834 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6835 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6836 				       struct sk_buff **pskb,
6837 				       u32 *entry, u32 *budget,
6838 				       u32 base_flags, u32 mss, u32 vlan)
6839 {
6840 	struct tg3 *tp = tnapi->tp;
6841 	struct sk_buff *new_skb, *skb = *pskb;
6842 	dma_addr_t new_addr = 0;
6843 	int ret = 0;
6844 
6845 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6846 		new_skb = skb_copy(skb, GFP_ATOMIC);
6847 	else {
6848 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6849 
6850 		new_skb = skb_copy_expand(skb,
6851 					  skb_headroom(skb) + more_headroom,
6852 					  skb_tailroom(skb), GFP_ATOMIC);
6853 	}
6854 
6855 	if (!new_skb) {
6856 		ret = -1;
6857 	} else {
6858 		/* New SKB is guaranteed to be linear. */
6859 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6860 					  PCI_DMA_TODEVICE);
6861 		/* Make sure the mapping succeeded */
6862 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6863 			dev_kfree_skb(new_skb);
6864 			ret = -1;
6865 		} else {
6866 			u32 save_entry = *entry;
6867 
6868 			base_flags |= TXD_FLAG_END;
6869 
6870 			tnapi->tx_buffers[*entry].skb = new_skb;
6871 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6872 					   mapping, new_addr);
6873 
6874 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6875 					    new_skb->len, base_flags,
6876 					    mss, vlan)) {
6877 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6878 				dev_kfree_skb(new_skb);
6879 				ret = -1;
6880 			}
6881 		}
6882 	}
6883 
6884 	dev_kfree_skb(skb);
6885 	*pskb = new_skb;
6886 	return ret;
6887 }
6888 
6889 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6890 
6891 /* Use GSO to work around a rare TSO bug that may be triggered when the
6892  * TSO header is greater than 80 bytes.
6893  */
6894 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6895 {
6896 	struct sk_buff *segs, *nskb;
6897 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6898 
6899 	/* Estimate the number of fragments in the worst case */
6900 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6901 		netif_stop_queue(tp->dev);
6902 
6903 		/* netif_tx_stop_queue() must be done before checking
6904 		 * tx index in tg3_tx_avail() below, because in
6905 		 * tg3_tx(), we update tx index before checking for
6906 		 * netif_tx_queue_stopped().
6907 		 */
6908 		smp_mb();
6909 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6910 			return NETDEV_TX_BUSY;
6911 
6912 		netif_wake_queue(tp->dev);
6913 	}
6914 
6915 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6916 	if (IS_ERR(segs))
6917 		goto tg3_tso_bug_end;
6918 
6919 	do {
6920 		nskb = segs;
6921 		segs = segs->next;
6922 		nskb->next = NULL;
6923 		tg3_start_xmit(nskb, tp->dev);
6924 	} while (segs);
6925 
6926 tg3_tso_bug_end:
6927 	dev_kfree_skb(skb);
6928 
6929 	return NETDEV_TX_OK;
6930 }
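
/* tg3_tso_bug() leans on the stack for the workaround:
 * skb_gso_segment() splits the oversized-header TSO packet into
 * already-segmented frames, and each one is fed back through
 * tg3_start_xmit() as an ordinary non-TSO transmit, trading the GSO
 * copy overhead for correctness.
 */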
6931 
6932 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6933  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6934  */
6935 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6936 {
6937 	struct tg3 *tp = netdev_priv(dev);
6938 	u32 len, entry, base_flags, mss, vlan = 0;
6939 	u32 budget;
6940 	int i = -1, would_hit_hwbug;
6941 	dma_addr_t mapping;
6942 	struct tg3_napi *tnapi;
6943 	struct netdev_queue *txq;
6944 	unsigned int last;
6945 
6946 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6947 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6948 	if (tg3_flag(tp, ENABLE_TSS))
6949 		tnapi++;
6950 
6951 	budget = tg3_tx_avail(tnapi);
6952 
6953 	/* We are running in BH disabled context with netif_tx_lock
6954 	 * and TX reclaim runs via tp->napi.poll inside of a software
6955 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6956 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6957 	 */
6958 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6959 		if (!netif_tx_queue_stopped(txq)) {
6960 			netif_tx_stop_queue(txq);
6961 
6962 			/* This is a hard error, log it. */
6963 			netdev_err(dev,
6964 				   "BUG! Tx Ring full when queue awake!\n");
6965 		}
6966 		return NETDEV_TX_BUSY;
6967 	}
6968 
6969 	entry = tnapi->tx_prod;
6970 	base_flags = 0;
6971 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6972 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6973 
6974 	mss = skb_shinfo(skb)->gso_size;
6975 	if (mss) {
6976 		struct iphdr *iph;
6977 		u32 tcp_opt_len, hdr_len;
6978 
6979 		if (skb_header_cloned(skb) &&
6980 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6981 			goto drop;
6982 
6983 		iph = ip_hdr(skb);
6984 		tcp_opt_len = tcp_optlen(skb);
6985 
6986 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6987 
6988 		if (!skb_is_gso_v6(skb)) {
6989 			iph->check = 0;
6990 			iph->tot_len = htons(mss + hdr_len);
6991 		}
6992 
6993 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6994 		    tg3_flag(tp, TSO_BUG))
6995 			return tg3_tso_bug(tp, skb);
6996 
6997 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6998 			       TXD_FLAG_CPU_POST_DMA);
6999 
7000 		if (tg3_flag(tp, HW_TSO_1) ||
7001 		    tg3_flag(tp, HW_TSO_2) ||
7002 		    tg3_flag(tp, HW_TSO_3)) {
7003 			tcp_hdr(skb)->check = 0;
7004 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7005 		} else
7006 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7007 								 iph->daddr, 0,
7008 								 IPPROTO_TCP,
7009 								 0);
7010 
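		/* The newer HW TSO engines want the precomputed header
		 * length folded into spare bits of the mss word and
		 * base_flags, while the older engines instead take the
		 * IP and TCP option lengths as a small "tsflags" value.
		 */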
7011 		if (tg3_flag(tp, HW_TSO_3)) {
7012 			mss |= (hdr_len & 0xc) << 12;
7013 			if (hdr_len & 0x10)
7014 				base_flags |= 0x00000010;
7015 			base_flags |= (hdr_len & 0x3e0) << 5;
7016 		} else if (tg3_flag(tp, HW_TSO_2))
7017 			mss |= hdr_len << 9;
7018 		else if (tg3_flag(tp, HW_TSO_1) ||
7019 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7020 			if (tcp_opt_len || iph->ihl > 5) {
7021 				int tsflags;
7022 
7023 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7024 				mss |= (tsflags << 11);
7025 			}
7026 		} else {
7027 			if (tcp_opt_len || iph->ihl > 5) {
7028 				int tsflags;
7029 
7030 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7031 				base_flags |= tsflags << 12;
7032 			}
7033 		}
7034 	}
7035 
7036 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7037 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7038 		base_flags |= TXD_FLAG_JMB_PKT;
7039 
7040 	if (vlan_tx_tag_present(skb)) {
7041 		base_flags |= TXD_FLAG_VLAN;
7042 		vlan = vlan_tx_tag_get(skb);
7043 	}
7044 
7045 	len = skb_headlen(skb);
7046 
7047 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7048 	if (pci_dma_mapping_error(tp->pdev, mapping))
7049 		goto drop;
7050 
7051 
7052 	tnapi->tx_buffers[entry].skb = skb;
7053 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7054 
7055 	would_hit_hwbug = 0;
7056 
7057 	if (tg3_flag(tp, 5701_DMA_BUG))
7058 		would_hit_hwbug = 1;
7059 
7060 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7061 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7062 			    mss, vlan)) {
7063 		would_hit_hwbug = 1;
7064 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7065 		u32 tmp_mss = mss;
7066 
7067 		if (!tg3_flag(tp, HW_TSO_1) &&
7068 		    !tg3_flag(tp, HW_TSO_2) &&
7069 		    !tg3_flag(tp, HW_TSO_3))
7070 			tmp_mss = 0;
7071 
7072 		/* Now loop through additional data
7073 		 * fragments, and queue them.
7074 		 */
7075 		last = skb_shinfo(skb)->nr_frags - 1;
7076 		for (i = 0; i <= last; i++) {
7077 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7078 
7079 			len = skb_frag_size(frag);
7080 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7081 						   len, DMA_TO_DEVICE);
7082 
7083 			tnapi->tx_buffers[entry].skb = NULL;
7084 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7085 					   mapping);
7086 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7087 				goto dma_error;
7088 
7089 			if (!budget ||
7090 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7091 					    len, base_flags |
7092 					    ((i == last) ? TXD_FLAG_END : 0),
7093 					    tmp_mss, vlan)) {
7094 				would_hit_hwbug = 1;
7095 				break;
7096 			}
7097 		}
7098 	}
7099 
7100 	if (would_hit_hwbug) {
7101 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7102 
7103 		/* If the workaround fails due to memory/mapping
7104 		 * failure, silently drop this packet.
7105 		 */
7106 		entry = tnapi->tx_prod;
7107 		budget = tg3_tx_avail(tnapi);
7108 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7109 						base_flags, mss, vlan))
7110 			goto drop_nofree;
7111 	}
7112 
7113 	skb_tx_timestamp(skb);
7114 	netdev_tx_sent_queue(txq, skb->len);
7115 
7116 	/* Sync BD data before updating mailbox */
7117 	wmb();
7118 
7119 	/* Packets are ready, update Tx producer idx local and on card. */
7120 	tw32_tx_mbox(tnapi->prodmbox, entry);
7121 
7122 	tnapi->tx_prod = entry;
7123 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7124 		netif_tx_stop_queue(txq);
7125 
7126 		/* netif_tx_stop_queue() must be done before checking
7127 		 * tx index in tg3_tx_avail() below, because in
7128 		 * tg3_tx(), we update tx index before checking for
7129 		 * netif_tx_queue_stopped().
7130 		 */
7131 		smp_mb();
7132 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7133 			netif_tx_wake_queue(txq);
7134 	}
7135 
7136 	mmiowb();
7137 	return NETDEV_TX_OK;
7138 
7139 dma_error:
7140 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7141 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7142 drop:
7143 	dev_kfree_skb(skb);
7144 drop_nofree:
7145 	tp->tx_dropped++;
7146 	return NETDEV_TX_OK;
7147 }
7148 
7149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7150 {
7151 	if (enable) {
7152 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7153 				  MAC_MODE_PORT_MODE_MASK);
7154 
7155 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7156 
7157 		if (!tg3_flag(tp, 5705_PLUS))
7158 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7159 
7160 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7161 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7162 		else
7163 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7164 	} else {
7165 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7166 
7167 		if (tg3_flag(tp, 5705_PLUS) ||
7168 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7169 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7170 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7171 	}
7172 
7173 	tw32(MAC_MODE, tp->mac_mode);
7174 	udelay(40);
7175 }
7176 
7177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7178 {
7179 	u32 val, bmcr, mac_mode, ptest = 0;
7180 
7181 	tg3_phy_toggle_apd(tp, false);
7182 	tg3_phy_toggle_automdix(tp, 0);
7183 
7184 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7185 		return -EIO;
7186 
7187 	bmcr = BMCR_FULLDPLX;
7188 	switch (speed) {
7189 	case SPEED_10:
7190 		break;
7191 	case SPEED_100:
7192 		bmcr |= BMCR_SPEED100;
7193 		break;
7194 	case SPEED_1000:
7195 	default:
7196 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7197 			speed = SPEED_100;
7198 			bmcr |= BMCR_SPEED100;
7199 		} else {
7200 			speed = SPEED_1000;
7201 			bmcr |= BMCR_SPEED1000;
7202 		}
7203 	}
7204 
7205 	if (extlpbk) {
7206 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7207 			tg3_readphy(tp, MII_CTRL1000, &val);
7208 			val |= CTL1000_AS_MASTER |
7209 			       CTL1000_ENABLE_MASTER;
7210 			tg3_writephy(tp, MII_CTRL1000, val);
7211 		} else {
7212 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7213 				MII_TG3_FET_PTEST_TRIM_2;
7214 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7215 		}
7216 	} else
7217 		bmcr |= BMCR_LOOPBACK;
7218 
7219 	tg3_writephy(tp, MII_BMCR, bmcr);
7220 
7221 	/* The write needs to be flushed for the FETs */
7222 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7223 		tg3_readphy(tp, MII_BMCR, &bmcr);
7224 
7225 	udelay(40);
7226 
7227 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7228 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7229 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7230 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7231 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7232 
7233 		/* The write needs to be flushed for the AC131 */
7234 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7235 	}
7236 
7237 	/* Reset to prevent intermittently losing the first rx packet */
7238 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7239 	    tg3_flag(tp, 5780_CLASS)) {
7240 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7241 		udelay(10);
7242 		tw32_f(MAC_RX_MODE, tp->rx_mode);
7243 	}
7244 
7245 	mac_mode = tp->mac_mode &
7246 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7247 	if (speed == SPEED_1000)
7248 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7249 	else
7250 		mac_mode |= MAC_MODE_PORT_MODE_MII;
7251 
7252 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7253 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7254 
7255 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7256 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7257 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7258 			mac_mode |= MAC_MODE_LINK_POLARITY;
7259 
7260 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7261 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7262 	}
7263 
7264 	tw32(MAC_MODE, mac_mode);
7265 	udelay(40);
7266 
7267 	return 0;
7268 }
7269 
7270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7271 {
7272 	struct tg3 *tp = netdev_priv(dev);
7273 
7274 	if (features & NETIF_F_LOOPBACK) {
7275 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7276 			return;
7277 
7278 		spin_lock_bh(&tp->lock);
7279 		tg3_mac_loopback(tp, true);
7280 		netif_carrier_on(tp->dev);
7281 		spin_unlock_bh(&tp->lock);
7282 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7283 	} else {
7284 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7285 			return;
7286 
7287 		spin_lock_bh(&tp->lock);
7288 		tg3_mac_loopback(tp, false);
7289 		/* Force link status check */
7290 		tg3_setup_phy(tp, 1);
7291 		spin_unlock_bh(&tp->lock);
7292 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7293 	}
7294 }
7295 
7296 static netdev_features_t tg3_fix_features(struct net_device *dev,
7297 	netdev_features_t features)
7298 {
7299 	struct tg3 *tp = netdev_priv(dev);
7300 
7301 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7302 		features &= ~NETIF_F_ALL_TSO;
7303 
7304 	return features;
7305 }
7306 
7307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7308 {
7309 	netdev_features_t changed = dev->features ^ features;
7310 
7311 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7312 		tg3_set_loopback(dev, features);
7313 
7314 	return 0;
7315 }
7316 
7317 static void tg3_rx_prodring_free(struct tg3 *tp,
7318 				 struct tg3_rx_prodring_set *tpr)
7319 {
7320 	int i;
7321 
7322 	if (tpr != &tp->napi[0].prodring) {
7323 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7324 		     i = (i + 1) & tp->rx_std_ring_mask)
7325 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7326 					tp->rx_pkt_map_sz);
7327 
7328 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7329 			for (i = tpr->rx_jmb_cons_idx;
7330 			     i != tpr->rx_jmb_prod_idx;
7331 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7332 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7333 						TG3_RX_JMB_MAP_SZ);
7334 			}
7335 		}
7336 
7337 		return;
7338 	}
7339 
7340 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7341 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7342 				tp->rx_pkt_map_sz);
7343 
7344 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7345 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7346 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7347 					TG3_RX_JMB_MAP_SZ);
7348 	}
7349 }
7350 
7351 /* Initialize rx rings for packet processing.
7352  *
7353  * The chip has been shut down and the driver detached from
7354  * the networking core, so no interrupts or new tx packets will
7355  * end up in the driver.  tp->{tx,}lock are held and thus
7356  * we may not sleep.
7357  */
7358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7359 				 struct tg3_rx_prodring_set *tpr)
7360 {
7361 	u32 i, rx_pkt_dma_sz;
7362 
7363 	tpr->rx_std_cons_idx = 0;
7364 	tpr->rx_std_prod_idx = 0;
7365 	tpr->rx_jmb_cons_idx = 0;
7366 	tpr->rx_jmb_prod_idx = 0;
7367 
7368 	if (tpr != &tp->napi[0].prodring) {
7369 		memset(&tpr->rx_std_buffers[0], 0,
7370 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7371 		if (tpr->rx_jmb_buffers)
7372 			memset(&tpr->rx_jmb_buffers[0], 0,
7373 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7374 		goto done;
7375 	}
7376 
7377 	/* Zero out all descriptors. */
7378 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7379 
7380 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7381 	if (tg3_flag(tp, 5780_CLASS) &&
7382 	    tp->dev->mtu > ETH_DATA_LEN)
7383 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7384 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7385 
7386 	/* Initialize invariants of the rings; we only set this
7387 	 * stuff once.  This works because the card does not
7388 	 * write into the rx buffer posting rings.
7389 	 */
7390 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7391 		struct tg3_rx_buffer_desc *rxd;
7392 
7393 		rxd = &tpr->rx_std[i];
7394 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7395 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7396 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7397 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7398 	}
7399 
	/* Now allocate fresh rx data buffers for the standard ring. */
7401 	for (i = 0; i < tp->rx_pending; i++) {
7402 		unsigned int frag_size;
7403 
7404 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7405 				      &frag_size) < 0) {
7406 			netdev_warn(tp->dev,
7407 				    "Using a smaller RX standard ring. Only "
7408 				    "%d out of %d buffers were allocated "
7409 				    "successfully\n", i, tp->rx_pending);
7410 			if (i == 0)
7411 				goto initfail;
7412 			tp->rx_pending = i;
7413 			break;
7414 		}
7415 	}
7416 
7417 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7418 		goto done;
7419 
7420 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7421 
7422 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7423 		goto done;
7424 
7425 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7426 		struct tg3_rx_buffer_desc *rxd;
7427 
7428 		rxd = &tpr->rx_jmb[i].std;
7429 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7430 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7431 				  RXD_FLAG_JUMBO;
7432 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7433 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7434 	}
7435 
7436 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7437 		unsigned int frag_size;
7438 
7439 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7440 				      &frag_size) < 0) {
7441 			netdev_warn(tp->dev,
7442 				    "Using a smaller RX jumbo ring. Only %d "
7443 				    "out of %d buffers were allocated "
7444 				    "successfully\n", i, tp->rx_jumbo_pending);
7445 			if (i == 0)
7446 				goto initfail;
7447 			tp->rx_jumbo_pending = i;
7448 			break;
7449 		}
7450 	}
7451 
7452 done:
7453 	return 0;
7454 
7455 initfail:
7456 	tg3_rx_prodring_free(tp, tpr);
7457 	return -ENOMEM;
7458 }
7459 
7460 static void tg3_rx_prodring_fini(struct tg3 *tp,
7461 				 struct tg3_rx_prodring_set *tpr)
7462 {
7463 	kfree(tpr->rx_std_buffers);
7464 	tpr->rx_std_buffers = NULL;
7465 	kfree(tpr->rx_jmb_buffers);
7466 	tpr->rx_jmb_buffers = NULL;
7467 	if (tpr->rx_std) {
7468 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7469 				  tpr->rx_std, tpr->rx_std_mapping);
7470 		tpr->rx_std = NULL;
7471 	}
7472 	if (tpr->rx_jmb) {
7473 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7474 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7475 		tpr->rx_jmb = NULL;
7476 	}
7477 }
7478 
7479 static int tg3_rx_prodring_init(struct tg3 *tp,
7480 				struct tg3_rx_prodring_set *tpr)
7481 {
7482 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7483 				      GFP_KERNEL);
7484 	if (!tpr->rx_std_buffers)
7485 		return -ENOMEM;
7486 
7487 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7488 					 TG3_RX_STD_RING_BYTES(tp),
7489 					 &tpr->rx_std_mapping,
7490 					 GFP_KERNEL);
7491 	if (!tpr->rx_std)
7492 		goto err_out;
7493 
7494 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7495 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7496 					      GFP_KERNEL);
7497 		if (!tpr->rx_jmb_buffers)
7498 			goto err_out;
7499 
7500 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7501 						 TG3_RX_JMB_RING_BYTES(tp),
7502 						 &tpr->rx_jmb_mapping,
7503 						 GFP_KERNEL);
7504 		if (!tpr->rx_jmb)
7505 			goto err_out;
7506 	}
7507 
7508 	return 0;
7509 
7510 err_out:
7511 	tg3_rx_prodring_fini(tp, tpr);
7512 	return -ENOMEM;
7513 }
7514 
7515 /* Free up pending packets in all rx/tx rings.
7516  *
7517  * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
7519  * end up in the driver.  tp->{tx,}lock is not held and we are not
7520  * in an interrupt context and thus may sleep.
7521  */
7522 static void tg3_free_rings(struct tg3 *tp)
7523 {
7524 	int i, j;
7525 
7526 	for (j = 0; j < tp->irq_cnt; j++) {
7527 		struct tg3_napi *tnapi = &tp->napi[j];
7528 
7529 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7530 
7531 		if (!tnapi->tx_buffers)
7532 			continue;
7533 
7534 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7535 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7536 
7537 			if (!skb)
7538 				continue;
7539 
7540 			tg3_tx_skb_unmap(tnapi, i,
7541 					 skb_shinfo(skb)->nr_frags - 1);
7542 
7543 			dev_kfree_skb_any(skb);
7544 		}
7545 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7546 	}
7547 }
7548 
7549 /* Initialize tx/rx rings for packet processing.
7550  *
7551  * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
7553  * end up in the driver.  tp->{tx,}lock are held and thus
7554  * we may not sleep.
7555  */
7556 static int tg3_init_rings(struct tg3 *tp)
7557 {
7558 	int i;
7559 
7560 	/* Free up all the SKBs. */
7561 	tg3_free_rings(tp);
7562 
7563 	for (i = 0; i < tp->irq_cnt; i++) {
7564 		struct tg3_napi *tnapi = &tp->napi[i];
7565 
7566 		tnapi->last_tag = 0;
7567 		tnapi->last_irq_tag = 0;
		/* The memset clears the entire status block, including
		 * the status and status_tag words.
		 */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7571 
7572 		tnapi->tx_prod = 0;
7573 		tnapi->tx_cons = 0;
7574 		if (tnapi->tx_ring)
7575 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7576 
7577 		tnapi->rx_rcb_ptr = 0;
7578 		if (tnapi->rx_rcb)
7579 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7580 
7581 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7582 			tg3_free_rings(tp);
7583 			return -ENOMEM;
7584 		}
7585 	}
7586 
7587 	return 0;
7588 }
7589 
7590 /*
7591  * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
7593  */
7594 static void tg3_free_consistent(struct tg3 *tp)
7595 {
7596 	int i;
7597 
7598 	for (i = 0; i < tp->irq_cnt; i++) {
7599 		struct tg3_napi *tnapi = &tp->napi[i];
7600 
7601 		if (tnapi->tx_ring) {
7602 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7603 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7604 			tnapi->tx_ring = NULL;
7605 		}
7606 
7607 		kfree(tnapi->tx_buffers);
7608 		tnapi->tx_buffers = NULL;
7609 
7610 		if (tnapi->rx_rcb) {
7611 			dma_free_coherent(&tp->pdev->dev,
7612 					  TG3_RX_RCB_RING_BYTES(tp),
7613 					  tnapi->rx_rcb,
7614 					  tnapi->rx_rcb_mapping);
7615 			tnapi->rx_rcb = NULL;
7616 		}
7617 
7618 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7619 
7620 		if (tnapi->hw_status) {
7621 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7622 					  tnapi->hw_status,
7623 					  tnapi->status_mapping);
7624 			tnapi->hw_status = NULL;
7625 		}
7626 	}
7627 
7628 	if (tp->hw_stats) {
7629 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7630 				  tp->hw_stats, tp->stats_mapping);
7631 		tp->hw_stats = NULL;
7632 	}
7633 }
7634 
7635 /*
7636  * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
7638  */
7639 static int tg3_alloc_consistent(struct tg3 *tp)
7640 {
7641 	int i;
7642 
7643 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7644 					  sizeof(struct tg3_hw_stats),
7645 					  &tp->stats_mapping,
7646 					  GFP_KERNEL);
7647 	if (!tp->hw_stats)
7648 		goto err_out;
7649 
7650 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7651 
7652 	for (i = 0; i < tp->irq_cnt; i++) {
7653 		struct tg3_napi *tnapi = &tp->napi[i];
7654 		struct tg3_hw_status *sblk;
7655 
7656 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7657 						      TG3_HW_STATUS_SIZE,
7658 						      &tnapi->status_mapping,
7659 						      GFP_KERNEL);
7660 		if (!tnapi->hw_status)
7661 			goto err_out;
7662 
7663 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7664 		sblk = tnapi->hw_status;
7665 
7666 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7667 			goto err_out;
7668 
7669 		/* If multivector TSS is enabled, vector 0 does not handle
7670 		 * tx interrupts.  Don't allocate any resources for it.
7671 		 */
7672 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7673 		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					sizeof(struct tg3_tx_ring_info),
					GFP_KERNEL);
7677 			if (!tnapi->tx_buffers)
7678 				goto err_out;
7679 
7680 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7681 							    TG3_TX_RING_BYTES,
7682 							&tnapi->tx_desc_mapping,
7683 							    GFP_KERNEL);
7684 			if (!tnapi->tx_ring)
7685 				goto err_out;
7686 		}
7687 
7688 		/*
7689 		 * When RSS is enabled, the status block format changes
7690 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7691 		 * and "rx_mini_consumer" members get mapped to the
7692 		 * other three rx return ring producer indexes.
7693 		 */
7694 		switch (i) {
7695 		default:
7696 			if (tg3_flag(tp, ENABLE_RSS)) {
7697 				tnapi->rx_rcb_prod_idx = NULL;
7698 				break;
7699 			}
7700 			/* Fall through */
7701 		case 1:
7702 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7703 			break;
7704 		case 2:
7705 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7706 			break;
7707 		case 3:
7708 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7709 			break;
7710 		case 4:
7711 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7712 			break;
7713 		}
7714 
7715 		/*
7716 		 * If multivector RSS is enabled, vector 0 does not handle
7717 		 * rx or tx interrupts.  Don't allocate any resources for it.
7718 		 */
7719 		if (!i && tg3_flag(tp, ENABLE_RSS))
7720 			continue;
7721 
7722 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7723 						   TG3_RX_RCB_RING_BYTES(tp),
7724 						   &tnapi->rx_rcb_mapping,
7725 						   GFP_KERNEL);
7726 		if (!tnapi->rx_rcb)
7727 			goto err_out;
7728 
7729 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7730 	}
7731 
7732 	return 0;
7733 
7734 err_out:
7735 	tg3_free_consistent(tp);
7736 	return -ENOMEM;
7737 }
7738 
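/* Worst case, the polling loops below spin for MAX_WAIT_CNT
 * iterations of udelay(100), i.e. roughly 100 ms.
 */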
7739 #define MAX_WAIT_CNT 1000
7740 
7741 /* To stop a block, clear the enable bit and poll till it
7742  * clears.  tp->lock is held.
7743  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit,
			  int silent)
7745 {
7746 	unsigned int i;
7747 	u32 val;
7748 
7749 	if (tg3_flag(tp, 5705_PLUS)) {
7750 		switch (ofs) {
7751 		case RCVLSC_MODE:
7752 		case DMAC_MODE:
7753 		case MBFREE_MODE:
7754 		case BUFMGR_MODE:
7755 		case MEMARB_MODE:
7756 			/* We can't enable/disable these bits of the
			 * 5705/5750; just say success.
7758 			 */
7759 			return 0;
7760 
7761 		default:
7762 			break;
7763 		}
7764 	}
7765 
7766 	val = tr32(ofs);
7767 	val &= ~enable_bit;
7768 	tw32_f(ofs, val);
7769 
7770 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7771 		udelay(100);
7772 		val = tr32(ofs);
7773 		if ((val & enable_bit) == 0)
7774 			break;
7775 	}
7776 
7777 	if (i == MAX_WAIT_CNT && !silent) {
7778 		dev_err(&tp->pdev->dev,
7779 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7780 			ofs, enable_bit);
7781 		return -ENODEV;
7782 	}
7783 
7784 	return 0;
7785 }
7786 
7787 /* tp->lock is held. */
7788 static int tg3_abort_hw(struct tg3 *tp, int silent)
7789 {
7790 	int i, err;
7791 
7792 	tg3_disable_ints(tp);
7793 
7794 	tp->rx_mode &= ~RX_MODE_ENABLE;
7795 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7796 	udelay(10);
7797 
7798 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7799 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7800 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7801 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7802 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7803 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7804 
7805 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7806 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7807 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7808 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7809 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7810 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7811 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7812 
7813 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7814 	tw32_f(MAC_MODE, tp->mac_mode);
7815 	udelay(40);
7816 
7817 	tp->tx_mode &= ~TX_MODE_ENABLE;
7818 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7819 
7820 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7821 		udelay(100);
7822 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7823 			break;
7824 	}
7825 	if (i >= MAX_WAIT_CNT) {
7826 		dev_err(&tp->pdev->dev,
7827 			"%s timed out, TX_MODE_ENABLE will not clear "
7828 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7829 		err |= -ENODEV;
7830 	}
7831 
7832 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7833 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7834 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7835 
7836 	tw32(FTQ_RESET, 0xffffffff);
7837 	tw32(FTQ_RESET, 0x00000000);
7838 
7839 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7840 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7841 
7842 	for (i = 0; i < tp->irq_cnt; i++) {
7843 		struct tg3_napi *tnapi = &tp->napi[i];
7844 		if (tnapi->hw_status)
7845 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7846 	}
7847 
7848 	return err;
7849 }
7850 
7851 /* Save PCI command register before chip reset */
7852 static void tg3_save_pci_state(struct tg3 *tp)
7853 {
7854 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7855 }
7856 
7857 /* Restore PCI state after chip reset */
7858 static void tg3_restore_pci_state(struct tg3 *tp)
7859 {
7860 	u32 val;
7861 
7862 	/* Re-enable indirect register accesses. */
7863 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7864 			       tp->misc_host_ctrl);
7865 
7866 	/* Set MAX PCI retry to zero. */
7867 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7868 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7869 	    tg3_flag(tp, PCIX_MODE))
7870 		val |= PCISTATE_RETRY_SAME_DMA;
7871 	/* Allow reads and writes to the APE register and memory space. */
7872 	if (tg3_flag(tp, ENABLE_APE))
7873 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7874 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7875 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7876 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7877 
7878 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7879 
7880 	if (!tg3_flag(tp, PCI_EXPRESS)) {
7881 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7882 				      tp->pci_cacheline_sz);
7883 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7884 				      tp->pci_lat_timer);
7885 	}
7886 
7887 	/* Make sure PCI-X relaxed ordering bit is clear. */
7888 	if (tg3_flag(tp, PCIX_MODE)) {
7889 		u16 pcix_cmd;
7890 
7891 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7892 				     &pcix_cmd);
7893 		pcix_cmd &= ~PCI_X_CMD_ERO;
7894 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7895 				      pcix_cmd);
7896 	}
7897 
	if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on 5780 will reset the MSI enable bit,
		 * so we need to restore it.
7902 		 */
7903 		if (tg3_flag(tp, USING_MSI)) {
7904 			u16 ctrl;
7905 
7906 			pci_read_config_word(tp->pdev,
7907 					     tp->msi_cap + PCI_MSI_FLAGS,
7908 					     &ctrl);
7909 			pci_write_config_word(tp->pdev,
7910 					      tp->msi_cap + PCI_MSI_FLAGS,
7911 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7912 			val = tr32(MSGINT_MODE);
7913 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7914 		}
7915 	}
7916 }
7917 
7918 /* tp->lock is held. */
7919 static int tg3_chip_reset(struct tg3 *tp)
7920 {
7921 	u32 val;
7922 	void (*write_op)(struct tg3 *, u32, u32);
7923 	int i, err;
7924 
7925 	tg3_nvram_lock(tp);
7926 
7927 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7928 
7929 	/* No matching tg3_nvram_unlock() after this because
	 * the chip reset below will undo the nvram lock.
7931 	 */
7932 	tp->nvram_lock_cnt = 0;
7933 
7934 	/* GRC_MISC_CFG core clock reset will clear the memory
7935 	 * enable bit in PCI register 4 and the MSI enable bit
7936 	 * on some chips, so we save relevant registers here.
7937 	 */
7938 	tg3_save_pci_state(tp);
7939 
7940 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7941 	    tg3_flag(tp, 5755_PLUS))
7942 		tw32(GRC_FASTBOOT_PC, 0);
7943 
7944 	/*
7945 	 * We must avoid the readl() that normally takes place.
	 * It can lock machines, cause machine checks, and do other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround while we do the reset.
7949 	 */
7950 	write_op = tp->write32;
7951 	if (write_op == tg3_write_flush_reg32)
7952 		tp->write32 = tg3_write32;
7953 
7954 	/* Prevent the irq handler from reading or writing PCI registers
7955 	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
7958 	 * sharing or irqpoll.
7959 	 */
7960 	tg3_flag_set(tp, CHIP_RESETTING);
7961 	for (i = 0; i < tp->irq_cnt; i++) {
7962 		struct tg3_napi *tnapi = &tp->napi[i];
7963 		if (tnapi->hw_status) {
7964 			tnapi->hw_status->status = 0;
7965 			tnapi->hw_status->status_tag = 0;
7966 		}
7967 		tnapi->last_tag = 0;
7968 		tnapi->last_irq_tag = 0;
7969 	}
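	/* Make the cleared tags visible to a concurrently running irq
	 * handler before waiting for in-flight handlers below.
	 */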
7970 	smp_mb();
7971 
7972 	for (i = 0; i < tp->irq_cnt; i++)
7973 		synchronize_irq(tp->napi[i].irq_vec);
7974 
7975 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7976 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7977 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7978 	}
7979 
7980 	/* do the reset */
7981 	val = GRC_MISC_CFG_CORECLK_RESET;
7982 
7983 	if (tg3_flag(tp, PCI_EXPRESS)) {
7984 		/* Force PCIe 1.0a mode */
7985 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7986 		    !tg3_flag(tp, 57765_PLUS) &&
7987 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7988 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7989 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7990 
7991 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7992 			tw32(GRC_MISC_CFG, (1 << 29));
7993 			val |= (1 << 29);
7994 		}
7995 	}
7996 
7997 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7998 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7999 		tw32(GRC_VCPU_EXT_CTRL,
8000 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8001 	}
8002 
	/* Manage GPHY power for all CPMU-absent PCIe devices. */
8004 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8005 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8006 
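	/* This write issues the core-clock reset. */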
8007 	tw32(GRC_MISC_CFG, val);
8008 
8009 	/* restore 5701 hardware bug workaround write method */
8010 	tp->write32 = write_op;
8011 
8012 	/* Unfortunately, we have to delay before the PCI read back.
8013 	 * Some 575X chips even will not respond to a PCI cfg access
8014 	 * when the reset command is given to the chip.
8015 	 *
8016 	 * How do these hardware designers expect things to work
8017 	 * properly if the PCI write is posted for a long period
8018 	 * of time?  It is always necessary to have some method by
8019 	 * which a register read back can occur to push the write
8020 	 * out which does the reset.
8021 	 *
	 * For most tg3 variants the trick below has worked.
8023 	 * Ho hum...
8024 	 */
8025 	udelay(120);
8026 
8027 	/* Flush PCI posted writes.  The normal MMIO registers
8028 	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
8030 	 * the case, see above).  I tried to use indirect
8031 	 * register read/write but this upset some 5701 variants.
8032 	 */
8033 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8034 
8035 	udelay(120);
8036 
8037 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8038 		u16 val16;
8039 
8040 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8041 			int i;
8042 			u32 cfg_val;
8043 
			/* Wait 500 ms for link training to complete. */
8045 			for (i = 0; i < 5000; i++)
8046 				udelay(100);
8047 
8048 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8049 			pci_write_config_dword(tp->pdev, 0xc4,
8050 					       cfg_val | (1 << 15));
8051 		}
8052 
8053 		/* Clear the "no snoop" and "relaxed ordering" bits. */
8054 		pci_read_config_word(tp->pdev,
8055 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8056 				     &val16);
8057 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8058 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
8059 		/*
8060 		 * Older PCIe devices only support the 128 byte
8061 		 * MPS setting.  Enforce the restriction.
8062 		 */
8063 		if (!tg3_flag(tp, CPMU_PRESENT))
8064 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8065 		pci_write_config_word(tp->pdev,
8066 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8067 				      val16);
8068 
8069 		/* Clear error status */
8070 		pci_write_config_word(tp->pdev,
8071 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8072 				      PCI_EXP_DEVSTA_CED |
8073 				      PCI_EXP_DEVSTA_NFED |
8074 				      PCI_EXP_DEVSTA_FED |
8075 				      PCI_EXP_DEVSTA_URD);
8076 	}
8077 
8078 	tg3_restore_pci_state(tp);
8079 
8080 	tg3_flag_clear(tp, CHIP_RESETTING);
8081 	tg3_flag_clear(tp, ERROR_PROCESSED);
8082 
8083 	val = 0;
8084 	if (tg3_flag(tp, 5780_CLASS))
8085 		val = tr32(MEMARB_MODE);
8086 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8087 
8088 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8089 		tg3_stop_fw(tp);
8090 		tw32(0x5000, 0x400);
8091 	}
8092 
8093 	tw32(GRC_MODE, tp->grc_mode);
8094 
8095 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8096 		val = tr32(0xc4);
8097 
8098 		tw32(0xc4, val | (1 << 15));
8099 	}
8100 
8101 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8102 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8103 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8104 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8105 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8106 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8107 	}
8108 
8109 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8110 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8111 		val = tp->mac_mode;
8112 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8113 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8114 		val = tp->mac_mode;
8115 	} else
8116 		val = 0;
8117 
8118 	tw32_f(MAC_MODE, val);
8119 	udelay(40);
8120 
8121 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8122 
8123 	err = tg3_poll_fw(tp);
8124 	if (err)
8125 		return err;
8126 
8127 	tg3_mdio_start(tp);
8128 
8129 	if (tg3_flag(tp, PCI_EXPRESS) &&
8130 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8131 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8132 	    !tg3_flag(tp, 57765_PLUS)) {
8133 		val = tr32(0x7c00);
8134 
8135 		tw32(0x7c00, val | (1 << 25));
8136 	}
8137 
8138 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8139 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8140 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8141 	}
8142 
8143 	/* Reprobe ASF enable state.  */
8144 	tg3_flag_clear(tp, ENABLE_ASF);
8145 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8146 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8147 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8148 		u32 nic_cfg;
8149 
8150 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8151 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8152 			tg3_flag_set(tp, ENABLE_ASF);
8153 			tp->last_event_jiffies = jiffies;
8154 			if (tg3_flag(tp, 5750_PLUS))
8155 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8156 		}
8157 	}
8158 
8159 	return 0;
8160 }
8161 
8162 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8163 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8164 
8165 /* tp->lock is held. */
8166 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8167 {
8168 	int err;
8169 
8170 	tg3_stop_fw(tp);
8171 
8172 	tg3_write_sig_pre_reset(tp, kind);
8173 
8174 	tg3_abort_hw(tp, silent);
8175 	err = tg3_chip_reset(tp);
8176 
8177 	__tg3_set_mac_addr(tp, 0);
8178 
8179 	tg3_write_sig_legacy(tp, kind);
8180 	tg3_write_sig_post_reset(tp, kind);
8181 
8182 	if (tp->hw_stats) {
8183 		/* Save the stats across chip resets... */
8184 		tg3_get_nstats(tp, &tp->net_stats_prev);
8185 		tg3_get_estats(tp, &tp->estats_prev);
8186 
8187 		/* And make sure the next sample is new data */
8188 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8189 	}
8190 
	return err;
8195 }
8196 
8197 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8198 {
8199 	struct tg3 *tp = netdev_priv(dev);
8200 	struct sockaddr *addr = p;
8201 	int err = 0, skip_mac_1 = 0;
8202 
8203 	if (!is_valid_ether_addr(addr->sa_data))
8204 		return -EADDRNOTAVAIL;
8205 
8206 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8207 
8208 	if (!netif_running(dev))
8209 		return 0;
8210 
8211 	if (tg3_flag(tp, ENABLE_ASF)) {
8212 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8213 
8214 		addr0_high = tr32(MAC_ADDR_0_HIGH);
8215 		addr0_low = tr32(MAC_ADDR_0_LOW);
8216 		addr1_high = tr32(MAC_ADDR_1_HIGH);
8217 		addr1_low = tr32(MAC_ADDR_1_LOW);
8218 
8219 		/* Skip MAC addr 1 if ASF is using it. */
8220 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8221 		    !(addr1_high == 0 && addr1_low == 0))
8222 			skip_mac_1 = 1;
8223 	}
8224 	spin_lock_bh(&tp->lock);
8225 	__tg3_set_mac_addr(tp, skip_mac_1);
8226 	spin_unlock_bh(&tp->lock);
8227 
8228 	return err;
8229 }
8230 
8231 /* tp->lock is held. */
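/* Write one TG3_BDINFO control block into NIC SRAM: the 64-bit host
 * DMA address split into high/low halves, the maxlen/flags word and,
 * on chips that predate the 5705, the NIC-memory address of the
 * descriptors.
 */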
8232 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8233 			   dma_addr_t mapping, u32 maxlen_flags,
8234 			   u32 nic_addr)
8235 {
8236 	tg3_write_mem(tp,
8237 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8238 		      ((u64) mapping >> 32));
8239 	tg3_write_mem(tp,
8240 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8241 		      ((u64) mapping & 0xffffffff));
8242 	tg3_write_mem(tp,
8243 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8244 		       maxlen_flags);
8245 
8246 	if (!tg3_flag(tp, 5705_PLUS))
8247 		tg3_write_mem(tp,
8248 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8249 			      nic_addr);
8250 }
8251 
8252 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8253 {
8254 	int i;
8255 
8256 	if (!tg3_flag(tp, ENABLE_TSS)) {
8257 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8258 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8259 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8260 	} else {
8261 		tw32(HOSTCC_TXCOL_TICKS, 0);
8262 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8263 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8264 	}
8265 
8266 	if (!tg3_flag(tp, ENABLE_RSS)) {
8267 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8268 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8269 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8270 	} else {
8271 		tw32(HOSTCC_RXCOL_TICKS, 0);
8272 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8273 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8274 	}
8275 
8276 	if (!tg3_flag(tp, 5705_PLUS)) {
8277 		u32 val = ec->stats_block_coalesce_usecs;
8278 
8279 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8280 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8281 
8282 		if (!netif_carrier_ok(tp->dev))
8283 			val = 0;
8284 
8285 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8286 	}
8287 
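	/* Each additional MSI-X vector has its own bank of coalescing
	 * registers, spaced 0x18 bytes apart starting at the VEC1 set.
	 */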
8288 	for (i = 0; i < tp->irq_cnt - 1; i++) {
8289 		u32 reg;
8290 
8291 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8292 		tw32(reg, ec->rx_coalesce_usecs);
8293 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8294 		tw32(reg, ec->rx_max_coalesced_frames);
8295 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8296 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8297 
8298 		if (tg3_flag(tp, ENABLE_TSS)) {
8299 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8300 			tw32(reg, ec->tx_coalesce_usecs);
8301 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8302 			tw32(reg, ec->tx_max_coalesced_frames);
8303 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8304 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8305 		}
8306 	}
8307 
8308 	for (; i < tp->irq_max - 1; i++) {
8309 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8310 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8311 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8312 
8313 		if (tg3_flag(tp, ENABLE_TSS)) {
8314 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8315 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8316 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8317 		}
8318 	}
8319 }
8320 
8321 /* tp->lock is held. */
8322 static void tg3_rings_reset(struct tg3 *tp)
8323 {
8324 	int i;
8325 	u32 stblk, txrcb, rxrcb, limit;
8326 	struct tg3_napi *tnapi = &tp->napi[0];
8327 
8328 	/* Disable all transmit rings but the first. */
8329 	if (!tg3_flag(tp, 5705_PLUS))
8330 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8331 	else if (tg3_flag(tp, 5717_PLUS))
8332 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8333 	else if (tg3_flag(tp, 57765_CLASS))
8334 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8335 	else
8336 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8337 
8338 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8339 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8340 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
8345 	if (tg3_flag(tp, 5717_PLUS))
8346 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8347 	else if (!tg3_flag(tp, 5705_PLUS))
8348 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8349 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8350 		 tg3_flag(tp, 57765_CLASS))
8351 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8352 	else
8353 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8354 
8355 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8356 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8357 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8358 			      BDINFO_FLAGS_DISABLED);
8359 
8360 	/* Disable interrupts */
8361 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8362 	tp->napi[0].chk_msi_cnt = 0;
8363 	tp->napi[0].last_rx_cons = 0;
8364 	tp->napi[0].last_tx_cons = 0;
8365 
8366 	/* Zero mailbox registers. */
8367 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8368 		for (i = 1; i < tp->irq_max; i++) {
8369 			tp->napi[i].tx_prod = 0;
8370 			tp->napi[i].tx_cons = 0;
8371 			if (tg3_flag(tp, ENABLE_TSS))
8372 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8373 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8374 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8375 			tp->napi[i].chk_msi_cnt = 0;
8376 			tp->napi[i].last_rx_cons = 0;
8377 			tp->napi[i].last_tx_cons = 0;
8378 		}
8379 		if (!tg3_flag(tp, ENABLE_TSS))
8380 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8381 	} else {
8382 		tp->napi[0].tx_prod = 0;
8383 		tp->napi[0].tx_cons = 0;
8384 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8385 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8386 	}
8387 
8388 	/* Make sure the NIC-based send BD rings are disabled. */
8389 	if (!tg3_flag(tp, 5705_PLUS)) {
8390 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8391 		for (i = 0; i < 16; i++)
8392 			tw32_tx_mbox(mbox + i * 8, 0);
8393 	}
8394 
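	/* Program the first (always enabled) send and receive return
	 * control blocks here; the per-vector loop below does the rest.
	 */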
8395 	txrcb = NIC_SRAM_SEND_RCB;
8396 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8397 
8398 	/* Clear status block in ram. */
8399 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8400 
8401 	/* Set status block DMA address */
8402 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8403 	     ((u64) tnapi->status_mapping >> 32));
8404 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8405 	     ((u64) tnapi->status_mapping & 0xffffffff));
8406 
8407 	if (tnapi->tx_ring) {
8408 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8409 			       (TG3_TX_RING_SIZE <<
8410 				BDINFO_FLAGS_MAXLEN_SHIFT),
8411 			       NIC_SRAM_TX_BUFFER_DESC);
8412 		txrcb += TG3_BDINFO_SIZE;
8413 	}
8414 
8415 	if (tnapi->rx_rcb) {
8416 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8417 			       (tp->rx_ret_ring_mask + 1) <<
8418 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8419 		rxrcb += TG3_BDINFO_SIZE;
8420 	}
8421 
8422 	stblk = HOSTCC_STATBLCK_RING1;
8423 
8424 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8425 		u64 mapping = (u64)tnapi->status_mapping;
8426 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8427 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8428 
8429 		/* Clear status block in ram. */
8430 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8431 
8432 		if (tnapi->tx_ring) {
8433 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8434 				       (TG3_TX_RING_SIZE <<
8435 					BDINFO_FLAGS_MAXLEN_SHIFT),
8436 				       NIC_SRAM_TX_BUFFER_DESC);
8437 			txrcb += TG3_BDINFO_SIZE;
8438 		}
8439 
8440 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8441 			       ((tp->rx_ret_ring_mask + 1) <<
8442 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8443 
8444 		stblk += 8;
8445 		rxrcb += TG3_BDINFO_SIZE;
8446 	}
8447 }
8448 
8449 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8450 {
8451 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8452 
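	/* Select the per-chip size of the on-board standard RX BD cache.
	 * The replenish threshold programmed below is the smallest of
	 * half this cache, the chip's max post count, and one eighth of
	 * the host ring (minimum 1).
	 */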
8453 	if (!tg3_flag(tp, 5750_PLUS) ||
8454 	    tg3_flag(tp, 5780_CLASS) ||
8455 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8456 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8457 	    tg3_flag(tp, 57765_PLUS))
8458 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8459 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8460 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8461 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8462 	else
8463 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8464 
8465 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8466 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8467 
8468 	val = min(nic_rep_thresh, host_rep_thresh);
8469 	tw32(RCVBDI_STD_THRESH, val);
8470 
8471 	if (tg3_flag(tp, 57765_PLUS))
8472 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8473 
8474 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8475 		return;
8476 
8477 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8478 
8479 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8480 
8481 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8482 	tw32(RCVBDI_JUMBO_THRESH, val);
8483 
8484 	if (tg3_flag(tp, 57765_PLUS))
8485 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8486 }
8487 
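/* Bit-serial CRC-32 using the reflected Ethernet polynomial 0xedb88320,
 * with the standard all-ones init and final inversion.
 * __tg3_set_rx_mode() uses it to derive the multicast hash filter bits.
 */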
8488 static inline u32 calc_crc(unsigned char *buf, int len)
8489 {
8490 	u32 reg;
8491 	u32 tmp;
8492 	int j, k;
8493 
8494 	reg = 0xffffffff;
8495 
8496 	for (j = 0; j < len; j++) {
8497 		reg ^= buf[j];
8498 
8499 		for (k = 0; k < 8; k++) {
8500 			tmp = reg & 0x01;
8501 
8502 			reg >>= 1;
8503 
8504 			if (tmp)
8505 				reg ^= 0xedb88320;
8506 		}
8507 	}
8508 
8509 	return ~reg;
8510 }
8511 
8512 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8513 {
8514 	/* accept or reject all multicast frames */
8515 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8516 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8517 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8518 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8519 }
8520 
8521 static void __tg3_set_rx_mode(struct net_device *dev)
8522 {
8523 	struct tg3 *tp = netdev_priv(dev);
8524 	u32 rx_mode;
8525 
8526 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8527 				  RX_MODE_KEEP_VLAN_TAG);
8528 
8529 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8530 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8531 	 * flag clear.
8532 	 */
8533 	if (!tg3_flag(tp, ENABLE_ASF))
8534 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8535 #endif
8536 
8537 	if (dev->flags & IFF_PROMISC) {
8538 		/* Promiscuous mode. */
8539 		rx_mode |= RX_MODE_PROMISC;
8540 	} else if (dev->flags & IFF_ALLMULTI) {
8541 		/* Accept all multicast. */
8542 		tg3_set_multi(tp, 1);
8543 	} else if (netdev_mc_empty(dev)) {
8544 		/* Reject all multicast. */
8545 		tg3_set_multi(tp, 0);
8546 	} else {
8547 		/* Accept one or more multicast(s). */
8548 		struct netdev_hw_addr *ha;
8549 		u32 mc_filter[4] = { 0, };
8550 		u32 regidx;
8551 		u32 bit;
8552 		u32 crc;
8553 
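		/* Hash each address into a 128-bit table: the low 7 bits
		 * of the inverted CRC select one bit spread across the
		 * four 32-bit MAC_HASH registers.
		 */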
8554 		netdev_for_each_mc_addr(ha, dev) {
8555 			crc = calc_crc(ha->addr, ETH_ALEN);
8556 			bit = ~crc & 0x7f;
8557 			regidx = (bit & 0x60) >> 5;
8558 			bit &= 0x1f;
8559 			mc_filter[regidx] |= (1 << bit);
8560 		}
8561 
8562 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8563 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8564 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8565 		tw32(MAC_HASH_REG_3, mc_filter[3]);
8566 	}
8567 
8568 	if (rx_mode != tp->rx_mode) {
8569 		tp->rx_mode = rx_mode;
8570 		tw32_f(MAC_RX_MODE, rx_mode);
8571 		udelay(10);
8572 	}
8573 }
8574 
8575 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8576 {
8577 	int i;
8578 
8579 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8580 		tp->rss_ind_tbl[i] =
8581 			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8582 }
8583 
8584 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8585 {
8586 	int i;
8587 
8588 	if (!tg3_flag(tp, SUPPORT_MSIX))
8589 		return;
8590 
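	/* With two or fewer vectors there is only one RX return ring,
	 * so every indirection table entry must point to ring 0.
	 */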
8591 	if (tp->irq_cnt <= 2) {
8592 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8593 		return;
8594 	}
8595 
8596 	/* Validate table against current IRQ count */
8597 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8598 		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8599 			break;
8600 	}
8601 
8602 	if (i != TG3_RSS_INDIR_TBL_SIZE)
8603 		tg3_rss_init_dflt_indir_tbl(tp);
8604 }
8605 
8606 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8607 {
8608 	int i = 0;
8609 	u32 reg = MAC_RSS_INDIR_TBL_0;
8610 
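	/* Each 32-bit indirection table register packs eight 4-bit ring
	 * indices, most significant nibble first.
	 */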
8611 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8612 		u32 val = tp->rss_ind_tbl[i];
8613 		i++;
8614 		for (; i % 8; i++) {
8615 			val <<= 4;
8616 			val |= tp->rss_ind_tbl[i];
8617 		}
8618 		tw32(reg, val);
8619 		reg += 4;
8620 	}
8621 }
8622 
8623 /* tp->lock is held. */
8624 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8625 {
8626 	u32 val, rdmac_mode;
8627 	int i, err, limit;
8628 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8629 
8630 	tg3_disable_ints(tp);
8631 
8632 	tg3_stop_fw(tp);
8633 
8634 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8635 
8636 	if (tg3_flag(tp, INIT_COMPLETE))
8637 		tg3_abort_hw(tp, 1);
8638 
8639 	/* Enable MAC control of LPI */
8640 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8641 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8642 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8643 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8644 
8645 		tw32_f(TG3_CPMU_EEE_CTRL,
8646 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8647 
8648 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8649 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8650 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8651 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8652 
8653 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8654 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8655 
8656 		if (tg3_flag(tp, ENABLE_APE))
8657 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8658 
8659 		tw32_f(TG3_CPMU_EEE_MODE, val);
8660 
8661 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8662 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8663 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8664 
8665 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8666 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8667 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8668 	}
8669 
8670 	if (reset_phy)
8671 		tg3_phy_reset(tp);
8672 
8673 	err = tg3_chip_reset(tp);
8674 	if (err)
8675 		return err;
8676 
8677 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8678 
8679 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8680 		val = tr32(TG3_CPMU_CTRL);
8681 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8682 		tw32(TG3_CPMU_CTRL, val);
8683 
8684 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8685 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8686 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8687 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8688 
8689 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8690 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8691 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8692 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8693 
8694 		val = tr32(TG3_CPMU_HST_ACC);
8695 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8696 		val |= CPMU_HST_ACC_MACCLK_6_25;
8697 		tw32(TG3_CPMU_HST_ACC, val);
8698 	}
8699 
8700 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8701 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8702 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8703 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8704 		tw32(PCIE_PWR_MGMT_THRESH, val);
8705 
8706 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8707 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8708 
8709 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8710 
8711 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8712 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8713 	}
8714 
8715 	if (tg3_flag(tp, L1PLLPD_EN)) {
8716 		u32 grc_mode = tr32(GRC_MODE);
8717 
8718 		/* Access the lower 1K of PL PCIE block registers. */
8719 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8720 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8721 
8722 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8723 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8724 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8725 
8726 		tw32(GRC_MODE, grc_mode);
8727 	}
8728 
8729 	if (tg3_flag(tp, 57765_CLASS)) {
8730 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8731 			u32 grc_mode = tr32(GRC_MODE);
8732 
8733 			/* Access the lower 1K of PL PCIE block registers. */
8734 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8735 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8736 
8737 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8738 				   TG3_PCIE_PL_LO_PHYCTL5);
8739 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8740 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8741 
8742 			tw32(GRC_MODE, grc_mode);
8743 		}
8744 
8745 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8746 			u32 grc_mode = tr32(GRC_MODE);
8747 
8748 			/* Access the lower 1K of DL PCIE block registers. */
8749 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8750 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8751 
8752 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8753 				   TG3_PCIE_DL_LO_FTSMAX);
8754 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8755 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8756 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8757 
8758 			tw32(GRC_MODE, grc_mode);
8759 		}
8760 
8761 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8762 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8763 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8764 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8765 	}
8766 
8767 	/* This works around an issue with Athlon chipsets on
8768 	 * B3 tigon3 silicon.  This bit has no effect on any
8769 	 * other revision.  But do not set this on PCI Express
8770 	 * chips and don't even touch the clocks if the CPMU is present.
8771 	 */
8772 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8773 		if (!tg3_flag(tp, PCI_EXPRESS))
8774 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8775 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8776 	}
8777 
8778 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8779 	    tg3_flag(tp, PCIX_MODE)) {
8780 		val = tr32(TG3PCI_PCISTATE);
8781 		val |= PCISTATE_RETRY_SAME_DMA;
8782 		tw32(TG3PCI_PCISTATE, val);
8783 	}
8784 
8785 	if (tg3_flag(tp, ENABLE_APE)) {
8786 		/* Allow reads and writes to the
8787 		 * APE register and memory space.
8788 		 */
8789 		val = tr32(TG3PCI_PCISTATE);
8790 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8791 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8792 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8793 		tw32(TG3PCI_PCISTATE, val);
8794 	}
8795 
8796 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8797 		/* Enable some hw fixes.  */
8798 		val = tr32(TG3PCI_MSI_DATA);
8799 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8800 		tw32(TG3PCI_MSI_DATA, val);
8801 	}
8802 
8803 	/* Descriptor ring init may make accesses to the
8804 	 * NIC SRAM area to setup the TX descriptors, so we
8805 	 * can only do this after the hardware has been
8806 	 * successfully reset.
8807 	 */
8808 	err = tg3_init_rings(tp);
8809 	if (err)
8810 		return err;
8811 
8812 	if (tg3_flag(tp, 57765_PLUS)) {
8813 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8814 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8815 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8816 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8817 		if (!tg3_flag(tp, 57765_CLASS) &&
8818 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8819 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8820 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8821 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8822 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8823 		/* This value is determined during the probe time DMA
8824 		 * engine test, tg3_test_dma.
8825 		 */
8826 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8827 	}
8828 
8829 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8830 			  GRC_MODE_4X_NIC_SEND_RINGS |
8831 			  GRC_MODE_NO_TX_PHDR_CSUM |
8832 			  GRC_MODE_NO_RX_PHDR_CSUM);
8833 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8834 
8835 	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
8837 	 * header checksums on receive.  For transmit it is more
8838 	 * convenient to do the pseudo-header checksum in software
8839 	 * as Linux does that on transmit for us in all cases.
8840 	 */
8841 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8842 
8843 	tw32(GRC_MODE,
8844 	     tp->grc_mode |
8845 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8846 
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
8848 	val = tr32(GRC_MISC_CFG);
8849 	val &= ~0xff;
8850 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8851 	tw32(GRC_MISC_CFG, val);
8852 
8853 	/* Initialize MBUF/DESC pool. */
8854 	if (tg3_flag(tp, 5750_PLUS)) {
8855 		/* Do nothing.  */
8856 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8857 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8858 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8859 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8860 		else
8861 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8862 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8863 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8864 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8865 		int fw_len;
8866 
8867 		fw_len = tp->fw_len;
8868 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8869 		tw32(BUFMGR_MB_POOL_ADDR,
8870 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8871 		tw32(BUFMGR_MB_POOL_SIZE,
8872 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8873 	}
8874 
8875 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8876 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8877 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8878 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8879 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8880 		tw32(BUFMGR_MB_HIGH_WATER,
8881 		     tp->bufmgr_config.mbuf_high_water);
8882 	} else {
8883 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8884 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8885 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8886 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8887 		tw32(BUFMGR_MB_HIGH_WATER,
8888 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8889 	}
8890 	tw32(BUFMGR_DMA_LOW_WATER,
8891 	     tp->bufmgr_config.dma_low_water);
8892 	tw32(BUFMGR_DMA_HIGH_WATER,
8893 	     tp->bufmgr_config.dma_high_water);
8894 
8895 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8896 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8897 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8898 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8899 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8900 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8901 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8902 	tw32(BUFMGR_MODE, val);
8903 	for (i = 0; i < 2000; i++) {
8904 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8905 			break;
8906 		udelay(10);
8907 	}
8908 	if (i >= 2000) {
8909 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8910 		return -ENODEV;
8911 	}
8912 
8913 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8914 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8915 
8916 	tg3_setup_rxbd_thresholds(tp);
8917 
8918 	/* Initialize TG3_BDINFO's at:
8919 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8920 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8921 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8922 	 *
8923 	 * like so:
8924 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8925 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8926 	 *                              ring attribute flags
8927 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8928 	 *
8929 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8930 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8931 	 *
8932 	 * The size of each ring is fixed in the firmware, but the location is
8933 	 * configurable.
8934 	 */
8935 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8936 	     ((u64) tpr->rx_std_mapping >> 32));
8937 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8938 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8939 	if (!tg3_flag(tp, 5717_PLUS))
8940 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8941 		     NIC_SRAM_RX_BUFFER_DESC);
8942 
8943 	/* Disable the mini ring */
8944 	if (!tg3_flag(tp, 5705_PLUS))
8945 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8946 		     BDINFO_FLAGS_DISABLED);
8947 
8948 	/* Program the jumbo buffer descriptor ring control
8949 	 * blocks on those devices that have them.
8950 	 */
8951 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8952 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8953 
8954 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8955 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8956 			     ((u64) tpr->rx_jmb_mapping >> 32));
8957 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8958 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8959 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8960 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8961 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8962 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8963 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8964 			    tg3_flag(tp, 57765_CLASS))
8965 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8966 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8967 		} else {
8968 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8969 			     BDINFO_FLAGS_DISABLED);
8970 		}
8971 
8972 		if (tg3_flag(tp, 57765_PLUS)) {
8973 			val = TG3_RX_STD_RING_SIZE(tp);
8974 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8975 			val |= (TG3_RX_STD_DMA_SZ << 2);
8976 		} else
8977 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8978 	} else
8979 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8980 
8981 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8982 
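	/* Post the freshly allocated buffers to the chip by advancing
	 * the standard (and, if enabled, jumbo) producer mailboxes.
	 */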
8983 	tpr->rx_std_prod_idx = tp->rx_pending;
8984 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8985 
8986 	tpr->rx_jmb_prod_idx =
8987 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8988 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8989 
8990 	tg3_rings_reset(tp);
8991 
8992 	/* Initialize MAC address and backoff seed. */
8993 	__tg3_set_mac_addr(tp, 0);
8994 
8995 	/* MTU + ethernet header + FCS + optional VLAN tag */
8996 	tw32(MAC_RX_MTU_SIZE,
8997 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8998 
8999 	/* The slot time is changed by tg3_setup_phy if we
9000 	 * run at gigabit with half duplex.
9001 	 */
9002 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9003 	      (6 << TX_LENGTHS_IPG_SHIFT) |
9004 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9005 
9006 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9007 		val |= tr32(MAC_TX_LENGTHS) &
9008 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
9009 			TX_LENGTHS_CNT_DWN_VAL_MSK);
9010 
9011 	tw32(MAC_TX_LENGTHS, val);
9012 
9013 	/* Receive rules. */
9014 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9015 	tw32(RCVLPC_CONFIG, 0x0181);
9016 
9017 	/* Calculate RDMAC_MODE setting early, we need it to determine
9018 	 * the RCVLPC_STATE_ENABLE mask.
9019 	 */
9020 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9021 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9022 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9023 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9024 		      RDMAC_MODE_LNGREAD_ENAB);
9025 
9026 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9027 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9028 
9029 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9030 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9031 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9032 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9033 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9034 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9035 
9036 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9037 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9038 		if (tg3_flag(tp, TSO_CAPABLE) &&
9039 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9040 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9041 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9042 			   !tg3_flag(tp, IS_5788)) {
9043 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9044 		}
9045 	}
9046 
9047 	if (tg3_flag(tp, PCI_EXPRESS))
9048 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9049 
9050 	if (tg3_flag(tp, HW_TSO_1) ||
9051 	    tg3_flag(tp, HW_TSO_2) ||
9052 	    tg3_flag(tp, HW_TSO_3))
9053 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9054 
9055 	if (tg3_flag(tp, 57765_PLUS) ||
9056 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9057 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9058 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9059 
9060 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9061 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9062 
9063 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9064 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9065 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9066 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9067 	    tg3_flag(tp, 57765_PLUS)) {
9068 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
9069 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9070 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9071 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9072 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9073 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9074 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9075 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9076 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9077 		}
9078 		tw32(TG3_RDMA_RSRVCTRL_REG,
9079 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9080 	}
9081 
9082 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9083 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9084 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9085 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9086 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9087 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9088 	}
9089 
9090 	/* Receive/send statistics. */
9091 	if (tg3_flag(tp, 5750_PLUS)) {
9092 		val = tr32(RCVLPC_STATS_ENABLE);
9093 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
9094 		tw32(RCVLPC_STATS_ENABLE, val);
9095 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9096 		   tg3_flag(tp, TSO_CAPABLE)) {
9097 		val = tr32(RCVLPC_STATS_ENABLE);
9098 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9099 		tw32(RCVLPC_STATS_ENABLE, val);
9100 	} else {
9101 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9102 	}
9103 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9104 	tw32(SNDDATAI_STATSENAB, 0xffffff);
9105 	tw32(SNDDATAI_STATSCTRL,
9106 	     (SNDDATAI_SCTRL_ENABLE |
9107 	      SNDDATAI_SCTRL_FASTUPD));
9108 
9109 	/* Setup host coalescing engine. */
9110 	tw32(HOSTCC_MODE, 0);
9111 	for (i = 0; i < 2000; i++) {
9112 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9113 			break;
9114 		udelay(10);
9115 	}
9116 
9117 	__tg3_set_coalesce(tp, &tp->coal);
9118 
9119 	if (!tg3_flag(tp, 5705_PLUS)) {
9120 		/* Status/statistics block address.  See tg3_timer,
9121 		 * the tg3_periodic_fetch_stats call there, and
9122 		 * tg3_get_stats to see how this works for 5705/5750 chips.
9123 		 */
9124 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9125 		     ((u64) tp->stats_mapping >> 32));
9126 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9127 		     ((u64) tp->stats_mapping & 0xffffffff));
9128 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9129 
9130 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9131 
9132 		/* Clear statistics and status block memory areas */
9133 		for (i = NIC_SRAM_STATS_BLK;
9134 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9135 		     i += sizeof(u32)) {
9136 			tg3_write_mem(tp, i, 0);
9137 			udelay(40);
9138 		}
9139 	}
9140 
9141 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9142 
9143 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9144 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9145 	if (!tg3_flag(tp, 5705_PLUS))
9146 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9147 
9148 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9149 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9150 		/* reset to prevent losing 1st rx packet intermittently */
9151 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9152 		udelay(10);
9153 	}
9154 
9155 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9156 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9157 			MAC_MODE_FHDE_ENABLE;
9158 	if (tg3_flag(tp, ENABLE_APE))
9159 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9160 	if (!tg3_flag(tp, 5705_PLUS) &&
9161 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9162 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9163 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9164 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9165 	udelay(40);
9166 
9167 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9168 	 * If TG3_FLAG_IS_NIC is zero, we should read the
9169 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9170 	 * whether used as inputs or outputs, are set by boot code after
9171 	 * reset.
9172 	 */
9173 	if (!tg3_flag(tp, IS_NIC)) {
9174 		u32 gpio_mask;
9175 
9176 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9177 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9178 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9179 
9180 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9181 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9182 				     GRC_LCLCTRL_GPIO_OUTPUT3;
9183 
9184 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9185 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9186 
9187 		tp->grc_local_ctrl &= ~gpio_mask;
9188 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9189 
9190 		/* GPIO1 must be driven high for eeprom write protect */
9191 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9192 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9193 					       GRC_LCLCTRL_GPIO_OUTPUT1);
9194 	}
9195 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9196 	udelay(100);
9197 
9198 	if (tg3_flag(tp, USING_MSIX)) {
9199 		val = tr32(MSGINT_MODE);
9200 		val |= MSGINT_MODE_ENABLE;
9201 		if (tp->irq_cnt > 1)
9202 			val |= MSGINT_MODE_MULTIVEC_EN;
9203 		if (!tg3_flag(tp, 1SHOT_MSI))
9204 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9205 		tw32(MSGINT_MODE, val);
9206 	}
9207 
9208 	if (!tg3_flag(tp, 5705_PLUS)) {
9209 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9210 		udelay(40);
9211 	}
9212 
9213 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9214 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9215 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9216 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9217 	       WDMAC_MODE_LNGREAD_ENAB);
9218 
9219 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9220 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9221 		if (tg3_flag(tp, TSO_CAPABLE) &&
9222 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9223 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9224 			/* nothing */
9225 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9226 			   !tg3_flag(tp, IS_5788)) {
9227 			val |= WDMAC_MODE_RX_ACCEL;
9228 		}
9229 	}
9230 
9231 	/* Enable host coalescing bug fix */
9232 	if (tg3_flag(tp, 5755_PLUS))
9233 		val |= WDMAC_MODE_STATUS_TAG_FIX;
9234 
9235 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9236 		val |= WDMAC_MODE_BURST_ALL_DATA;
9237 
9238 	tw32_f(WDMAC_MODE, val);
9239 	udelay(40);
9240 
9241 	if (tg3_flag(tp, PCIX_MODE)) {
9242 		u16 pcix_cmd;
9243 
9244 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9245 				     &pcix_cmd);
9246 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9247 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9248 			pcix_cmd |= PCI_X_CMD_READ_2K;
9249 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9250 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9251 			pcix_cmd |= PCI_X_CMD_READ_2K;
9252 		}
9253 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9254 				      pcix_cmd);
9255 	}
9256 
9257 	tw32_f(RDMAC_MODE, rdmac_mode);
9258 	udelay(40);
9259 
9260 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9261 	if (!tg3_flag(tp, 5705_PLUS))
9262 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9263 
9264 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9265 		tw32(SNDDATAC_MODE,
9266 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9267 	else
9268 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9269 
9270 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9271 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9272 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9273 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9274 		val |= RCVDBDI_MODE_LRG_RING_SZ;
9275 	tw32(RCVDBDI_MODE, val);
9276 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9277 	if (tg3_flag(tp, HW_TSO_1) ||
9278 	    tg3_flag(tp, HW_TSO_2) ||
9279 	    tg3_flag(tp, HW_TSO_3))
9280 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9281 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9282 	if (tg3_flag(tp, ENABLE_TSS))
9283 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9284 	tw32(SNDBDI_MODE, val);
9285 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9286 
9287 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9288 		err = tg3_load_5701_a0_firmware_fix(tp);
9289 		if (err)
9290 			return err;
9291 	}
9292 
9293 	if (tg3_flag(tp, TSO_CAPABLE)) {
9294 		err = tg3_load_tso_firmware(tp);
9295 		if (err)
9296 			return err;
9297 	}
9298 
9299 	tp->tx_mode = TX_MODE_ENABLE;
9300 
9301 	if (tg3_flag(tp, 5755_PLUS) ||
9302 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9303 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9304 
9305 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9306 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9307 		tp->tx_mode &= ~val;
9308 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9309 	}
9310 
9311 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9312 	udelay(100);
9313 
9314 	if (tg3_flag(tp, ENABLE_RSS)) {
9315 		tg3_rss_write_indir_tbl(tp);
9316 
9317 		/* Setup the "secret" hash key. */
9318 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9319 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9320 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9321 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9322 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9323 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9324 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9325 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9326 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9327 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9328 	}
9329 
9330 	tp->rx_mode = RX_MODE_ENABLE;
9331 	if (tg3_flag(tp, 5755_PLUS))
9332 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9333 
9334 	if (tg3_flag(tp, ENABLE_RSS))
9335 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9336 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9337 			       RX_MODE_RSS_IPV6_HASH_EN |
9338 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9339 			       RX_MODE_RSS_IPV4_HASH_EN |
9340 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9341 
9342 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9343 	udelay(10);
9344 
9345 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9346 
9347 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9348 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9349 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9350 		udelay(10);
9351 	}
9352 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9353 	udelay(10);
9354 
9355 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9356 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9357 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
9360 			val = tr32(MAC_SERDES_CFG);
9361 			val &= 0xfffff000;
9362 			val |= 0x880;
9363 			tw32(MAC_SERDES_CFG, val);
9364 		}
9365 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9366 			tw32(MAC_SERDES_CFG, 0x616000);
9367 	}
9368 
9369 	/* Prevent chip from dropping frames when flow control
9370 	 * is enabled.
9371 	 */
9372 	if (tg3_flag(tp, 57765_CLASS))
9373 		val = 1;
9374 	else
9375 		val = 2;
9376 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9377 
9378 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9379 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9380 		/* Use hardware link auto-negotiation */
9381 		tg3_flag_set(tp, HW_AUTONEG);
9382 	}
9383 
9384 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9385 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9386 		u32 tmp;
9387 
9388 		tmp = tr32(SERDES_RX_CTRL);
9389 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9390 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9391 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9392 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9393 	}
9394 
9395 	if (!tg3_flag(tp, USE_PHYLIB)) {
9396 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9397 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9398 
9399 		err = tg3_setup_phy(tp, 0);
9400 		if (err)
9401 			return err;
9402 
9403 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9404 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9405 			u32 tmp;
9406 
9407 			/* Clear CRC stats. */
9408 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9409 				tg3_writephy(tp, MII_TG3_TEST1,
9410 					     tmp | MII_TG3_TEST1_CRC_EN);
9411 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9412 			}
9413 		}
9414 	}
9415 
9416 	__tg3_set_rx_mode(tp->dev);
9417 
9418 	/* Initialize receive rules. */
9419 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9420 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9421 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9422 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9423 
9424 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9425 		limit = 8;
9426 	else
9427 		limit = 16;
9428 	if (tg3_flag(tp, ENABLE_ASF))
9429 		limit -= 4;
9430 	switch (limit) {
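	/* Deliberate fall-through: every rule from limit - 1 down to
	 * rule 4 is cleared.  Rules 0 and 1 were programmed above.
	 */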
9431 	case 16:
9432 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9433 	case 15:
9434 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9435 	case 14:
9436 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9437 	case 13:
9438 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9439 	case 12:
9440 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9441 	case 11:
9442 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9443 	case 10:
9444 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9445 	case 9:
9446 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9447 	case 8:
9448 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9449 	case 7:
9450 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9451 	case 6:
9452 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9453 	case 5:
9454 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9455 	case 4:
9456 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9457 	case 3:
9458 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9459 	case 2:
9460 	case 1:
9461 
9462 	default:
9463 		break;
9464 	}
9465 
9466 	if (tg3_flag(tp, ENABLE_APE))
9467 		/* Write our heartbeat update interval to APE. */
9468 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9469 				APE_HOST_HEARTBEAT_INT_DISABLE);
9470 
9471 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9472 
9473 	return 0;
9474 }
9475 
9476 /* Called at device open time to get the chip ready for
9477  * packet processing.  Invoked with tp->lock held.
9478  */
9479 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9480 {
9481 	tg3_switch_clocks(tp);
9482 
9483 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9484 
9485 	return tg3_reset_hw(tp, reset_phy);
9486 }
9487 
9488 #if IS_ENABLED(CONFIG_HWMON)
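/* Read the sensor records out of the APE scratchpad, zeroing any
 * record whose signature or active flag does not check out.
 */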
9489 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9490 {
9491 	int i;
9492 
9493 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9494 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9495 
9496 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9498 
9499 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9500 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9501 			memset(ocir, 0, TG3_OCIR_LEN);
9502 	}
9503 }
9504 
9505 /* sysfs attributes for hwmon */
9506 static ssize_t tg3_show_temp(struct device *dev,
9507 			     struct device_attribute *devattr, char *buf)
9508 {
9509 	struct pci_dev *pdev = to_pci_dev(dev);
9510 	struct net_device *netdev = pci_get_drvdata(pdev);
9511 	struct tg3 *tp = netdev_priv(netdev);
9512 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9513 	u32 temperature;
9514 
9515 	spin_lock_bh(&tp->lock);
9516 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9517 				sizeof(temperature));
9518 	spin_unlock_bh(&tp->lock);
9519 	return sprintf(buf, "%u\n", temperature);
9520 }
9522 
9523 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9524 			  TG3_TEMP_SENSOR_OFFSET);
9525 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9526 			  TG3_TEMP_CAUTION_OFFSET);
9527 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9528 			  TG3_TEMP_MAX_OFFSET);
9529 
9530 static struct attribute *tg3_attributes[] = {
9531 	&sensor_dev_attr_temp1_input.dev_attr.attr,
9532 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
9533 	&sensor_dev_attr_temp1_max.dev_attr.attr,
9534 	NULL
9535 };
9536 
9537 static const struct attribute_group tg3_group = {
9538 	.attrs = tg3_attributes,
9539 };
9540 
9541 #endif
9542 
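/* Tear down the hwmon device and the sysfs attribute group that
 * tg3_hwmon_open() registered.
 */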
9543 static void tg3_hwmon_close(struct tg3 *tp)
9544 {
9545 #if IS_ENABLED(CONFIG_HWMON)
9546 	if (tp->hwmon_dev) {
9547 		hwmon_device_unregister(tp->hwmon_dev);
9548 		tp->hwmon_dev = NULL;
9549 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9550 	}
9551 #endif
9552 }
9553 
9554 static void tg3_hwmon_open(struct tg3 *tp)
9555 {
9556 #if IS_ENABLED(CONFIG_HWMON)
9557 	int i, err;
9558 	u32 size = 0;
9559 	struct pci_dev *pdev = tp->pdev;
9560 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9561 
9562 	tg3_sd_scan_scratchpad(tp, ocirs);
9563 
9564 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9565 		if (!ocirs[i].src_data_length)
9566 			continue;
9567 
9568 		size += ocirs[i].src_hdr_length;
9569 		size += ocirs[i].src_data_length;
9570 	}
9571 
9572 	if (!size)
9573 		return;
9574 
9575 	/* Register hwmon sysfs hooks */
9576 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9577 	if (err) {
9578 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9579 		return;
9580 	}
9581 
9582 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9583 	if (IS_ERR(tp->hwmon_dev)) {
9584 		tp->hwmon_dev = NULL;
9585 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9586 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9587 	}
9588 #endif
9589 }
9591 
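/* Fold a 32-bit hardware counter into a 64-bit driver statistic,
 * carrying into the high word when the low word wraps.
 */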
9592 #define TG3_STAT_ADD32(PSTAT, REG) \
9593 do {	u32 __val = tr32(REG); \
9594 	(PSTAT)->low += __val; \
9595 	if ((PSTAT)->low < __val) \
9596 		(PSTAT)->high += 1; \
9597 } while (0)
9598 
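/* Called once a second from tg3_timer() to accumulate the chip's
 * 32-bit MAC counters into 64-bit host statistics before they wrap.
 */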
9599 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9600 {
9601 	struct tg3_hw_stats *sp = tp->hw_stats;
9602 
9603 	if (!netif_carrier_ok(tp->dev))
9604 		return;
9605 
9606 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9607 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9608 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9609 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9610 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9611 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9612 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9613 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9614 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9615 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9616 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9617 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9618 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9619 
9620 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9621 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9622 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9623 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9624 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9625 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9626 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9627 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9628 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9629 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9630 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9631 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9632 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9633 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9634 
9635 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9636 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9637 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9638 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9639 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9640 	} else {
9641 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9642 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9643 		if (val) {
9644 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9645 			sp->rx_discards.low += val;
9646 			if (sp->rx_discards.low < val)
9647 				sp->rx_discards.high += 1;
9648 		}
9649 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9650 	}
9651 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9652 }
9653 
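/* Watchdog for lost MSIs: if a vector reports pending work but its
 * rx/tx consumer indices have not advanced since the previous check,
 * invoke the MSI handler by hand to restart NAPI processing.
 */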
9654 static void tg3_chk_missed_msi(struct tg3 *tp)
9655 {
9656 	u32 i;
9657 
9658 	for (i = 0; i < tp->irq_cnt; i++) {
9659 		struct tg3_napi *tnapi = &tp->napi[i];
9660 
9661 		if (tg3_has_work(tnapi)) {
9662 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9663 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9664 				if (tnapi->chk_msi_cnt < 1) {
9665 					tnapi->chk_msi_cnt++;
9666 					return;
9667 				}
9668 				tg3_msi(0, tnapi);
9669 			}
9670 		}
9671 		tnapi->chk_msi_cnt = 0;
9672 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9673 		tnapi->last_tx_cons = tnapi->tx_cons;
9674 	}
9675 }
9676 
9677 static void tg3_timer(unsigned long __opaque)
9678 {
9679 	struct tg3 *tp = (struct tg3 *) __opaque;
9680 
9681 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9682 		goto restart_timer;
9683 
9684 	spin_lock(&tp->lock);
9685 
9686 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9687 	    tg3_flag(tp, 57765_CLASS))
9688 		tg3_chk_missed_msi(tp);
9689 
9690 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
9695 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9696 			tw32(GRC_LOCAL_CTRL,
9697 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9698 		} else {
9699 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9700 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9701 		}
9702 
9703 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9704 			spin_unlock(&tp->lock);
9705 			tg3_reset_task_schedule(tp);
9706 			goto restart_timer;
9707 		}
9708 	}
9709 
9710 	/* This part only runs once per second. */
9711 	if (!--tp->timer_counter) {
9712 		if (tg3_flag(tp, 5705_PLUS))
9713 			tg3_periodic_fetch_stats(tp);
9714 
9715 		if (tp->setlpicnt && !--tp->setlpicnt)
9716 			tg3_phy_eee_enable(tp);
9717 
9718 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9719 			u32 mac_stat;
9720 			int phy_event;
9721 
9722 			mac_stat = tr32(MAC_STATUS);
9723 
9724 			phy_event = 0;
9725 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9726 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9727 					phy_event = 1;
9728 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9729 				phy_event = 1;
9730 
9731 			if (phy_event)
9732 				tg3_setup_phy(tp, 0);
9733 		} else if (tg3_flag(tp, POLL_SERDES)) {
9734 			u32 mac_stat = tr32(MAC_STATUS);
9735 			int need_setup = 0;
9736 
9737 			if (netif_carrier_ok(tp->dev) &&
9738 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9739 				need_setup = 1;
9740 			}
9741 			if (!netif_carrier_ok(tp->dev) &&
9742 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9743 					 MAC_STATUS_SIGNAL_DET))) {
9744 				need_setup = 1;
9745 			}
9746 			if (need_setup) {
9747 				if (!tp->serdes_counter) {
9748 					tw32_f(MAC_MODE,
9749 					     (tp->mac_mode &
9750 					      ~MAC_MODE_PORT_MODE_MASK));
9751 					udelay(40);
9752 					tw32_f(MAC_MODE, tp->mac_mode);
9753 					udelay(40);
9754 				}
9755 				tg3_setup_phy(tp, 0);
9756 			}
9757 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9758 			   tg3_flag(tp, 5780_CLASS)) {
9759 			tg3_serdes_parallel_detect(tp);
9760 		}
9761 
9762 		tp->timer_counter = tp->timer_multiplier;
9763 	}
9764 
9765 	/* Heartbeat is only sent once every 2 seconds.
9766 	 *
9767 	 * The heartbeat is to tell the ASF firmware that the host
9768 	 * driver is still alive.  In the event that the OS crashes,
9769 	 * ASF needs to reset the hardware to free up the FIFO space
9770 	 * that may be filled with rx packets destined for the host.
9771 	 * If the FIFO is full, ASF will no longer function properly.
9772 	 *
	 * Unintended resets have been reported on real-time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
9776 	 *
9777 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9778 	 * to check the ring condition when the heartbeat is expiring
9779 	 * before doing the reset.  This will prevent most unintended
9780 	 * resets.
9781 	 */
9782 	if (!--tp->asf_counter) {
9783 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9784 			tg3_wait_for_event_ack(tp);
9785 
9786 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9787 				      FWCMD_NICDRV_ALIVE3);
9788 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9789 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9790 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9791 
9792 			tg3_generate_fw_event(tp);
9793 		}
9794 		tp->asf_counter = tp->asf_multiplier;
9795 	}
9796 
9797 	spin_unlock(&tp->lock);
9798 
9799 restart_timer:
9800 	tp->timer.expires = jiffies + tp->timer_offset;
9801 	add_timer(&tp->timer);
9802 }
9803 
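/* The driver timer ticks ten times a second, or only once a second
 * on chips using tagged status (5717 and 57765-class excepted).
 * The multipliers convert that tick rate into the one-second stats
 * interval and the TG3_FW_UPDATE_FREQ_SEC ASF heartbeat interval.
 */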
9804 static void __devinit tg3_timer_init(struct tg3 *tp)
9805 {
9806 	if (tg3_flag(tp, TAGGED_STATUS) &&
9807 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9808 	    !tg3_flag(tp, 57765_CLASS))
9809 		tp->timer_offset = HZ;
9810 	else
9811 		tp->timer_offset = HZ / 10;
9812 
9813 	BUG_ON(tp->timer_offset > HZ);
9814 
9815 	tp->timer_multiplier = (HZ / tp->timer_offset);
9816 	tp->asf_multiplier = (HZ / tp->timer_offset) *
9817 			     TG3_FW_UPDATE_FREQ_SEC;
9818 
9819 	init_timer(&tp->timer);
9820 	tp->timer.data = (unsigned long) tp;
9821 	tp->timer.function = tg3_timer;
9822 }
9823 
9824 static void tg3_timer_start(struct tg3 *tp)
9825 {
9826 	tp->asf_counter   = tp->asf_multiplier;
9827 	tp->timer_counter = tp->timer_multiplier;
9828 
9829 	tp->timer.expires = jiffies + tp->timer_offset;
9830 	add_timer(&tp->timer);
9831 }
9832 
9833 static void tg3_timer_stop(struct tg3 *tp)
9834 {
9835 	del_timer_sync(&tp->timer);
9836 }
9837 
9838 /* Restart hardware after configuration changes, self-test, etc.
9839  * Invoked with tp->lock held.
9840  */
9841 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9842 	__releases(tp->lock)
9843 	__acquires(tp->lock)
9844 {
9845 	int err;
9846 
9847 	err = tg3_init_hw(tp, reset_phy);
9848 	if (err) {
9849 		netdev_err(tp->dev,
9850 			   "Failed to re-initialize device, aborting\n");
9851 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9852 		tg3_full_unlock(tp);
9853 		tg3_timer_stop(tp);
9854 		tp->irq_sync = 0;
9855 		tg3_napi_enable(tp);
9856 		dev_close(tp->dev);
9857 		tg3_full_lock(tp, 0);
9858 	}
9859 	return err;
9860 }
9861 
9862 static void tg3_reset_task(struct work_struct *work)
9863 {
9864 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
9865 	int err;
9866 
9867 	tg3_full_lock(tp, 0);
9868 
9869 	if (!netif_running(tp->dev)) {
9870 		tg3_flag_clear(tp, RESET_TASK_PENDING);
9871 		tg3_full_unlock(tp);
9872 		return;
9873 	}
9874 
9875 	tg3_full_unlock(tp);
9876 
9877 	tg3_phy_stop(tp);
9878 
9879 	tg3_netif_stop(tp);
9880 
9881 	tg3_full_lock(tp, 1);
9882 
9883 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9884 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
9885 		tp->write32_rx_mbox = tg3_write_flush_reg32;
9886 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
9887 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9888 	}
9889 
9890 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9891 	err = tg3_init_hw(tp, 1);
9892 	if (err)
9893 		goto out;
9894 
9895 	tg3_netif_start(tp);
9896 
9897 out:
9898 	tg3_full_unlock(tp);
9899 
9900 	if (!err)
9901 		tg3_phy_start(tp);
9902 
9903 	tg3_flag_clear(tp, RESET_TASK_PENDING);
9904 }
9905 
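/* Request the IRQ for one NAPI vector, choosing the handler that
 * matches the interrupt scheme: one-shot MSI, plain MSI/MSI-X, or
 * (possibly tagged) legacy INTx.  Legacy IRQs are requested shared.
 */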
9906 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9907 {
9908 	irq_handler_t fn;
9909 	unsigned long flags;
9910 	char *name;
9911 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9912 
9913 	if (tp->irq_cnt == 1)
9914 		name = tp->dev->name;
9915 	else {
9916 		name = &tnapi->irq_lbl[0];
9917 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9918 		name[IFNAMSIZ-1] = 0;
9919 	}
9920 
9921 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9922 		fn = tg3_msi;
9923 		if (tg3_flag(tp, 1SHOT_MSI))
9924 			fn = tg3_msi_1shot;
9925 		flags = 0;
9926 	} else {
9927 		fn = tg3_interrupt;
9928 		if (tg3_flag(tp, TAGGED_STATUS))
9929 			fn = tg3_interrupt_tagged;
9930 		flags = IRQF_SHARED;
9931 	}
9932 
9933 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9934 }
9935 
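/* Check that the device can really deliver an interrupt: install a
 * test handler, force a host coalescing "now" event, and poll for
 * evidence that the interrupt arrived.  Returns 0 on success and
 * -EIO if nothing was observed.
 */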
9936 static int tg3_test_interrupt(struct tg3 *tp)
9937 {
9938 	struct tg3_napi *tnapi = &tp->napi[0];
9939 	struct net_device *dev = tp->dev;
9940 	int err, i, intr_ok = 0;
9941 	u32 val;
9942 
9943 	if (!netif_running(dev))
9944 		return -ENODEV;
9945 
9946 	tg3_disable_ints(tp);
9947 
9948 	free_irq(tnapi->irq_vec, tnapi);
9949 
9950 	/*
9951 	 * Turn off MSI one shot mode.  Otherwise this test has no
9952 	 * observable way to know whether the interrupt was delivered.
9953 	 */
9954 	if (tg3_flag(tp, 57765_PLUS)) {
9955 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9956 		tw32(MSGINT_MODE, val);
9957 	}
9958 
9959 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9960 			  IRQF_SHARED, dev->name, tnapi);
9961 	if (err)
9962 		return err;
9963 
9964 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9965 	tg3_enable_ints(tp);
9966 
9967 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9968 	       tnapi->coal_now);
9969 
9970 	for (i = 0; i < 5; i++) {
9971 		u32 int_mbox, misc_host_ctrl;
9972 
9973 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9974 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9975 
9976 		if ((int_mbox != 0) ||
9977 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9978 			intr_ok = 1;
9979 			break;
9980 		}
9981 
9982 		if (tg3_flag(tp, 57765_PLUS) &&
9983 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9984 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9985 
9986 		msleep(10);
9987 	}
9988 
9989 	tg3_disable_ints(tp);
9990 
9991 	free_irq(tnapi->irq_vec, tnapi);
9992 
9993 	err = tg3_request_irq(tp, 0);
9994 
9995 	if (err)
9996 		return err;
9997 
9998 	if (intr_ok) {
9999 		/* Reenable MSI one shot mode. */
10000 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10001 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10002 			tw32(MSGINT_MODE, val);
10003 		}
10004 		return 0;
10005 	}
10006 
10007 	return -EIO;
10008 }
10009 
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 * INTx mode is successfully restored.
 */
10013 static int tg3_test_msi(struct tg3 *tp)
10014 {
10015 	int err;
10016 	u16 pci_cmd;
10017 
10018 	if (!tg3_flag(tp, USING_MSI))
10019 		return 0;
10020 
10021 	/* Turn off SERR reporting in case MSI terminates with Master
10022 	 * Abort.
10023 	 */
10024 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10025 	pci_write_config_word(tp->pdev, PCI_COMMAND,
10026 			      pci_cmd & ~PCI_COMMAND_SERR);
10027 
10028 	err = tg3_test_interrupt(tp);
10029 
10030 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10031 
10032 	if (!err)
10033 		return 0;
10034 
10035 	/* other failures */
10036 	if (err != -EIO)
10037 		return err;
10038 
10039 	/* MSI test failed, go back to INTx mode */
10040 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10041 		    "to INTx mode. Please report this failure to the PCI "
10042 		    "maintainer and include system chipset information\n");
10043 
10044 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10045 
10046 	pci_disable_msi(tp->pdev);
10047 
10048 	tg3_flag_clear(tp, USING_MSI);
10049 	tp->napi[0].irq_vec = tp->pdev->irq;
10050 
10051 	err = tg3_request_irq(tp, 0);
10052 	if (err)
10053 		return err;
10054 
10055 	/* Need to reset the chip because the MSI cycle may have terminated
10056 	 * with Master Abort.
10057 	 */
10058 	tg3_full_lock(tp, 1);
10059 
10060 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10061 	err = tg3_init_hw(tp, 1);
10062 
10063 	tg3_full_unlock(tp);
10064 
10065 	if (err)
10066 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10067 
10068 	return err;
10069 }
10070 
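/* Fetch the firmware image named by tp->fw_needed and sanity check
 * the length recorded in its header against the blob size.
 */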
10071 static int tg3_request_firmware(struct tg3 *tp)
10072 {
10073 	const __be32 *fw_data;
10074 
10075 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10076 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10077 			   tp->fw_needed);
10078 		return -ENOENT;
10079 	}
10080 
10081 	fw_data = (void *)tp->fw->data;
10082 
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
10087 
10088 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
10089 	if (tp->fw_len < (tp->fw->size - 12)) {
10090 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10091 			   tp->fw_len, tp->fw_needed);
10092 		release_firmware(tp->fw);
10093 		tp->fw = NULL;
10094 		return -EINVAL;
10095 	}
10096 
10097 	/* We no longer need firmware; we have it. */
10098 	tp->fw_needed = NULL;
10099 	return 0;
10100 }
10101 
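/* Try to put the device into MSI-X mode.  The vector count is sized
 * from the default RSS queue count, plus one vector reserved for
 * link and other non-ring events.  When more than one vector is
 * granted, RSS (and, on 5719/5720, TSS) is enabled as a side effect.
 * Returns false if the vectors cannot be allocated.
 */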
10102 static bool tg3_enable_msix(struct tg3 *tp)
10103 {
10104 	int i, rc;
10105 	struct msix_entry msix_ent[tp->irq_max];
10106 
10107 	tp->irq_cnt = netif_get_num_default_rss_queues();
10108 	if (tp->irq_cnt > 1) {
10109 		/* We want as many rx rings enabled as there are cpus.
10110 		 * In multiqueue MSI-X mode, the first MSI-X vector
10111 		 * only deals with link interrupts, etc, so we add
10112 		 * one to the number of vectors we are requesting.
10113 		 */
10114 		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
10115 	}
10116 
10117 	for (i = 0; i < tp->irq_max; i++) {
10118 		msix_ent[i].entry  = i;
10119 		msix_ent[i].vector = 0;
10120 	}
10121 
10122 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10123 	if (rc < 0) {
10124 		return false;
10125 	} else if (rc != 0) {
10126 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
10127 			return false;
10128 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10129 			      tp->irq_cnt, rc);
10130 		tp->irq_cnt = rc;
10131 	}
10132 
10133 	for (i = 0; i < tp->irq_max; i++)
10134 		tp->napi[i].irq_vec = msix_ent[i].vector;
10135 
10136 	netif_set_real_num_tx_queues(tp->dev, 1);
10137 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10138 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10139 		pci_disable_msix(tp->pdev);
10140 		return false;
10141 	}
10142 
10143 	if (tp->irq_cnt > 1) {
10144 		tg3_flag_set(tp, ENABLE_RSS);
10145 
10146 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10147 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10148 			tg3_flag_set(tp, ENABLE_TSS);
10149 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10150 		}
10151 	}
10152 
10153 	return true;
10154 }
10155 
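/* Select the interrupt scheme: MSI-X if available, then MSI, then
 * legacy INTx.  MSI modes depend on tagged status, so a chip that
 * claims MSI support without it is dropped back to INTx.
 */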
10156 static void tg3_ints_init(struct tg3 *tp)
10157 {
10158 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10159 	    !tg3_flag(tp, TAGGED_STATUS)) {
10160 		/* All MSI supporting chips should support tagged
10161 		 * status.  Assert that this is the case.
10162 		 */
10163 		netdev_warn(tp->dev,
10164 			    "MSI without TAGGED_STATUS? Not using MSI\n");
10165 		goto defcfg;
10166 	}
10167 
10168 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10169 		tg3_flag_set(tp, USING_MSIX);
10170 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10171 		tg3_flag_set(tp, USING_MSI);
10172 
10173 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10174 		u32 msi_mode = tr32(MSGINT_MODE);
10175 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10176 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10177 		if (!tg3_flag(tp, 1SHOT_MSI))
10178 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10179 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10180 	}
10181 defcfg:
10182 	if (!tg3_flag(tp, USING_MSIX)) {
10183 		tp->irq_cnt = 1;
10184 		tp->napi[0].irq_vec = tp->pdev->irq;
10185 		netif_set_real_num_tx_queues(tp->dev, 1);
10186 		netif_set_real_num_rx_queues(tp->dev, 1);
10187 	}
10188 }
10189 
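/* Undo tg3_ints_init(): release any MSI/MSI-X vectors and clear the
 * related feature flags.
 */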
10190 static void tg3_ints_fini(struct tg3 *tp)
10191 {
10192 	if (tg3_flag(tp, USING_MSIX))
10193 		pci_disable_msix(tp->pdev);
10194 	else if (tg3_flag(tp, USING_MSI))
10195 		pci_disable_msi(tp->pdev);
10196 	tg3_flag_clear(tp, USING_MSI);
10197 	tg3_flag_clear(tp, USING_MSIX);
10198 	tg3_flag_clear(tp, ENABLE_RSS);
10199 	tg3_flag_clear(tp, ENABLE_TSS);
10200 }
10201 
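/* Bring the interface up: load any needed firmware, power up the
 * chip, configure interrupts and NAPI contexts, allocate the rings,
 * and program the hardware before waking the transmit queues.
 */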
10202 static int tg3_open(struct net_device *dev)
10203 {
10204 	struct tg3 *tp = netdev_priv(dev);
10205 	int i, err;
10206 
10207 	if (tp->fw_needed) {
10208 		err = tg3_request_firmware(tp);
10209 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10210 			if (err)
10211 				return err;
10212 		} else if (err) {
10213 			netdev_warn(tp->dev, "TSO capability disabled\n");
10214 			tg3_flag_clear(tp, TSO_CAPABLE);
10215 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
10216 			netdev_notice(tp->dev, "TSO capability restored\n");
10217 			tg3_flag_set(tp, TSO_CAPABLE);
10218 		}
10219 	}
10220 
10221 	netif_carrier_off(tp->dev);
10222 
10223 	err = tg3_power_up(tp);
10224 	if (err)
10225 		return err;
10226 
10227 	tg3_full_lock(tp, 0);
10228 
10229 	tg3_disable_ints(tp);
10230 	tg3_flag_clear(tp, INIT_COMPLETE);
10231 
10232 	tg3_full_unlock(tp);
10233 
10234 	/*
10235 	 * Setup interrupts first so we know how
10236 	 * many NAPI resources to allocate
10237 	 */
10238 	tg3_ints_init(tp);
10239 
10240 	tg3_rss_check_indir_tbl(tp);
10241 
10242 	/* The placement of this call is tied
10243 	 * to the setup and use of Host TX descriptors.
10244 	 */
10245 	err = tg3_alloc_consistent(tp);
10246 	if (err)
10247 		goto err_out1;
10248 
10249 	tg3_napi_init(tp);
10250 
10251 	tg3_napi_enable(tp);
10252 
10253 	for (i = 0; i < tp->irq_cnt; i++) {
10254 		struct tg3_napi *tnapi = &tp->napi[i];
10255 		err = tg3_request_irq(tp, i);
10256 		if (err) {
10257 			for (i--; i >= 0; i--) {
10258 				tnapi = &tp->napi[i];
10259 				free_irq(tnapi->irq_vec, tnapi);
10260 			}
10261 			goto err_out2;
10262 		}
10263 	}
10264 
10265 	tg3_full_lock(tp, 0);
10266 
10267 	err = tg3_init_hw(tp, 1);
10268 	if (err) {
10269 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10270 		tg3_free_rings(tp);
10271 	}
10272 
10273 	tg3_full_unlock(tp);
10274 
10275 	if (err)
10276 		goto err_out3;
10277 
10278 	if (tg3_flag(tp, USING_MSI)) {
10279 		err = tg3_test_msi(tp);
10280 
10281 		if (err) {
10282 			tg3_full_lock(tp, 0);
10283 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10284 			tg3_free_rings(tp);
10285 			tg3_full_unlock(tp);
10286 
10287 			goto err_out2;
10288 		}
10289 
10290 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10291 			u32 val = tr32(PCIE_TRANSACTION_CFG);
10292 
10293 			tw32(PCIE_TRANSACTION_CFG,
10294 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10295 		}
10296 	}
10297 
10298 	tg3_phy_start(tp);
10299 
10300 	tg3_hwmon_open(tp);
10301 
10302 	tg3_full_lock(tp, 0);
10303 
10304 	tg3_timer_start(tp);
10305 	tg3_flag_set(tp, INIT_COMPLETE);
10306 	tg3_enable_ints(tp);
10307 
10308 	tg3_full_unlock(tp);
10309 
10310 	netif_tx_start_all_queues(dev);
10311 
	/*
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it's installed properly now.
	 */
10316 	if (dev->features & NETIF_F_LOOPBACK)
10317 		tg3_set_loopback(dev, dev->features);
10318 
10319 	return 0;
10320 
10321 err_out3:
10322 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10323 		struct tg3_napi *tnapi = &tp->napi[i];
10324 		free_irq(tnapi->irq_vec, tnapi);
10325 	}
10326 
10327 err_out2:
10328 	tg3_napi_disable(tp);
10329 	tg3_napi_fini(tp);
10330 	tg3_free_consistent(tp);
10331 
10332 err_out1:
10333 	tg3_ints_fini(tp);
10334 	tg3_frob_aux_power(tp, false);
10335 	pci_set_power_state(tp->pdev, PCI_D3hot);
10336 	return err;
10337 }
10338 
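/* Bring the interface down: stop NAPI and the timer, halt the chip,
 * release IRQs and DMA memory, and power the device down.
 */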
10339 static int tg3_close(struct net_device *dev)
10340 {
10341 	int i;
10342 	struct tg3 *tp = netdev_priv(dev);
10343 
10344 	tg3_napi_disable(tp);
10345 	tg3_reset_task_cancel(tp);
10346 
10347 	netif_tx_stop_all_queues(dev);
10348 
10349 	tg3_timer_stop(tp);
10350 
10351 	tg3_hwmon_close(tp);
10352 
10353 	tg3_phy_stop(tp);
10354 
10355 	tg3_full_lock(tp, 1);
10356 
10357 	tg3_disable_ints(tp);
10358 
10359 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10360 	tg3_free_rings(tp);
10361 	tg3_flag_clear(tp, INIT_COMPLETE);
10362 
10363 	tg3_full_unlock(tp);
10364 
10365 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10366 		struct tg3_napi *tnapi = &tp->napi[i];
10367 		free_irq(tnapi->irq_vec, tnapi);
10368 	}
10369 
10370 	tg3_ints_fini(tp);
10371 
10372 	/* Clear stats across close / open calls */
10373 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10374 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10375 
10376 	tg3_napi_fini(tp);
10377 
10378 	tg3_free_consistent(tp);
10379 
10380 	tg3_power_down(tp);
10381 
10382 	netif_carrier_off(tp->dev);
10383 
10384 	return 0;
10385 }
10386 
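/* Combine the high and low halves of a 64-bit hardware statistic. */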
10387 static inline u64 get_stat64(tg3_stat64_t *val)
10388 {
	return ((u64)val->high << 32) | ((u64)val->low);
10390 }
10391 
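/* On 5700/5701 with a copper PHY, CRC errors are counted by the PHY
 * (MII_TG3_RXR_COUNTERS) and accumulated in tp->phy_crc_errors; all
 * other devices use the MAC's rx_fcs_errors counter.
 */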
10392 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10393 {
10394 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10395 
10396 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10397 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10398 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10399 		u32 val;
10400 
10401 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10402 			tg3_writephy(tp, MII_TG3_TEST1,
10403 				     val | MII_TG3_TEST1_CRC_EN);
10404 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10405 		} else
10406 			val = 0;
10407 
10408 		tp->phy_crc_errors += val;
10409 
10410 		return tp->phy_crc_errors;
10411 	}
10412 
10413 	return get_stat64(&hw_stats->rx_fcs_errors);
10414 }
10415 
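/* Sum the total carried in tp->estats_prev (saved across chip
 * resets) with the live hardware counter for @member.
 */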
10416 #define ESTAT_ADD(member) \
10417 	estats->member =	old_estats->member + \
10418 				get_stat64(&hw_stats->member)
10419 
10420 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10421 {
10422 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10423 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10424 
10425 	ESTAT_ADD(rx_octets);
10426 	ESTAT_ADD(rx_fragments);
10427 	ESTAT_ADD(rx_ucast_packets);
10428 	ESTAT_ADD(rx_mcast_packets);
10429 	ESTAT_ADD(rx_bcast_packets);
10430 	ESTAT_ADD(rx_fcs_errors);
10431 	ESTAT_ADD(rx_align_errors);
10432 	ESTAT_ADD(rx_xon_pause_rcvd);
10433 	ESTAT_ADD(rx_xoff_pause_rcvd);
10434 	ESTAT_ADD(rx_mac_ctrl_rcvd);
10435 	ESTAT_ADD(rx_xoff_entered);
10436 	ESTAT_ADD(rx_frame_too_long_errors);
10437 	ESTAT_ADD(rx_jabbers);
10438 	ESTAT_ADD(rx_undersize_packets);
10439 	ESTAT_ADD(rx_in_length_errors);
10440 	ESTAT_ADD(rx_out_length_errors);
10441 	ESTAT_ADD(rx_64_or_less_octet_packets);
10442 	ESTAT_ADD(rx_65_to_127_octet_packets);
10443 	ESTAT_ADD(rx_128_to_255_octet_packets);
10444 	ESTAT_ADD(rx_256_to_511_octet_packets);
10445 	ESTAT_ADD(rx_512_to_1023_octet_packets);
10446 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
10447 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
10448 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
10449 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
10450 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
10451 
10452 	ESTAT_ADD(tx_octets);
10453 	ESTAT_ADD(tx_collisions);
10454 	ESTAT_ADD(tx_xon_sent);
10455 	ESTAT_ADD(tx_xoff_sent);
10456 	ESTAT_ADD(tx_flow_control);
10457 	ESTAT_ADD(tx_mac_errors);
10458 	ESTAT_ADD(tx_single_collisions);
10459 	ESTAT_ADD(tx_mult_collisions);
10460 	ESTAT_ADD(tx_deferred);
10461 	ESTAT_ADD(tx_excessive_collisions);
10462 	ESTAT_ADD(tx_late_collisions);
10463 	ESTAT_ADD(tx_collide_2times);
10464 	ESTAT_ADD(tx_collide_3times);
10465 	ESTAT_ADD(tx_collide_4times);
10466 	ESTAT_ADD(tx_collide_5times);
10467 	ESTAT_ADD(tx_collide_6times);
10468 	ESTAT_ADD(tx_collide_7times);
10469 	ESTAT_ADD(tx_collide_8times);
10470 	ESTAT_ADD(tx_collide_9times);
10471 	ESTAT_ADD(tx_collide_10times);
10472 	ESTAT_ADD(tx_collide_11times);
10473 	ESTAT_ADD(tx_collide_12times);
10474 	ESTAT_ADD(tx_collide_13times);
10475 	ESTAT_ADD(tx_collide_14times);
10476 	ESTAT_ADD(tx_collide_15times);
10477 	ESTAT_ADD(tx_ucast_packets);
10478 	ESTAT_ADD(tx_mcast_packets);
10479 	ESTAT_ADD(tx_bcast_packets);
10480 	ESTAT_ADD(tx_carrier_sense_errors);
10481 	ESTAT_ADD(tx_discards);
10482 	ESTAT_ADD(tx_errors);
10483 
10484 	ESTAT_ADD(dma_writeq_full);
10485 	ESTAT_ADD(dma_write_prioq_full);
10486 	ESTAT_ADD(rxbds_empty);
10487 	ESTAT_ADD(rx_discards);
10488 	ESTAT_ADD(rx_errors);
10489 	ESTAT_ADD(rx_threshold_hit);
10490 
10491 	ESTAT_ADD(dma_readq_full);
10492 	ESTAT_ADD(dma_read_prioq_full);
10493 	ESTAT_ADD(tx_comp_queue_full);
10494 
10495 	ESTAT_ADD(ring_set_send_prod_index);
10496 	ESTAT_ADD(ring_status_update);
10497 	ESTAT_ADD(nic_irqs);
10498 	ESTAT_ADD(nic_avoided_irqs);
10499 	ESTAT_ADD(nic_tx_threshold_hit);
10500 
10501 	ESTAT_ADD(mbuf_lwm_thresh_hit);
10502 }
10503 
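/* Fill a struct rtnl_link_stats64 from the hardware statistics
 * block, folding in the totals carried in tp->net_stats_prev.
 */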
10504 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10505 {
10506 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10507 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10508 
10509 	stats->rx_packets = old_stats->rx_packets +
10510 		get_stat64(&hw_stats->rx_ucast_packets) +
10511 		get_stat64(&hw_stats->rx_mcast_packets) +
10512 		get_stat64(&hw_stats->rx_bcast_packets);
10513 
10514 	stats->tx_packets = old_stats->tx_packets +
10515 		get_stat64(&hw_stats->tx_ucast_packets) +
10516 		get_stat64(&hw_stats->tx_mcast_packets) +
10517 		get_stat64(&hw_stats->tx_bcast_packets);
10518 
10519 	stats->rx_bytes = old_stats->rx_bytes +
10520 		get_stat64(&hw_stats->rx_octets);
10521 	stats->tx_bytes = old_stats->tx_bytes +
10522 		get_stat64(&hw_stats->tx_octets);
10523 
10524 	stats->rx_errors = old_stats->rx_errors +
10525 		get_stat64(&hw_stats->rx_errors);
10526 	stats->tx_errors = old_stats->tx_errors +
10527 		get_stat64(&hw_stats->tx_errors) +
10528 		get_stat64(&hw_stats->tx_mac_errors) +
10529 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10530 		get_stat64(&hw_stats->tx_discards);
10531 
10532 	stats->multicast = old_stats->multicast +
10533 		get_stat64(&hw_stats->rx_mcast_packets);
10534 	stats->collisions = old_stats->collisions +
10535 		get_stat64(&hw_stats->tx_collisions);
10536 
10537 	stats->rx_length_errors = old_stats->rx_length_errors +
10538 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10539 		get_stat64(&hw_stats->rx_undersize_packets);
10540 
10541 	stats->rx_over_errors = old_stats->rx_over_errors +
10542 		get_stat64(&hw_stats->rxbds_empty);
10543 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10544 		get_stat64(&hw_stats->rx_align_errors);
10545 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10546 		get_stat64(&hw_stats->tx_discards);
10547 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10548 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10549 
10550 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10551 		tg3_calc_crc_errors(tp);
10552 
10553 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10554 		get_stat64(&hw_stats->rx_discards);
10555 
10556 	stats->rx_dropped = tp->rx_dropped;
10557 	stats->tx_dropped = tp->tx_dropped;
10558 }
10559 
10560 static int tg3_get_regs_len(struct net_device *dev)
10561 {
10562 	return TG3_REG_BLK_SIZE;
10563 }
10564 
10565 static void tg3_get_regs(struct net_device *dev,
10566 		struct ethtool_regs *regs, void *_p)
10567 {
10568 	struct tg3 *tp = netdev_priv(dev);
10569 
10570 	regs->version = 0;
10571 
10572 	memset(_p, 0, TG3_REG_BLK_SIZE);
10573 
10574 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10575 		return;
10576 
10577 	tg3_full_lock(tp, 0);
10578 
10579 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10580 
10581 	tg3_full_unlock(tp);
10582 }
10583 
10584 static int tg3_get_eeprom_len(struct net_device *dev)
10585 {
10586 	struct tg3 *tp = netdev_priv(dev);
10587 
10588 	return tp->nvram_size;
10589 }
10590 
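/* ethtool get_eeprom handler.  NVRAM is read in 4-byte words, so
 * unaligned head and tail bytes are copied out of a bounce word.
 */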
10591 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10592 {
10593 	struct tg3 *tp = netdev_priv(dev);
10594 	int ret;
10595 	u8  *pd;
10596 	u32 i, offset, len, b_offset, b_count;
10597 	__be32 val;
10598 
10599 	if (tg3_flag(tp, NO_NVRAM))
10600 		return -EINVAL;
10601 
10602 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10603 		return -EAGAIN;
10604 
10605 	offset = eeprom->offset;
10606 	len = eeprom->len;
10607 	eeprom->len = 0;
10608 
10609 	eeprom->magic = TG3_EEPROM_MAGIC;
10610 
10611 	if (offset & 3) {
10612 		/* adjustments to start on required 4 byte boundary */
10613 		b_offset = offset & 3;
10614 		b_count = 4 - b_offset;
10615 		if (b_count > len) {
10616 			/* i.e. offset=1 len=2 */
10617 			b_count = len;
10618 		}
10619 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10620 		if (ret)
10621 			return ret;
10622 		memcpy(data, ((char *)&val) + b_offset, b_count);
10623 		len -= b_count;
10624 		offset += b_count;
10625 		eeprom->len += b_count;
10626 	}
10627 
10628 	/* read bytes up to the last 4 byte boundary */
10629 	pd = &data[eeprom->len];
10630 	for (i = 0; i < (len - (len & 3)); i += 4) {
10631 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10632 		if (ret) {
10633 			eeprom->len += i;
10634 			return ret;
10635 		}
10636 		memcpy(pd + i, &val, 4);
10637 	}
10638 	eeprom->len += i;
10639 
10640 	if (len & 3) {
10641 		/* read last bytes not ending on 4 byte boundary */
10642 		pd = &data[eeprom->len];
10643 		b_count = len & 3;
10644 		b_offset = offset + len - b_count;
10645 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10646 		if (ret)
10647 			return ret;
10648 		memcpy(pd, &val, b_count);
10649 		eeprom->len += b_count;
10650 	}
10651 	return 0;
10652 }
10653 
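/* ethtool set_eeprom handler.  NVRAM writes must be 4-byte aligned;
 * an unaligned request is widened by reading the neighbouring words
 * and merging the user data into a bounce buffer first.
 */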
10654 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10655 {
10656 	struct tg3 *tp = netdev_priv(dev);
10657 	int ret;
10658 	u32 offset, len, b_offset, odd_len;
10659 	u8 *buf;
10660 	__be32 start, end;
10661 
10662 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10663 		return -EAGAIN;
10664 
10665 	if (tg3_flag(tp, NO_NVRAM) ||
10666 	    eeprom->magic != TG3_EEPROM_MAGIC)
10667 		return -EINVAL;
10668 
10669 	offset = eeprom->offset;
10670 	len = eeprom->len;
10671 
10672 	if ((b_offset = (offset & 3))) {
10673 		/* adjustments to start on required 4 byte boundary */
10674 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10675 		if (ret)
10676 			return ret;
10677 		len += b_offset;
10678 		offset &= ~3;
10679 		if (len < 4)
10680 			len = 4;
10681 	}
10682 
10683 	odd_len = 0;
10684 	if (len & 3) {
10685 		/* adjustments to end on required 4 byte boundary */
10686 		odd_len = 1;
10687 		len = (len + 3) & ~3;
10688 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10689 		if (ret)
10690 			return ret;
10691 	}
10692 
10693 	buf = data;
10694 	if (b_offset || odd_len) {
10695 		buf = kmalloc(len, GFP_KERNEL);
10696 		if (!buf)
10697 			return -ENOMEM;
10698 		if (b_offset)
10699 			memcpy(buf, &start, 4);
10700 		if (odd_len)
10701 			memcpy(buf+len-4, &end, 4);
10702 		memcpy(buf + b_offset, data, eeprom->len);
10703 	}
10704 
10705 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10706 
10707 	if (buf != data)
10708 		kfree(buf);
10709 
10710 	return ret;
10711 }
10712 
10713 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10714 {
10715 	struct tg3 *tp = netdev_priv(dev);
10716 
10717 	if (tg3_flag(tp, USE_PHYLIB)) {
10718 		struct phy_device *phydev;
10719 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10720 			return -EAGAIN;
10721 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10722 		return phy_ethtool_gset(phydev, cmd);
10723 	}
10724 
10725 	cmd->supported = (SUPPORTED_Autoneg);
10726 
10727 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10728 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10729 				   SUPPORTED_1000baseT_Full);
10730 
10731 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10732 		cmd->supported |= (SUPPORTED_100baseT_Half |
10733 				  SUPPORTED_100baseT_Full |
10734 				  SUPPORTED_10baseT_Half |
10735 				  SUPPORTED_10baseT_Full |
10736 				  SUPPORTED_TP);
10737 		cmd->port = PORT_TP;
10738 	} else {
10739 		cmd->supported |= SUPPORTED_FIBRE;
10740 		cmd->port = PORT_FIBRE;
10741 	}
10742 
10743 	cmd->advertising = tp->link_config.advertising;
10744 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10745 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10746 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10747 				cmd->advertising |= ADVERTISED_Pause;
10748 			} else {
10749 				cmd->advertising |= ADVERTISED_Pause |
10750 						    ADVERTISED_Asym_Pause;
10751 			}
10752 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10753 			cmd->advertising |= ADVERTISED_Asym_Pause;
10754 		}
10755 	}
10756 	if (netif_running(dev) && netif_carrier_ok(dev)) {
10757 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10758 		cmd->duplex = tp->link_config.active_duplex;
10759 		cmd->lp_advertising = tp->link_config.rmt_adv;
10760 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10761 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10762 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10763 			else
10764 				cmd->eth_tp_mdix = ETH_TP_MDI;
10765 		}
10766 	} else {
10767 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10768 		cmd->duplex = DUPLEX_UNKNOWN;
10769 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10770 	}
10771 	cmd->phy_address = tp->phy_addr;
10772 	cmd->transceiver = XCVR_INTERNAL;
10773 	cmd->autoneg = tp->link_config.autoneg;
10774 	cmd->maxtxpkt = 0;
10775 	cmd->maxrxpkt = 0;
10776 	return 0;
10777 }
10778 
10779 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10780 {
10781 	struct tg3 *tp = netdev_priv(dev);
10782 	u32 speed = ethtool_cmd_speed(cmd);
10783 
10784 	if (tg3_flag(tp, USE_PHYLIB)) {
10785 		struct phy_device *phydev;
10786 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10787 			return -EAGAIN;
10788 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10789 		return phy_ethtool_sset(phydev, cmd);
10790 	}
10791 
10792 	if (cmd->autoneg != AUTONEG_ENABLE &&
10793 	    cmd->autoneg != AUTONEG_DISABLE)
10794 		return -EINVAL;
10795 
10796 	if (cmd->autoneg == AUTONEG_DISABLE &&
10797 	    cmd->duplex != DUPLEX_FULL &&
10798 	    cmd->duplex != DUPLEX_HALF)
10799 		return -EINVAL;
10800 
10801 	if (cmd->autoneg == AUTONEG_ENABLE) {
10802 		u32 mask = ADVERTISED_Autoneg |
10803 			   ADVERTISED_Pause |
10804 			   ADVERTISED_Asym_Pause;
10805 
10806 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10807 			mask |= ADVERTISED_1000baseT_Half |
10808 				ADVERTISED_1000baseT_Full;
10809 
10810 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10811 			mask |= ADVERTISED_100baseT_Half |
10812 				ADVERTISED_100baseT_Full |
10813 				ADVERTISED_10baseT_Half |
10814 				ADVERTISED_10baseT_Full |
10815 				ADVERTISED_TP;
10816 		else
10817 			mask |= ADVERTISED_FIBRE;
10818 
10819 		if (cmd->advertising & ~mask)
10820 			return -EINVAL;
10821 
10822 		mask &= (ADVERTISED_1000baseT_Half |
10823 			 ADVERTISED_1000baseT_Full |
10824 			 ADVERTISED_100baseT_Half |
10825 			 ADVERTISED_100baseT_Full |
10826 			 ADVERTISED_10baseT_Half |
10827 			 ADVERTISED_10baseT_Full);
10828 
10829 		cmd->advertising &= mask;
10830 	} else {
10831 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10832 			if (speed != SPEED_1000)
10833 				return -EINVAL;
10834 
10835 			if (cmd->duplex != DUPLEX_FULL)
10836 				return -EINVAL;
10837 		} else {
10838 			if (speed != SPEED_100 &&
10839 			    speed != SPEED_10)
10840 				return -EINVAL;
10841 		}
10842 	}
10843 
10844 	tg3_full_lock(tp, 0);
10845 
10846 	tp->link_config.autoneg = cmd->autoneg;
10847 	if (cmd->autoneg == AUTONEG_ENABLE) {
10848 		tp->link_config.advertising = (cmd->advertising |
10849 					      ADVERTISED_Autoneg);
10850 		tp->link_config.speed = SPEED_UNKNOWN;
10851 		tp->link_config.duplex = DUPLEX_UNKNOWN;
10852 	} else {
10853 		tp->link_config.advertising = 0;
10854 		tp->link_config.speed = speed;
10855 		tp->link_config.duplex = cmd->duplex;
10856 	}
10857 
10858 	if (netif_running(dev))
10859 		tg3_setup_phy(tp, 1);
10860 
10861 	tg3_full_unlock(tp);
10862 
10863 	return 0;
10864 }
10865 
10866 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10867 {
10868 	struct tg3 *tp = netdev_priv(dev);
10869 
10870 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10871 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10872 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10873 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10874 }
10875 
10876 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10877 {
10878 	struct tg3 *tp = netdev_priv(dev);
10879 
10880 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10881 		wol->supported = WAKE_MAGIC;
10882 	else
10883 		wol->supported = 0;
10884 	wol->wolopts = 0;
10885 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10886 		wol->wolopts = WAKE_MAGIC;
10887 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10888 }
10889 
10890 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10891 {
10892 	struct tg3 *tp = netdev_priv(dev);
10893 	struct device *dp = &tp->pdev->dev;
10894 
10895 	if (wol->wolopts & ~WAKE_MAGIC)
10896 		return -EINVAL;
10897 	if ((wol->wolopts & WAKE_MAGIC) &&
10898 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10899 		return -EINVAL;
10900 
10901 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10902 
10903 	spin_lock_bh(&tp->lock);
10904 	if (device_may_wakeup(dp))
10905 		tg3_flag_set(tp, WOL_ENABLE);
10906 	else
10907 		tg3_flag_clear(tp, WOL_ENABLE);
10908 	spin_unlock_bh(&tp->lock);
10909 
10910 	return 0;
10911 }
10912 
10913 static u32 tg3_get_msglevel(struct net_device *dev)
10914 {
10915 	struct tg3 *tp = netdev_priv(dev);
10916 	return tp->msg_enable;
10917 }
10918 
10919 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10920 {
10921 	struct tg3 *tp = netdev_priv(dev);
10922 	tp->msg_enable = value;
10923 }
10924 
10925 static int tg3_nway_reset(struct net_device *dev)
10926 {
10927 	struct tg3 *tp = netdev_priv(dev);
10928 	int r;
10929 
10930 	if (!netif_running(dev))
10931 		return -EAGAIN;
10932 
10933 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10934 		return -EINVAL;
10935 
10936 	if (tg3_flag(tp, USE_PHYLIB)) {
10937 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10938 			return -EAGAIN;
10939 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10940 	} else {
10941 		u32 bmcr;
10942 
10943 		spin_lock_bh(&tp->lock);
10944 		r = -EINVAL;
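		/* Read BMCR twice; the first read can return a stale
		 * latched value, so only the second result is used.
		 */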
10945 		tg3_readphy(tp, MII_BMCR, &bmcr);
10946 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10947 		    ((bmcr & BMCR_ANENABLE) ||
10948 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10949 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10950 						   BMCR_ANENABLE);
10951 			r = 0;
10952 		}
10953 		spin_unlock_bh(&tp->lock);
10954 	}
10955 
10956 	return r;
10957 }
10958 
10959 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10960 {
10961 	struct tg3 *tp = netdev_priv(dev);
10962 
10963 	ering->rx_max_pending = tp->rx_std_ring_mask;
10964 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10965 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10966 	else
10967 		ering->rx_jumbo_max_pending = 0;
10968 
10969 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10970 
10971 	ering->rx_pending = tp->rx_pending;
10972 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10973 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10974 	else
10975 		ering->rx_jumbo_pending = 0;
10976 
10977 	ering->tx_pending = tp->napi[0].tx_pending;
10978 }
10979 
10980 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10981 {
10982 	struct tg3 *tp = netdev_priv(dev);
10983 	int i, irq_sync = 0, err = 0;
10984 
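	/* The tx ring must hold at least one maximally fragmented skb,
	 * plus extra headroom when the TSO workaround may have to
	 * re-segment a packet in the driver, hence the MAX_SKB_FRAGS
	 * floors below.
	 */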
10985 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10986 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10987 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10988 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10989 	    (tg3_flag(tp, TSO_BUG) &&
10990 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10991 		return -EINVAL;
10992 
10993 	if (netif_running(dev)) {
10994 		tg3_phy_stop(tp);
10995 		tg3_netif_stop(tp);
10996 		irq_sync = 1;
10997 	}
10998 
10999 	tg3_full_lock(tp, irq_sync);
11000 
11001 	tp->rx_pending = ering->rx_pending;
11002 
11003 	if (tg3_flag(tp, MAX_RXPEND_64) &&
11004 	    tp->rx_pending > 63)
11005 		tp->rx_pending = 63;
11006 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11007 
11008 	for (i = 0; i < tp->irq_max; i++)
11009 		tp->napi[i].tx_pending = ering->tx_pending;
11010 
11011 	if (netif_running(dev)) {
11012 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11013 		err = tg3_restart_hw(tp, 1);
11014 		if (!err)
11015 			tg3_netif_start(tp);
11016 	}
11017 
11018 	tg3_full_unlock(tp);
11019 
11020 	if (irq_sync && !err)
11021 		tg3_phy_start(tp);
11022 
11023 	return err;
11024 }
11025 
11026 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11027 {
11028 	struct tg3 *tp = netdev_priv(dev);
11029 
11030 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11031 
11032 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11033 		epause->rx_pause = 1;
11034 	else
11035 		epause->rx_pause = 0;
11036 
11037 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11038 		epause->tx_pause = 1;
11039 	else
11040 		epause->tx_pause = 0;
11041 }
11042 
11043 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11044 {
11045 	struct tg3 *tp = netdev_priv(dev);
11046 	int err = 0;
11047 
11048 	if (tg3_flag(tp, USE_PHYLIB)) {
11049 		u32 newadv;
11050 		struct phy_device *phydev;
11051 
11052 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11053 
11054 		if (!(phydev->supported & SUPPORTED_Pause) ||
11055 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11056 		     (epause->rx_pause != epause->tx_pause)))
11057 			return -EINVAL;
11058 
11059 		tp->link_config.flowctrl = 0;
11060 		if (epause->rx_pause) {
11061 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11062 
11063 			if (epause->tx_pause) {
11064 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
11065 				newadv = ADVERTISED_Pause;
			} else {
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else {
			newadv = 0;
		}
11074 
11075 		if (epause->autoneg)
11076 			tg3_flag_set(tp, PAUSE_AUTONEG);
11077 		else
11078 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11079 
11080 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11081 			u32 oldadv = phydev->advertising &
11082 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11083 			if (oldadv != newadv) {
11084 				phydev->advertising &=
11085 					~(ADVERTISED_Pause |
11086 					  ADVERTISED_Asym_Pause);
11087 				phydev->advertising |= newadv;
11088 				if (phydev->autoneg) {
11089 					/*
11090 					 * Always renegotiate the link to
11091 					 * inform our link partner of our
11092 					 * flow control settings, even if the
11093 					 * flow control is forced.  Let
11094 					 * tg3_adjust_link() do the final
11095 					 * flow control setup.
11096 					 */
11097 					return phy_start_aneg(phydev);
11098 				}
11099 			}
11100 
11101 			if (!epause->autoneg)
11102 				tg3_setup_flow_control(tp, 0, 0);
11103 		} else {
11104 			tp->link_config.advertising &=
11105 					~(ADVERTISED_Pause |
11106 					  ADVERTISED_Asym_Pause);
11107 			tp->link_config.advertising |= newadv;
11108 		}
11109 	} else {
11110 		int irq_sync = 0;
11111 
11112 		if (netif_running(dev)) {
11113 			tg3_netif_stop(tp);
11114 			irq_sync = 1;
11115 		}
11116 
11117 		tg3_full_lock(tp, irq_sync);
11118 
11119 		if (epause->autoneg)
11120 			tg3_flag_set(tp, PAUSE_AUTONEG);
11121 		else
11122 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11123 		if (epause->rx_pause)
11124 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11125 		else
11126 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11127 		if (epause->tx_pause)
11128 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
11129 		else
11130 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11131 
11132 		if (netif_running(dev)) {
11133 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11134 			err = tg3_restart_hw(tp, 1);
11135 			if (!err)
11136 				tg3_netif_start(tp);
11137 		}
11138 
11139 		tg3_full_unlock(tp);
11140 	}
11141 
11142 	return err;
11143 }
11144 
11145 static int tg3_get_sset_count(struct net_device *dev, int sset)
11146 {
11147 	switch (sset) {
11148 	case ETH_SS_TEST:
11149 		return TG3_NUM_TEST;
11150 	case ETH_SS_STATS:
11151 		return TG3_NUM_STATS;
11152 	default:
11153 		return -EOPNOTSUPP;
11154 	}
11155 }
11156 
11157 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11158 			 u32 *rules __always_unused)
11159 {
11160 	struct tg3 *tp = netdev_priv(dev);
11161 
11162 	if (!tg3_flag(tp, SUPPORT_MSIX))
11163 		return -EOPNOTSUPP;
11164 
11165 	switch (info->cmd) {
11166 	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev)) {
			info->data = tp->irq_cnt;
		} else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}
11174 
11175 		/* The first interrupt vector only
11176 		 * handles link interrupts.
11177 		 */
11178 		info->data -= 1;
11179 		return 0;
11180 
11181 	default:
11182 		return -EOPNOTSUPP;
11183 	}
11184 }
11185 
11186 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11187 {
11188 	u32 size = 0;
11189 	struct tg3 *tp = netdev_priv(dev);
11190 
11191 	if (tg3_flag(tp, SUPPORT_MSIX))
11192 		size = TG3_RSS_INDIR_TBL_SIZE;
11193 
11194 	return size;
11195 }
11196 
11197 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11198 {
11199 	struct tg3 *tp = netdev_priv(dev);
11200 	int i;
11201 
11202 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11203 		indir[i] = tp->rss_ind_tbl[i];
11204 
11205 	return 0;
11206 }
11207 
11208 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11209 {
11210 	struct tg3 *tp = netdev_priv(dev);
11211 	size_t i;
11212 
11213 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11214 		tp->rss_ind_tbl[i] = indir[i];
11215 
11216 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11217 		return 0;
11218 
11219 	/* It is legal to write the indirection
11220 	 * table while the device is running.
11221 	 */
11222 	tg3_full_lock(tp, 0);
11223 	tg3_rss_write_indir_tbl(tp);
11224 	tg3_full_unlock(tp);
11225 
11226 	return 0;
11227 }
11228 
11229 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11230 {
11231 	switch (stringset) {
11232 	case ETH_SS_STATS:
11233 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11234 		break;
11235 	case ETH_SS_TEST:
11236 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11237 		break;
11238 	default:
		WARN_ON(1);	/* should be a WARN() with a message */
11240 		break;
11241 	}
11242 }
11243 
11244 static int tg3_set_phys_id(struct net_device *dev,
11245 			    enum ethtool_phys_id_state state)
11246 {
11247 	struct tg3 *tp = netdev_priv(dev);
11248 
11249 	if (!netif_running(tp->dev))
11250 		return -EAGAIN;
11251 
11252 	switch (state) {
11253 	case ETHTOOL_ID_ACTIVE:
11254 		return 1;	/* cycle on/off once per second */
11255 
11256 	case ETHTOOL_ID_ON:
11257 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11258 		     LED_CTRL_1000MBPS_ON |
11259 		     LED_CTRL_100MBPS_ON |
11260 		     LED_CTRL_10MBPS_ON |
11261 		     LED_CTRL_TRAFFIC_OVERRIDE |
11262 		     LED_CTRL_TRAFFIC_BLINK |
11263 		     LED_CTRL_TRAFFIC_LED);
11264 		break;
11265 
11266 	case ETHTOOL_ID_OFF:
11267 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11268 		     LED_CTRL_TRAFFIC_OVERRIDE);
11269 		break;
11270 
11271 	case ETHTOOL_ID_INACTIVE:
11272 		tw32(MAC_LED_CTRL, tp->led_ctrl);
11273 		break;
11274 	}
11275 
11276 	return 0;
11277 }
11278 
11279 static void tg3_get_ethtool_stats(struct net_device *dev,
11280 				   struct ethtool_stats *estats, u64 *tmp_stats)
11281 {
11282 	struct tg3 *tp = netdev_priv(dev);
11283 
11284 	if (tp->hw_stats)
11285 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11286 	else
11287 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11288 }
11289 
11290 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11291 {
11292 	int i;
11293 	__be32 *buf;
11294 	u32 offset = 0, len = 0;
11295 	u32 magic, val;
11296 
11297 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11298 		return NULL;
11299 
11300 	if (magic == TG3_EEPROM_MAGIC) {
11301 		for (offset = TG3_NVM_DIR_START;
11302 		     offset < TG3_NVM_DIR_END;
11303 		     offset += TG3_NVM_DIRENT_SIZE) {
11304 			if (tg3_nvram_read(tp, offset, &val))
11305 				return NULL;
11306 
11307 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11308 			    TG3_NVM_DIRTYPE_EXTVPD)
11309 				break;
11310 		}
11311 
11312 		if (offset != TG3_NVM_DIR_END) {
11313 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11314 			if (tg3_nvram_read(tp, offset + 4, &offset))
11315 				return NULL;
11316 
11317 			offset = tg3_nvram_logical_addr(tp, offset);
11318 		}
11319 	}
11320 
11321 	if (!offset || !len) {
11322 		offset = TG3_NVM_VPD_OFF;
11323 		len = TG3_NVM_VPD_LEN;
11324 	}
11325 
11326 	buf = kmalloc(len, GFP_KERNEL);
11327 	if (buf == NULL)
11328 		return NULL;
11329 
11330 	if (magic == TG3_EEPROM_MAGIC) {
11331 		for (i = 0; i < len; i += 4) {
11332 			/* The data is in little-endian format in NVRAM.
11333 			 * Use the big-endian read routines to preserve
11334 			 * the byte order as it exists in NVRAM.
11335 			 */
11336 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11337 				goto error;
11338 		}
11339 	} else {
11340 		u8 *ptr;
11341 		ssize_t cnt;
11342 		unsigned int pos = 0;
11343 
11344 		ptr = (u8 *)&buf[0];
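		/* Allow up to three timeouts or signal interruptions
		 * before giving up on the VPD read; interrupted reads
		 * simply make no progress.
		 */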
11345 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11346 			cnt = pci_read_vpd(tp->pdev, pos,
11347 					   len - pos, ptr);
11348 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11349 				cnt = 0;
11350 			else if (cnt < 0)
11351 				goto error;
11352 		}
11353 		if (pos != len)
11354 			goto error;
11355 	}
11356 
11357 	*vpdlen = len;
11358 
11359 	return buf;
11360 
11361 error:
11362 	kfree(buf);
11363 	return NULL;
11364 }
11365 
11366 #define NVRAM_TEST_SIZE 0x100
11367 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11368 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11369 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11370 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11371 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11372 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11373 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11374 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
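/* A selfboot HW-format image is NVRAM_SELFBOOT_HW_SIZE (0x20) bytes in
 * total: 0x1c data bytes plus four bytes (at offsets 0, 8, 16, and 17)
 * holding the packed per-byte parity bits.
 */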
11375 
11376 static int tg3_test_nvram(struct tg3 *tp)
11377 {
11378 	u32 csum, magic, len;
11379 	__be32 *buf;
11380 	int i, j, k, err = 0, size;
11381 
11382 	if (tg3_flag(tp, NO_NVRAM))
11383 		return 0;
11384 
11385 	if (tg3_nvram_read(tp, 0, &magic) != 0)
11386 		return -EIO;
11387 
	if (magic == TG3_EEPROM_MAGIC) {
		size = NVRAM_TEST_SIZE;
	} else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11391 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11392 		    TG3_EEPROM_SB_FORMAT_1) {
11393 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11394 			case TG3_EEPROM_SB_REVISION_0:
11395 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11396 				break;
11397 			case TG3_EEPROM_SB_REVISION_2:
11398 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11399 				break;
11400 			case TG3_EEPROM_SB_REVISION_3:
11401 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11402 				break;
11403 			case TG3_EEPROM_SB_REVISION_4:
11404 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11405 				break;
11406 			case TG3_EEPROM_SB_REVISION_5:
11407 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11408 				break;
11409 			case TG3_EEPROM_SB_REVISION_6:
11410 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11411 				break;
11412 			default:
11413 				return -EIO;
11414 			}
		} else {
			return 0;
		}
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) {
		size = NVRAM_SELFBOOT_HW_SIZE;
	} else {
		return -EIO;
	}
11421 
11422 	buf = kmalloc(size, GFP_KERNEL);
11423 	if (buf == NULL)
11424 		return -ENOMEM;
11425 
11426 	err = -EIO;
11427 	for (i = 0, j = 0; i < size; i += 4, j++) {
11428 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11429 		if (err)
11430 			break;
11431 	}
11432 	if (i < size)
11433 		goto out;
11434 
11435 	/* Selfboot format */
11436 	magic = be32_to_cpu(buf[0]);
11437 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11438 	    TG3_EEPROM_MAGIC_FW) {
11439 		u8 *buf8 = (u8 *) buf, csum8 = 0;
11440 
11441 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11442 		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum skips the 4-byte MBA
			 * (Multiple Boot Agent) field.
			 */
11444 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11445 				csum8 += buf8[i];
11446 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11447 				csum8 += buf8[i];
11448 		} else {
11449 			for (i = 0; i < size; i++)
11450 				csum8 += buf8[i];
11451 		}
11452 
11453 		if (csum8 == 0) {
11454 			err = 0;
11455 			goto out;
11456 		}
11457 
11458 		err = -EIO;
11459 		goto out;
11460 	}
11461 
11462 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11463 	    TG3_EEPROM_MAGIC_HW) {
11464 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11465 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11466 		u8 *buf8 = (u8 *) buf;
11467 
11468 		/* Separate the parity bits and the data bytes.  */
11469 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11470 			if ((i == 0) || (i == 8)) {
11471 				int l;
11472 				u8 msk;
11473 
11474 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11475 					parity[k++] = buf8[i] & msk;
11476 				i++;
11477 			} else if (i == 16) {
11478 				int l;
11479 				u8 msk;
11480 
11481 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11482 					parity[k++] = buf8[i] & msk;
11483 				i++;
11484 
11485 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11486 					parity[k++] = buf8[i] & msk;
11487 				i++;
11488 			}
11489 			data[j++] = buf8[i];
11490 		}
11491 
11492 		err = -EIO;
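		/* Each data byte and its stored parity bit must have odd
		 * parity overall: a byte with an odd number of set bits
		 * pairs with a clear parity bit, an even one with a set
		 * parity bit.
		 */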
11493 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11494 			u8 hw8 = hweight8(data[i]);
11495 
11496 			if ((hw8 & 0x1) && parity[i])
11497 				goto out;
11498 			else if (!(hw8 & 0x1) && !parity[i])
11499 				goto out;
11500 		}
11501 		err = 0;
11502 		goto out;
11503 	}
11504 
11505 	err = -EIO;
11506 
11507 	/* Bootstrap checksum at offset 0x10 */
11508 	csum = calc_crc((unsigned char *) buf, 0x10);
11509 	if (csum != le32_to_cpu(buf[0x10/4]))
11510 		goto out;
11511 
11512 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11513 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11514 	if (csum != le32_to_cpu(buf[0xfc/4]))
11515 		goto out;
11516 
11517 	kfree(buf);
11518 
11519 	buf = tg3_vpd_readblock(tp, &len);
11520 	if (!buf)
11521 		return -ENOMEM;
11522 
11523 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11524 	if (i > 0) {
11525 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11526 		if (j < 0)
11527 			goto out;
11528 
11529 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11530 			goto out;
11531 
11532 		i += PCI_VPD_LRDT_TAG_SIZE;
11533 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11534 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11535 		if (j > 0) {
11536 			u8 csum8 = 0;
11537 
11538 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11539 
11540 			for (i = 0; i <= j; i++)
11541 				csum8 += ((u8 *)buf)[i];
11542 
11543 			if (csum8)
11544 				goto out;
11545 		}
11546 	}
11547 
11548 	err = 0;
11549 
11550 out:
11551 	kfree(buf);
11552 	return err;
11553 }
11554 
11555 #define TG3_SERDES_TIMEOUT_SEC	2
11556 #define TG3_COPPER_TIMEOUT_SEC	6
11557 
11558 static int tg3_test_link(struct tg3 *tp)
11559 {
11560 	int i, max;
11561 
11562 	if (!netif_running(tp->dev))
11563 		return -ENODEV;
11564 
11565 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11566 		max = TG3_SERDES_TIMEOUT_SEC;
11567 	else
11568 		max = TG3_COPPER_TIMEOUT_SEC;
11569 
11570 	for (i = 0; i < max; i++) {
11571 		if (netif_carrier_ok(tp->dev))
11572 			return 0;
11573 
11574 		if (msleep_interruptible(1000))
11575 			break;
11576 	}
11577 
11578 	return -EIO;
11579 }
11580 
11581 /* Only test the commonly used registers */
11582 static int tg3_test_registers(struct tg3 *tp)
11583 {
11584 	int i, is_5705, is_5750;
11585 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11586 	static struct {
11587 		u16 offset;
11588 		u16 flags;
11589 #define TG3_FL_5705	0x1
11590 #define TG3_FL_NOT_5705	0x2
11591 #define TG3_FL_NOT_5788	0x4
11592 #define TG3_FL_NOT_5750	0x8
11593 		u32 read_mask;
11594 		u32 write_mask;
11595 	} reg_tbl[] = {
11596 		/* MAC Control Registers */
11597 		{ MAC_MODE, TG3_FL_NOT_5705,
11598 			0x00000000, 0x00ef6f8c },
11599 		{ MAC_MODE, TG3_FL_5705,
11600 			0x00000000, 0x01ef6b8c },
11601 		{ MAC_STATUS, TG3_FL_NOT_5705,
11602 			0x03800107, 0x00000000 },
11603 		{ MAC_STATUS, TG3_FL_5705,
11604 			0x03800100, 0x00000000 },
11605 		{ MAC_ADDR_0_HIGH, 0x0000,
11606 			0x00000000, 0x0000ffff },
11607 		{ MAC_ADDR_0_LOW, 0x0000,
11608 			0x00000000, 0xffffffff },
11609 		{ MAC_RX_MTU_SIZE, 0x0000,
11610 			0x00000000, 0x0000ffff },
11611 		{ MAC_TX_MODE, 0x0000,
11612 			0x00000000, 0x00000070 },
11613 		{ MAC_TX_LENGTHS, 0x0000,
11614 			0x00000000, 0x00003fff },
11615 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11616 			0x00000000, 0x000007fc },
11617 		{ MAC_RX_MODE, TG3_FL_5705,
11618 			0x00000000, 0x000007dc },
11619 		{ MAC_HASH_REG_0, 0x0000,
11620 			0x00000000, 0xffffffff },
11621 		{ MAC_HASH_REG_1, 0x0000,
11622 			0x00000000, 0xffffffff },
11623 		{ MAC_HASH_REG_2, 0x0000,
11624 			0x00000000, 0xffffffff },
11625 		{ MAC_HASH_REG_3, 0x0000,
11626 			0x00000000, 0xffffffff },
11627 
11628 		/* Receive Data and Receive BD Initiator Control Registers. */
11629 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11630 			0x00000000, 0xffffffff },
11631 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11632 			0x00000000, 0xffffffff },
11633 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11634 			0x00000000, 0x00000003 },
11635 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11636 			0x00000000, 0xffffffff },
11637 		{ RCVDBDI_STD_BD+0, 0x0000,
11638 			0x00000000, 0xffffffff },
11639 		{ RCVDBDI_STD_BD+4, 0x0000,
11640 			0x00000000, 0xffffffff },
11641 		{ RCVDBDI_STD_BD+8, 0x0000,
11642 			0x00000000, 0xffff0002 },
11643 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11644 			0x00000000, 0xffffffff },
11645 
11646 		/* Receive BD Initiator Control Registers. */
11647 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11648 			0x00000000, 0xffffffff },
11649 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11650 			0x00000000, 0x000003ff },
11651 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11652 			0x00000000, 0xffffffff },
11653 
11654 		/* Host Coalescing Control Registers. */
11655 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11656 			0x00000000, 0x00000004 },
11657 		{ HOSTCC_MODE, TG3_FL_5705,
11658 			0x00000000, 0x000000f6 },
11659 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11660 			0x00000000, 0xffffffff },
11661 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11662 			0x00000000, 0x000003ff },
11663 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11664 			0x00000000, 0xffffffff },
11665 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11666 			0x00000000, 0x000003ff },
11667 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11668 			0x00000000, 0xffffffff },
11669 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11670 			0x00000000, 0x000000ff },
11671 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11672 			0x00000000, 0xffffffff },
11673 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11674 			0x00000000, 0x000000ff },
11675 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11676 			0x00000000, 0xffffffff },
11677 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11678 			0x00000000, 0xffffffff },
11679 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11680 			0x00000000, 0xffffffff },
11681 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11682 			0x00000000, 0x000000ff },
11683 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11684 			0x00000000, 0xffffffff },
11685 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11686 			0x00000000, 0x000000ff },
11687 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11688 			0x00000000, 0xffffffff },
11689 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11690 			0x00000000, 0xffffffff },
11691 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11692 			0x00000000, 0xffffffff },
11693 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11694 			0x00000000, 0xffffffff },
11695 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11696 			0x00000000, 0xffffffff },
11697 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11698 			0xffffffff, 0x00000000 },
11699 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11700 			0xffffffff, 0x00000000 },
11701 
11702 		/* Buffer Manager Control Registers. */
11703 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11704 			0x00000000, 0x007fff80 },
11705 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11706 			0x00000000, 0x007fffff },
11707 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11708 			0x00000000, 0x0000003f },
11709 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11710 			0x00000000, 0x000001ff },
11711 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11712 			0x00000000, 0x000001ff },
11713 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11714 			0xffffffff, 0x00000000 },
11715 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11716 			0xffffffff, 0x00000000 },
11717 
11718 		/* Mailbox Registers */
11719 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11720 			0x00000000, 0x000001ff },
11721 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11722 			0x00000000, 0x000001ff },
11723 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11724 			0x00000000, 0x000007ff },
11725 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11726 			0x00000000, 0x000001ff },
11727 
11728 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11729 	};
11730 
11731 	is_5705 = is_5750 = 0;
11732 	if (tg3_flag(tp, 5705_PLUS)) {
11733 		is_5705 = 1;
11734 		if (tg3_flag(tp, 5750_PLUS))
11735 			is_5750 = 1;
11736 	}
11737 
11738 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11739 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11740 			continue;
11741 
11742 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11743 			continue;
11744 
11745 		if (tg3_flag(tp, IS_5788) &&
11746 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11747 			continue;
11748 
11749 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11750 			continue;
11751 
11752 		offset = (u32) reg_tbl[i].offset;
11753 		read_mask = reg_tbl[i].read_mask;
11754 		write_mask = reg_tbl[i].write_mask;
11755 
11756 		/* Save the original register content */
11757 		save_val = tr32(offset);
11758 
11759 		/* Determine the read-only value. */
11760 		read_val = save_val & read_mask;
11761 
11762 		/* Write zero to the register, then make sure the read-only bits
11763 		 * are not changed and the read/write bits are all zeros.
11764 		 */
11765 		tw32(offset, 0);
11766 
11767 		val = tr32(offset);
11768 
11769 		/* Test the read-only and read/write bits. */
11770 		if (((val & read_mask) != read_val) || (val & write_mask))
11771 			goto out;
11772 
		/* Write ones to all the bits defined by read_mask and
		 * write_mask, then make sure the read-only bits are not
		 * changed and the read/write bits are all ones.
		 */
11777 		tw32(offset, read_mask | write_mask);
11778 
11779 		val = tr32(offset);
11780 
11781 		/* Test the read-only bits. */
11782 		if ((val & read_mask) != read_val)
11783 			goto out;
11784 
11785 		/* Test the read/write bits. */
11786 		if ((val & write_mask) != write_mask)
11787 			goto out;
11788 
11789 		tw32(offset, save_val);
11790 	}
11791 
11792 	return 0;
11793 
11794 out:
11795 	if (netif_msg_hw(tp))
11796 		netdev_err(tp->dev,
11797 			   "Register test failed at offset %x\n", offset);
11798 	tw32(offset, save_val);
11799 	return -EIO;
11800 }
11801 
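/* Walk the given on-chip memory region, writing each test pattern to
 * every word and reading it back immediately.
 */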
11802 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11803 {
11804 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11805 	int i;
11806 	u32 j;
11807 
11808 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11809 		for (j = 0; j < len; j += 4) {
11810 			u32 val;
11811 
11812 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11813 			tg3_read_mem(tp, offset + j, &val);
11814 			if (val != test_pattern[i])
11815 				return -EIO;
11816 		}
11817 	}
11818 	return 0;
11819 }
11820 
11821 static int tg3_test_memory(struct tg3 *tp)
11822 {
11823 	static struct mem_entry {
11824 		u32 offset;
11825 		u32 len;
11826 	} mem_tbl_570x[] = {
11827 		{ 0x00000000, 0x00b50},
11828 		{ 0x00002000, 0x1c000},
11829 		{ 0xffffffff, 0x00000}
11830 	}, mem_tbl_5705[] = {
11831 		{ 0x00000100, 0x0000c},
11832 		{ 0x00000200, 0x00008},
11833 		{ 0x00004000, 0x00800},
11834 		{ 0x00006000, 0x01000},
11835 		{ 0x00008000, 0x02000},
11836 		{ 0x00010000, 0x0e000},
11837 		{ 0xffffffff, 0x00000}
11838 	}, mem_tbl_5755[] = {
11839 		{ 0x00000200, 0x00008},
11840 		{ 0x00004000, 0x00800},
11841 		{ 0x00006000, 0x00800},
11842 		{ 0x00008000, 0x02000},
11843 		{ 0x00010000, 0x0c000},
11844 		{ 0xffffffff, 0x00000}
11845 	}, mem_tbl_5906[] = {
11846 		{ 0x00000200, 0x00008},
11847 		{ 0x00004000, 0x00400},
11848 		{ 0x00006000, 0x00400},
11849 		{ 0x00008000, 0x01000},
11850 		{ 0x00010000, 0x01000},
11851 		{ 0xffffffff, 0x00000}
11852 	}, mem_tbl_5717[] = {
11853 		{ 0x00000200, 0x00008},
11854 		{ 0x00010000, 0x0a000},
11855 		{ 0x00020000, 0x13c00},
11856 		{ 0xffffffff, 0x00000}
11857 	}, mem_tbl_57765[] = {
11858 		{ 0x00000200, 0x00008},
11859 		{ 0x00004000, 0x00800},
11860 		{ 0x00006000, 0x09800},
11861 		{ 0x00010000, 0x0a000},
11862 		{ 0xffffffff, 0x00000}
11863 	};
11864 	struct mem_entry *mem_tbl;
11865 	int err = 0;
11866 	int i;
11867 
11868 	if (tg3_flag(tp, 5717_PLUS))
11869 		mem_tbl = mem_tbl_5717;
11870 	else if (tg3_flag(tp, 57765_CLASS))
11871 		mem_tbl = mem_tbl_57765;
11872 	else if (tg3_flag(tp, 5755_PLUS))
11873 		mem_tbl = mem_tbl_5755;
11874 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11875 		mem_tbl = mem_tbl_5906;
11876 	else if (tg3_flag(tp, 5705_PLUS))
11877 		mem_tbl = mem_tbl_5705;
11878 	else
11879 		mem_tbl = mem_tbl_570x;
11880 
11881 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11882 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11883 		if (err)
11884 			break;
11885 	}
11886 
11887 	return err;
11888 }
11889 
11890 #define TG3_TSO_MSS		500
11891 
11892 #define TG3_TSO_IP_HDR_LEN	20
11893 #define TG3_TSO_TCP_HDR_LEN	20
11894 #define TG3_TSO_TCP_OPT_LEN	12
11895 
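/* Canned packet header for the TSO loopback test: EtherType 0x0800,
 * a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP), and a
 * 32-byte TCP header (20 bytes plus 12 bytes of timestamp options,
 * matching TG3_TSO_TCP_OPT_LEN).  The IP total length field is
 * patched in at run time.
 */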
11896 static const u8 tg3_tso_header[] = {
11897 0x08, 0x00,
11898 0x45, 0x00, 0x00, 0x00,
11899 0x00, 0x00, 0x40, 0x00,
11900 0x40, 0x06, 0x00, 0x00,
11901 0x0a, 0x00, 0x00, 0x01,
11902 0x0a, 0x00, 0x00, 0x02,
11903 0x0d, 0x00, 0xe0, 0x00,
11904 0x00, 0x00, 0x01, 0x00,
11905 0x00, 0x00, 0x02, 0x00,
11906 0x80, 0x10, 0x10, 0x00,
11907 0x14, 0x09, 0x00, 0x00,
11908 0x01, 0x01, 0x08, 0x0a,
11909 0x11, 0x11, 0x11, 0x11,
11910 0x11, 0x11, 0x11, 0x11,
11911 };
11912 
11913 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11914 {
11915 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11916 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11917 	u32 budget;
11918 	struct sk_buff *skb;
11919 	u8 *tx_data, *rx_data;
11920 	dma_addr_t map;
11921 	int num_pkts, tx_len, rx_len, i, err;
11922 	struct tg3_rx_buffer_desc *desc;
11923 	struct tg3_napi *tnapi, *rnapi;
11924 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11925 
11926 	tnapi = &tp->napi[0];
11927 	rnapi = &tp->napi[0];
11928 	if (tp->irq_cnt > 1) {
11929 		if (tg3_flag(tp, ENABLE_RSS))
11930 			rnapi = &tp->napi[1];
11931 		if (tg3_flag(tp, ENABLE_TSS))
11932 			tnapi = &tp->napi[1];
11933 	}
11934 	coal_now = tnapi->coal_now | rnapi->coal_now;
11935 
11936 	err = -EIO;
11937 
11938 	tx_len = pktsz;
11939 	skb = netdev_alloc_skb(tp->dev, tx_len);
11940 	if (!skb)
11941 		return -ENOMEM;
11942 
11943 	tx_data = skb_put(skb, tx_len);
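	/* Use our own MAC address as the destination so the looped-back
	 * frame is accepted by the receive address filter.  The source
	 * MAC and EtherType start out zeroed.
	 */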
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);
11946 
11947 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11948 
11949 	if (tso_loopback) {
11950 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11951 
11952 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11953 			      TG3_TSO_TCP_OPT_LEN;
11954 
11955 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11956 		       sizeof(tg3_tso_header));
11957 		mss = TG3_TSO_MSS;
11958 
11959 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11960 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11961 
11962 		/* Set the total length field in the IP header */
11963 		iph->tot_len = htons((u16)(mss + hdr_len));
11964 
11965 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11966 			      TXD_FLAG_CPU_POST_DMA);
11967 
11968 		if (tg3_flag(tp, HW_TSO_1) ||
11969 		    tg3_flag(tp, HW_TSO_2) ||
11970 		    tg3_flag(tp, HW_TSO_3)) {
11971 			struct tcphdr *th;
11972 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11973 			th = (struct tcphdr *)&tx_data[val];
11974 			th->check = 0;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
11977 
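		/* The hardware TSO engines need the header length encoded
		 * along with the MSS; each generation packs the bits into
		 * the mss word and base_flags differently.
		 */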
11978 		if (tg3_flag(tp, HW_TSO_3)) {
11979 			mss |= (hdr_len & 0xc) << 12;
11980 			if (hdr_len & 0x10)
11981 				base_flags |= 0x00000010;
11982 			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2)) {
			mss |= hdr_len << 9;
		} else if (tg3_flag(tp, HW_TSO_1) ||
11986 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11987 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11988 		} else {
11989 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11990 		}
11991 
11992 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11993 	} else {
11994 		num_pkts = 1;
11995 		data_off = ETH_HLEN;
11996 
11997 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11998 		    tx_len > VLAN_ETH_FRAME_LEN)
11999 			base_flags |= TXD_FLAG_JMB_PKT;
12000 	}
12001 
12002 	for (i = data_off; i < tx_len; i++)
12003 		tx_data[i] = (u8) (i & 0xff);
12004 
12005 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12006 	if (pci_dma_mapping_error(tp->pdev, map)) {
12007 		dev_kfree_skb(skb);
12008 		return -EIO;
12009 	}
12010 
12011 	val = tnapi->tx_prod;
12012 	tnapi->tx_buffers[val].skb = skb;
12013 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12014 
12015 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12016 	       rnapi->coal_now);
12017 
12018 	udelay(10);
12019 
12020 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12021 
12022 	budget = tg3_tx_avail(tnapi);
12023 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12024 			    base_flags | TXD_FLAG_END, mss, 0)) {
12025 		tnapi->tx_buffers[val].skb = NULL;
12026 		dev_kfree_skb(skb);
12027 		return -EIO;
12028 	}
12029 
12030 	tnapi->tx_prod++;
12031 
12032 	/* Sync BD data before updating mailbox */
12033 	wmb();
12034 
12035 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12036 	tr32_mailbox(tnapi->prodmbox);
12037 
12038 	udelay(10);
12039 
12040 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12041 	for (i = 0; i < 35; i++) {
12042 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12043 		       coal_now);
12044 
12045 		udelay(10);
12046 
12047 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12048 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
12049 		if ((tx_idx == tnapi->tx_prod) &&
12050 		    (rx_idx == (rx_start_idx + num_pkts)))
12051 			break;
12052 	}
12053 
12054 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12055 	dev_kfree_skb(skb);
12056 
12057 	if (tx_idx != tnapi->tx_prod)
12058 		goto out;
12059 
12060 	if (rx_idx != rx_start_idx + num_pkts)
12061 		goto out;
12062 
12063 	val = data_off;
12064 	while (rx_idx != rx_start_idx) {
12065 		desc = &rnapi->rx_rcb[rx_start_idx++];
12066 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12067 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12068 
12069 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12070 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12071 			goto out;
12072 
12073 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12074 			 - ETH_FCS_LEN;
12075 
12076 		if (!tso_loopback) {
12077 			if (rx_len != tx_len)
12078 				goto out;
12079 
12080 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12081 				if (opaque_key != RXD_OPAQUE_RING_STD)
12082 					goto out;
12083 			} else {
12084 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12085 					goto out;
12086 			}
12087 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12088 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12089 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
12090 			goto out;
12091 		}
12092 
12093 		if (opaque_key == RXD_OPAQUE_RING_STD) {
12094 			rx_data = tpr->rx_std_buffers[desc_idx].data;
12095 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12096 					     mapping);
12097 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12098 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12099 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12100 					     mapping);
		} else {
			goto out;
		}
12103 
12104 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12105 					    PCI_DMA_FROMDEVICE);
12106 
12107 		rx_data += TG3_RX_OFFSET(tp);
12108 		for (i = data_off; i < rx_len; i++, val++) {
12109 			if (*(rx_data + i) != (u8) (val & 0xff))
12110 				goto out;
12111 		}
12112 	}
12113 
12114 	err = 0;
12115 
12116 	/* tg3_free_rings will unmap and free the rx_data */
12117 out:
12118 	return err;
12119 }
12120 
12121 #define TG3_STD_LOOPBACK_FAILED		1
12122 #define TG3_JMB_LOOPBACK_FAILED		2
12123 #define TG3_TSO_LOOPBACK_FAILED		4
12124 #define TG3_LOOPBACK_FAILED \
12125 	(TG3_STD_LOOPBACK_FAILED | \
12126 	 TG3_JMB_LOOPBACK_FAILED | \
12127 	 TG3_TSO_LOOPBACK_FAILED)
12128 
12129 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12130 {
12131 	int err = -EIO;
12132 	u32 eee_cap;
12133 	u32 jmb_pkt_sz = 9000;
12134 
12135 	if (tp->dma_limit)
12136 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12137 
12138 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12139 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12140 
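	/* data[0] collects MAC loopback results, data[1] internal PHY
	 * loopback results, and data[2] (only when requested) external
	 * loopback results.
	 */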
12141 	if (!netif_running(tp->dev)) {
12142 		data[0] = TG3_LOOPBACK_FAILED;
12143 		data[1] = TG3_LOOPBACK_FAILED;
12144 		if (do_extlpbk)
12145 			data[2] = TG3_LOOPBACK_FAILED;
12146 		goto done;
12147 	}
12148 
12149 	err = tg3_reset_hw(tp, 1);
12150 	if (err) {
12151 		data[0] = TG3_LOOPBACK_FAILED;
12152 		data[1] = TG3_LOOPBACK_FAILED;
12153 		if (do_extlpbk)
12154 			data[2] = TG3_LOOPBACK_FAILED;
12155 		goto done;
12156 	}
12157 
12158 	if (tg3_flag(tp, ENABLE_RSS)) {
12159 		int i;
12160 
12161 		/* Reroute all rx packets to the 1st queue */
12162 		for (i = MAC_RSS_INDIR_TBL_0;
12163 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12164 			tw32(i, 0x0);
12165 	}
12166 
	/* HW errata - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by this
	 * erratum.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
12172 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12173 	    !tg3_flag(tp, CPMU_PRESENT)) {
12174 		tg3_mac_loopback(tp, true);
12175 
12176 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12177 			data[0] |= TG3_STD_LOOPBACK_FAILED;
12178 
12179 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12180 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12181 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
12182 
12183 		tg3_mac_loopback(tp, false);
12184 	}
12185 
12186 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12187 	    !tg3_flag(tp, USE_PHYLIB)) {
12188 		int i;
12189 
12190 		tg3_phy_lpbk_set(tp, 0, false);
12191 
12192 		/* Wait for link */
12193 		for (i = 0; i < 100; i++) {
12194 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12195 				break;
12196 			mdelay(1);
12197 		}
12198 
12199 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12200 			data[1] |= TG3_STD_LOOPBACK_FAILED;
12201 		if (tg3_flag(tp, TSO_CAPABLE) &&
12202 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12203 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
12204 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12205 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12206 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
12207 
12208 		if (do_extlpbk) {
12209 			tg3_phy_lpbk_set(tp, 0, true);
12210 
12211 			/* All link indications report up, but the hardware
12212 			 * isn't really ready for about 20 msec.  Double it
12213 			 * to be sure.
12214 			 */
12215 			mdelay(40);
12216 
12217 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12218 				data[2] |= TG3_STD_LOOPBACK_FAILED;
12219 			if (tg3_flag(tp, TSO_CAPABLE) &&
12220 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12221 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
12222 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12223 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12224 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
12225 		}
12226 
12227 		/* Re-enable gphy autopowerdown. */
12228 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12229 			tg3_phy_toggle_apd(tp, true);
12230 	}
12231 
12232 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12233 
12234 done:
12235 	tp->phy_flags |= eee_cap;
12236 
12237 	return err;
12238 }
12239 
12240 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12241 			  u64 *data)
12242 {
12243 	struct tg3 *tp = netdev_priv(dev);
12244 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12245 
12246 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12247 	    tg3_power_up(tp)) {
12248 		etest->flags |= ETH_TEST_FL_FAILED;
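		/* Mark every test as failed; any nonzero value will do. */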
12249 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12250 		return;
12251 	}
12252 
12253 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12254 
12255 	if (tg3_test_nvram(tp) != 0) {
12256 		etest->flags |= ETH_TEST_FL_FAILED;
12257 		data[0] = 1;
12258 	}
12259 	if (!doextlpbk && tg3_test_link(tp)) {
12260 		etest->flags |= ETH_TEST_FL_FAILED;
12261 		data[1] = 1;
12262 	}
12263 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12264 		int err, err2 = 0, irq_sync = 0;
12265 
12266 		if (netif_running(dev)) {
12267 			tg3_phy_stop(tp);
12268 			tg3_netif_stop(tp);
12269 			irq_sync = 1;
12270 		}
12271 
12272 		tg3_full_lock(tp, irq_sync);
12273 
12274 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12275 		err = tg3_nvram_lock(tp);
12276 		tg3_halt_cpu(tp, RX_CPU_BASE);
12277 		if (!tg3_flag(tp, 5705_PLUS))
12278 			tg3_halt_cpu(tp, TX_CPU_BASE);
12279 		if (!err)
12280 			tg3_nvram_unlock(tp);
12281 
12282 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12283 			tg3_phy_reset(tp);
12284 
12285 		if (tg3_test_registers(tp) != 0) {
12286 			etest->flags |= ETH_TEST_FL_FAILED;
12287 			data[2] = 1;
12288 		}
12289 
12290 		if (tg3_test_memory(tp) != 0) {
12291 			etest->flags |= ETH_TEST_FL_FAILED;
12292 			data[3] = 1;
12293 		}
12294 
12295 		if (doextlpbk)
12296 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12297 
12298 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
12299 			etest->flags |= ETH_TEST_FL_FAILED;
12300 
12301 		tg3_full_unlock(tp);
12302 
12303 		if (tg3_test_interrupt(tp) != 0) {
12304 			etest->flags |= ETH_TEST_FL_FAILED;
12305 			data[7] = 1;
12306 		}
12307 
12308 		tg3_full_lock(tp, 0);
12309 
12310 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12311 		if (netif_running(dev)) {
12312 			tg3_flag_set(tp, INIT_COMPLETE);
12313 			err2 = tg3_restart_hw(tp, 1);
12314 			if (!err2)
12315 				tg3_netif_start(tp);
12316 		}
12317 
12318 		tg3_full_unlock(tp);
12319 
12320 		if (irq_sync && !err2)
12321 			tg3_phy_start(tp);
12322 	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
12327 
12328 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12329 {
12330 	struct mii_ioctl_data *data = if_mii(ifr);
12331 	struct tg3 *tp = netdev_priv(dev);
12332 	int err;
12333 
12334 	if (tg3_flag(tp, USE_PHYLIB)) {
12335 		struct phy_device *phydev;
12336 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12337 			return -EAGAIN;
12338 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12339 		return phy_mii_ioctl(phydev, ifr, cmd);
12340 	}
12341 
12342 	switch (cmd) {
12343 	case SIOCGMIIPHY:
12344 		data->phy_id = tp->phy_addr;
12345 
12346 		/* fallthru */
12347 	case SIOCGMIIREG: {
12348 		u32 mii_regval;
12349 
12350 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12351 			break;			/* We have no PHY */
12352 
12353 		if (!netif_running(dev))
12354 			return -EAGAIN;
12355 
12356 		spin_lock_bh(&tp->lock);
12357 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12358 		spin_unlock_bh(&tp->lock);
12359 
12360 		data->val_out = mii_regval;
12361 
12362 		return err;
12363 	}
12364 
12365 	case SIOCSMIIREG:
12366 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12367 			break;			/* We have no PHY */
12368 
12369 		if (!netif_running(dev))
12370 			return -EAGAIN;
12371 
12372 		spin_lock_bh(&tp->lock);
12373 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12374 		spin_unlock_bh(&tp->lock);
12375 
12376 		return err;
12377 
12378 	default:
12379 		/* do nothing */
12380 		break;
12381 	}
12382 	return -EOPNOTSUPP;
12383 }
12384 
12385 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12386 {
12387 	struct tg3 *tp = netdev_priv(dev);
12388 
12389 	memcpy(ec, &tp->coal, sizeof(*ec));
12390 	return 0;
12391 }
12392 
12393 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12394 {
12395 	struct tg3 *tp = netdev_priv(dev);
12396 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12397 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12398 
12399 	if (!tg3_flag(tp, 5705_PLUS)) {
12400 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12401 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12402 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12403 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12404 	}
12405 
12406 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12407 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12408 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12409 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12410 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12411 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12412 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12413 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12414 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12415 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12416 		return -EINVAL;
12417 
12418 	/* No rx interrupts will be generated if both are zero */
12419 	if ((ec->rx_coalesce_usecs == 0) &&
12420 	    (ec->rx_max_coalesced_frames == 0))
12421 		return -EINVAL;
12422 
12423 	/* No tx interrupts will be generated if both are zero */
12424 	if ((ec->tx_coalesce_usecs == 0) &&
12425 	    (ec->tx_max_coalesced_frames == 0))
12426 		return -EINVAL;
12427 
12428 	/* Only copy relevant parameters, ignore all others. */
12429 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12430 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12431 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12432 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12433 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12434 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12435 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12436 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12437 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12438 
12439 	if (netif_running(dev)) {
12440 		tg3_full_lock(tp, 0);
12441 		__tg3_set_coalesce(tp, &tp->coal);
12442 		tg3_full_unlock(tp);
12443 	}
12444 	return 0;
12445 }
12446 
12447 static const struct ethtool_ops tg3_ethtool_ops = {
12448 	.get_settings		= tg3_get_settings,
12449 	.set_settings		= tg3_set_settings,
12450 	.get_drvinfo		= tg3_get_drvinfo,
12451 	.get_regs_len		= tg3_get_regs_len,
12452 	.get_regs		= tg3_get_regs,
12453 	.get_wol		= tg3_get_wol,
12454 	.set_wol		= tg3_set_wol,
12455 	.get_msglevel		= tg3_get_msglevel,
12456 	.set_msglevel		= tg3_set_msglevel,
12457 	.nway_reset		= tg3_nway_reset,
12458 	.get_link		= ethtool_op_get_link,
12459 	.get_eeprom_len		= tg3_get_eeprom_len,
12460 	.get_eeprom		= tg3_get_eeprom,
12461 	.set_eeprom		= tg3_set_eeprom,
12462 	.get_ringparam		= tg3_get_ringparam,
12463 	.set_ringparam		= tg3_set_ringparam,
12464 	.get_pauseparam		= tg3_get_pauseparam,
12465 	.set_pauseparam		= tg3_set_pauseparam,
12466 	.self_test		= tg3_self_test,
12467 	.get_strings		= tg3_get_strings,
12468 	.set_phys_id		= tg3_set_phys_id,
12469 	.get_ethtool_stats	= tg3_get_ethtool_stats,
12470 	.get_coalesce		= tg3_get_coalesce,
12471 	.set_coalesce		= tg3_set_coalesce,
12472 	.get_sset_count		= tg3_get_sset_count,
12473 	.get_rxnfc		= tg3_get_rxnfc,
12474 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12475 	.get_rxfh_indir		= tg3_get_rxfh_indir,
12476 	.set_rxfh_indir		= tg3_set_rxfh_indir,
12477 	.get_ts_info		= ethtool_op_get_ts_info,
12478 };
12479 
12480 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12481 						struct rtnl_link_stats64 *stats)
12482 {
12483 	struct tg3 *tp = netdev_priv(dev);
12484 
12485 	if (!tp->hw_stats)
12486 		return &tp->net_stats_prev;
12487 
12488 	spin_lock_bh(&tp->lock);
12489 	tg3_get_nstats(tp, stats);
12490 	spin_unlock_bh(&tp->lock);
12491 
12492 	return stats;
12493 }
12494 
12495 static void tg3_set_rx_mode(struct net_device *dev)
12496 {
12497 	struct tg3 *tp = netdev_priv(dev);
12498 
12499 	if (!netif_running(dev))
12500 		return;
12501 
12502 	tg3_full_lock(tp, 0);
12503 	__tg3_set_rx_mode(dev);
12504 	tg3_full_unlock(tp);
12505 }
12506 
12507 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12508 			       int new_mtu)
12509 {
12510 	dev->mtu = new_mtu;
12511 
12512 	if (new_mtu > ETH_DATA_LEN) {
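		/* TSO and jumbo frames are mutually exclusive on
		 * 5780-class devices, so TSO capability is toggled
		 * along with the MTU.
		 */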
12513 		if (tg3_flag(tp, 5780_CLASS)) {
12514 			netdev_update_features(dev);
12515 			tg3_flag_clear(tp, TSO_CAPABLE);
12516 		} else {
12517 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
12518 		}
12519 	} else {
12520 		if (tg3_flag(tp, 5780_CLASS)) {
12521 			tg3_flag_set(tp, TSO_CAPABLE);
12522 			netdev_update_features(dev);
12523 		}
12524 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12525 	}
12526 }
12527 
12528 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12529 {
12530 	struct tg3 *tp = netdev_priv(dev);
12531 	int err, reset_phy = 0;
12532 
12533 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12534 		return -EINVAL;
12535 
12536 	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
12540 		tg3_set_mtu(dev, tp, new_mtu);
12541 		return 0;
12542 	}
12543 
12544 	tg3_phy_stop(tp);
12545 
12546 	tg3_netif_stop(tp);
12547 
12548 	tg3_full_lock(tp, 1);
12549 
12550 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12551 
12552 	tg3_set_mtu(dev, tp, new_mtu);
12553 
	/* Reset the PHY, otherwise the read DMA engine gets stuck in a
	 * mode that breaks all DMA requests down to 256 bytes.
	 */
12557 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12558 		reset_phy = 1;
12559 
12560 	err = tg3_restart_hw(tp, reset_phy);
12561 
12562 	if (!err)
12563 		tg3_netif_start(tp);
12564 
12565 	tg3_full_unlock(tp);
12566 
12567 	if (!err)
12568 		tg3_phy_start(tp);
12569 
12570 	return err;
12571 }
12572 
12573 static const struct net_device_ops tg3_netdev_ops = {
12574 	.ndo_open		= tg3_open,
12575 	.ndo_stop		= tg3_close,
12576 	.ndo_start_xmit		= tg3_start_xmit,
12577 	.ndo_get_stats64	= tg3_get_stats64,
12578 	.ndo_validate_addr	= eth_validate_addr,
12579 	.ndo_set_rx_mode	= tg3_set_rx_mode,
12580 	.ndo_set_mac_address	= tg3_set_mac_addr,
12581 	.ndo_do_ioctl		= tg3_ioctl,
12582 	.ndo_tx_timeout		= tg3_tx_timeout,
12583 	.ndo_change_mtu		= tg3_change_mtu,
12584 	.ndo_fix_features	= tg3_fix_features,
12585 	.ndo_set_features	= tg3_set_features,
12586 #ifdef CONFIG_NET_POLL_CONTROLLER
12587 	.ndo_poll_controller	= tg3_poll_controller,
12588 #endif
12589 };
12590 
12591 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12592 {
12593 	u32 cursize, val, magic;
12594 
12595 	tp->nvram_size = EEPROM_CHIP_SIZE;
12596 
12597 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12598 		return;
12599 
12600 	if ((magic != TG3_EEPROM_MAGIC) &&
12601 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12602 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12603 		return;
12604 
12605 	/*
12606 	 * Size the chip by reading offsets at increasing powers of two.
12607 	 * When we encounter our validation signature, we know the addressing
12608 	 * has wrapped around, and thus have our chip size.
12609 	 */
12610 	cursize = 0x10;
12611 
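	/* For example, on a 128-byte part the reads at offsets 0x10,
	 * 0x20, and 0x40 return ordinary data, while the read at 0x80
	 * wraps to offset 0 and returns the magic value, leaving
	 * cursize == 0x80.
	 */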
12612 	while (cursize < tp->nvram_size) {
12613 		if (tg3_nvram_read(tp, cursize, &val) != 0)
12614 			return;
12615 
12616 		if (val == magic)
12617 			break;
12618 
12619 		cursize <<= 1;
12620 	}
12621 
12622 	tp->nvram_size = cursize;
12623 }
12624 
12625 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12626 {
12627 	u32 val;
12628 
12629 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12630 		return;
12631 
12632 	/* Selfboot format */
12633 	if (val != TG3_EEPROM_MAGIC) {
12634 		tg3_get_eeprom_size(tp);
12635 		return;
12636 	}
12637 
12638 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12639 		if (val != 0) {
12640 			/* This is confusing.  We want to operate on the
12641 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12642 			 * call will read from NVRAM and byteswap the data
12643 			 * according to the byteswapping settings for all
12644 			 * other register accesses.  This ensures the data we
12645 			 * want will always reside in the lower 16-bits.
12646 			 * However, the data in NVRAM is in LE format, which
12647 			 * means the data from the NVRAM read will always be
12648 			 * opposite the endianness of the CPU.  The 16-bit
12649 			 * byteswap then brings the data to CPU endianness.
12650 			 */
12651 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12652 			return;
12653 		}
12654 	}
12655 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12656 }
12657 
12658 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12659 {
12660 	u32 nvcfg1;
12661 
12662 	nvcfg1 = tr32(NVRAM_CFG1);
12663 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12664 		tg3_flag_set(tp, FLASH);
12665 	} else {
12666 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12667 		tw32(NVRAM_CFG1, nvcfg1);
12668 	}
12669 
12670 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12671 	    tg3_flag(tp, 5780_CLASS)) {
12672 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12673 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12674 			tp->nvram_jedecnum = JEDEC_ATMEL;
12675 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12676 			tg3_flag_set(tp, NVRAM_BUFFERED);
12677 			break;
12678 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12679 			tp->nvram_jedecnum = JEDEC_ATMEL;
12680 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12681 			break;
12682 		case FLASH_VENDOR_ATMEL_EEPROM:
12683 			tp->nvram_jedecnum = JEDEC_ATMEL;
12684 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12685 			tg3_flag_set(tp, NVRAM_BUFFERED);
12686 			break;
12687 		case FLASH_VENDOR_ST:
12688 			tp->nvram_jedecnum = JEDEC_ST;
12689 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12690 			tg3_flag_set(tp, NVRAM_BUFFERED);
12691 			break;
12692 		case FLASH_VENDOR_SAIFUN:
12693 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12694 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12695 			break;
12696 		case FLASH_VENDOR_SST_SMALL:
12697 		case FLASH_VENDOR_SST_LARGE:
12698 			tp->nvram_jedecnum = JEDEC_SST;
12699 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12700 			break;
12701 		}
12702 	} else {
12703 		tp->nvram_jedecnum = JEDEC_ATMEL;
12704 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12705 		tg3_flag_set(tp, NVRAM_BUFFERED);
12706 	}
12707 }
12708 
12709 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12710 {
12711 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12712 	case FLASH_5752PAGE_SIZE_256:
12713 		tp->nvram_pagesize = 256;
12714 		break;
12715 	case FLASH_5752PAGE_SIZE_512:
12716 		tp->nvram_pagesize = 512;
12717 		break;
12718 	case FLASH_5752PAGE_SIZE_1K:
12719 		tp->nvram_pagesize = 1024;
12720 		break;
12721 	case FLASH_5752PAGE_SIZE_2K:
12722 		tp->nvram_pagesize = 2048;
12723 		break;
12724 	case FLASH_5752PAGE_SIZE_4K:
12725 		tp->nvram_pagesize = 4096;
12726 		break;
12727 	case FLASH_5752PAGE_SIZE_264:
12728 		tp->nvram_pagesize = 264;
12729 		break;
12730 	case FLASH_5752PAGE_SIZE_528:
12731 		tp->nvram_pagesize = 528;
12732 		break;
12733 	}
12734 }
12735 
12736 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12737 {
12738 	u32 nvcfg1;
12739 
12740 	nvcfg1 = tr32(NVRAM_CFG1);
12741 
12742 	/* NVRAM protection for TPM */
12743 	if (nvcfg1 & (1 << 27))
12744 		tg3_flag_set(tp, PROTECTED_NVRAM);
12745 
12746 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12747 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12748 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12749 		tp->nvram_jedecnum = JEDEC_ATMEL;
12750 		tg3_flag_set(tp, NVRAM_BUFFERED);
12751 		break;
12752 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12753 		tp->nvram_jedecnum = JEDEC_ATMEL;
12754 		tg3_flag_set(tp, NVRAM_BUFFERED);
12755 		tg3_flag_set(tp, FLASH);
12756 		break;
12757 	case FLASH_5752VENDOR_ST_M45PE10:
12758 	case FLASH_5752VENDOR_ST_M45PE20:
12759 	case FLASH_5752VENDOR_ST_M45PE40:
12760 		tp->nvram_jedecnum = JEDEC_ST;
12761 		tg3_flag_set(tp, NVRAM_BUFFERED);
12762 		tg3_flag_set(tp, FLASH);
12763 		break;
12764 	}
12765 
12766 	if (tg3_flag(tp, FLASH)) {
12767 		tg3_nvram_get_pagesize(tp, nvcfg1);
12768 	} else {
12769 		/* For eeprom, set pagesize to maximum eeprom size */
12770 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12771 
12772 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12773 		tw32(NVRAM_CFG1, nvcfg1);
12774 	}
12775 }
12776 
12777 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12778 {
12779 	u32 nvcfg1, protect = 0;
12780 
12781 	nvcfg1 = tr32(NVRAM_CFG1);
12782 
12783 	/* NVRAM protection for TPM */
12784 	if (nvcfg1 & (1 << 27)) {
12785 		tg3_flag_set(tp, PROTECTED_NVRAM);
12786 		protect = 1;
12787 	}
12788 
12789 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12790 	switch (nvcfg1) {
12791 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12792 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12793 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12794 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12795 		tp->nvram_jedecnum = JEDEC_ATMEL;
12796 		tg3_flag_set(tp, NVRAM_BUFFERED);
12797 		tg3_flag_set(tp, FLASH);
12798 		tp->nvram_pagesize = 264;
12799 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12800 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12801 			tp->nvram_size = (protect ? 0x3e200 :
12802 					  TG3_NVRAM_SIZE_512KB);
12803 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12804 			tp->nvram_size = (protect ? 0x1f200 :
12805 					  TG3_NVRAM_SIZE_256KB);
12806 		else
12807 			tp->nvram_size = (protect ? 0x1f200 :
12808 					  TG3_NVRAM_SIZE_128KB);
12809 		break;
12810 	case FLASH_5752VENDOR_ST_M45PE10:
12811 	case FLASH_5752VENDOR_ST_M45PE20:
12812 	case FLASH_5752VENDOR_ST_M45PE40:
12813 		tp->nvram_jedecnum = JEDEC_ST;
12814 		tg3_flag_set(tp, NVRAM_BUFFERED);
12815 		tg3_flag_set(tp, FLASH);
12816 		tp->nvram_pagesize = 256;
12817 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12818 			tp->nvram_size = (protect ?
12819 					  TG3_NVRAM_SIZE_64KB :
12820 					  TG3_NVRAM_SIZE_128KB);
12821 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12822 			tp->nvram_size = (protect ?
12823 					  TG3_NVRAM_SIZE_64KB :
12824 					  TG3_NVRAM_SIZE_256KB);
12825 		else
12826 			tp->nvram_size = (protect ?
12827 					  TG3_NVRAM_SIZE_128KB :
12828 					  TG3_NVRAM_SIZE_512KB);
12829 		break;
12830 	}
12831 }
12832 
12833 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12834 {
12835 	u32 nvcfg1;
12836 
12837 	nvcfg1 = tr32(NVRAM_CFG1);
12838 
12839 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12840 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12841 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12842 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12843 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12844 		tp->nvram_jedecnum = JEDEC_ATMEL;
12845 		tg3_flag_set(tp, NVRAM_BUFFERED);
12846 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12847 
12848 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12849 		tw32(NVRAM_CFG1, nvcfg1);
12850 		break;
12851 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12852 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12853 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12854 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12855 		tp->nvram_jedecnum = JEDEC_ATMEL;
12856 		tg3_flag_set(tp, NVRAM_BUFFERED);
12857 		tg3_flag_set(tp, FLASH);
12858 		tp->nvram_pagesize = 264;
12859 		break;
12860 	case FLASH_5752VENDOR_ST_M45PE10:
12861 	case FLASH_5752VENDOR_ST_M45PE20:
12862 	case FLASH_5752VENDOR_ST_M45PE40:
12863 		tp->nvram_jedecnum = JEDEC_ST;
12864 		tg3_flag_set(tp, NVRAM_BUFFERED);
12865 		tg3_flag_set(tp, FLASH);
12866 		tp->nvram_pagesize = 256;
12867 		break;
12868 	}
12869 }
12870 
12871 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12872 {
12873 	u32 nvcfg1, protect = 0;
12874 
12875 	nvcfg1 = tr32(NVRAM_CFG1);
12876 
12877 	/* NVRAM protection for TPM */
12878 	if (nvcfg1 & (1 << 27)) {
12879 		tg3_flag_set(tp, PROTECTED_NVRAM);
12880 		protect = 1;
12881 	}
12882 
12883 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12884 	switch (nvcfg1) {
12885 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12886 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12887 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12888 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12889 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12890 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12891 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12892 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12893 		tp->nvram_jedecnum = JEDEC_ATMEL;
12894 		tg3_flag_set(tp, NVRAM_BUFFERED);
12895 		tg3_flag_set(tp, FLASH);
12896 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12897 		tp->nvram_pagesize = 256;
12898 		break;
12899 	case FLASH_5761VENDOR_ST_A_M45PE20:
12900 	case FLASH_5761VENDOR_ST_A_M45PE40:
12901 	case FLASH_5761VENDOR_ST_A_M45PE80:
12902 	case FLASH_5761VENDOR_ST_A_M45PE16:
12903 	case FLASH_5761VENDOR_ST_M_M45PE20:
12904 	case FLASH_5761VENDOR_ST_M_M45PE40:
12905 	case FLASH_5761VENDOR_ST_M_M45PE80:
12906 	case FLASH_5761VENDOR_ST_M_M45PE16:
12907 		tp->nvram_jedecnum = JEDEC_ST;
12908 		tg3_flag_set(tp, NVRAM_BUFFERED);
12909 		tg3_flag_set(tp, FLASH);
12910 		tp->nvram_pagesize = 256;
12911 		break;
12912 	}
12913 
12914 	if (protect) {
12915 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12916 	} else {
12917 		switch (nvcfg1) {
12918 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12919 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12920 		case FLASH_5761VENDOR_ST_A_M45PE16:
12921 		case FLASH_5761VENDOR_ST_M_M45PE16:
12922 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12923 			break;
12924 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12925 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12926 		case FLASH_5761VENDOR_ST_A_M45PE80:
12927 		case FLASH_5761VENDOR_ST_M_M45PE80:
12928 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12929 			break;
12930 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12931 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12932 		case FLASH_5761VENDOR_ST_A_M45PE40:
12933 		case FLASH_5761VENDOR_ST_M_M45PE40:
12934 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12935 			break;
12936 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12937 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12938 		case FLASH_5761VENDOR_ST_A_M45PE20:
12939 		case FLASH_5761VENDOR_ST_M_M45PE20:
12940 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12941 			break;
12942 		}
12943 	}
12944 }
12945 
12946 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12947 {
12948 	tp->nvram_jedecnum = JEDEC_ATMEL;
12949 	tg3_flag_set(tp, NVRAM_BUFFERED);
12950 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12951 }
12952 
12953 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12954 {
12955 	u32 nvcfg1;
12956 
12957 	nvcfg1 = tr32(NVRAM_CFG1);
12958 
12959 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12960 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12961 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12962 		tp->nvram_jedecnum = JEDEC_ATMEL;
12963 		tg3_flag_set(tp, NVRAM_BUFFERED);
12964 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12965 
12966 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12967 		tw32(NVRAM_CFG1, nvcfg1);
12968 		return;
12969 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12970 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12971 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12972 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12973 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12974 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12975 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12976 		tp->nvram_jedecnum = JEDEC_ATMEL;
12977 		tg3_flag_set(tp, NVRAM_BUFFERED);
12978 		tg3_flag_set(tp, FLASH);
12979 
12980 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12981 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12982 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12983 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12984 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12985 			break;
12986 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12987 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12988 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12989 			break;
12990 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12991 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12992 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12993 			break;
12994 		}
12995 		break;
12996 	case FLASH_5752VENDOR_ST_M45PE10:
12997 	case FLASH_5752VENDOR_ST_M45PE20:
12998 	case FLASH_5752VENDOR_ST_M45PE40:
12999 		tp->nvram_jedecnum = JEDEC_ST;
13000 		tg3_flag_set(tp, NVRAM_BUFFERED);
13001 		tg3_flag_set(tp, FLASH);
13002 
13003 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13004 		case FLASH_5752VENDOR_ST_M45PE10:
13005 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13006 			break;
13007 		case FLASH_5752VENDOR_ST_M45PE20:
13008 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13009 			break;
13010 		case FLASH_5752VENDOR_ST_M45PE40:
13011 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13012 			break;
13013 		}
13014 		break;
13015 	default:
13016 		tg3_flag_set(tp, NO_NVRAM);
13017 		return;
13018 	}
13019 
13020 	tg3_nvram_get_pagesize(tp, nvcfg1);
13021 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13022 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13023 }
13024 
13026 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13027 {
13028 	u32 nvcfg1;
13029 
13030 	nvcfg1 = tr32(NVRAM_CFG1);
13031 
13032 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13033 	case FLASH_5717VENDOR_ATMEL_EEPROM:
13034 	case FLASH_5717VENDOR_MICRO_EEPROM:
13035 		tp->nvram_jedecnum = JEDEC_ATMEL;
13036 		tg3_flag_set(tp, NVRAM_BUFFERED);
13037 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13038 
13039 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13040 		tw32(NVRAM_CFG1, nvcfg1);
13041 		return;
13042 	case FLASH_5717VENDOR_ATMEL_MDB011D:
13043 	case FLASH_5717VENDOR_ATMEL_ADB011B:
13044 	case FLASH_5717VENDOR_ATMEL_ADB011D:
13045 	case FLASH_5717VENDOR_ATMEL_MDB021D:
13046 	case FLASH_5717VENDOR_ATMEL_ADB021B:
13047 	case FLASH_5717VENDOR_ATMEL_ADB021D:
13048 	case FLASH_5717VENDOR_ATMEL_45USPT:
13049 		tp->nvram_jedecnum = JEDEC_ATMEL;
13050 		tg3_flag_set(tp, NVRAM_BUFFERED);
13051 		tg3_flag_set(tp, FLASH);
13052 
13053 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13054 		case FLASH_5717VENDOR_ATMEL_MDB021D:
13055 			/* Detect size with tg3_nvram_get_size() */
13056 			break;
13057 		case FLASH_5717VENDOR_ATMEL_ADB021B:
13058 		case FLASH_5717VENDOR_ATMEL_ADB021D:
13059 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13060 			break;
13061 		default:
13062 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13063 			break;
13064 		}
13065 		break;
13066 	case FLASH_5717VENDOR_ST_M_M25PE10:
13067 	case FLASH_5717VENDOR_ST_A_M25PE10:
13068 	case FLASH_5717VENDOR_ST_M_M45PE10:
13069 	case FLASH_5717VENDOR_ST_A_M45PE10:
13070 	case FLASH_5717VENDOR_ST_M_M25PE20:
13071 	case FLASH_5717VENDOR_ST_A_M25PE20:
13072 	case FLASH_5717VENDOR_ST_M_M45PE20:
13073 	case FLASH_5717VENDOR_ST_A_M45PE20:
13074 	case FLASH_5717VENDOR_ST_25USPT:
13075 	case FLASH_5717VENDOR_ST_45USPT:
13076 		tp->nvram_jedecnum = JEDEC_ST;
13077 		tg3_flag_set(tp, NVRAM_BUFFERED);
13078 		tg3_flag_set(tp, FLASH);
13079 
13080 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13081 		case FLASH_5717VENDOR_ST_M_M25PE20:
13082 		case FLASH_5717VENDOR_ST_M_M45PE20:
13083 			/* Detect size with tg3_nvram_get_size() */
13084 			break;
13085 		case FLASH_5717VENDOR_ST_A_M25PE20:
13086 		case FLASH_5717VENDOR_ST_A_M45PE20:
13087 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13088 			break;
13089 		default:
13090 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13091 			break;
13092 		}
13093 		break;
13094 	default:
13095 		tg3_flag_set(tp, NO_NVRAM);
13096 		return;
13097 	}
13098 
13099 	tg3_nvram_get_pagesize(tp, nvcfg1);
13100 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13101 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13102 }
13103 
13104 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13105 {
13106 	u32 nvcfg1, nvmpinstrp;
13107 
13108 	nvcfg1 = tr32(NVRAM_CFG1);
13109 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13110 
13111 	switch (nvmpinstrp) {
13112 	case FLASH_5720_EEPROM_HD:
13113 	case FLASH_5720_EEPROM_LD:
13114 		tp->nvram_jedecnum = JEDEC_ATMEL;
13115 		tg3_flag_set(tp, NVRAM_BUFFERED);
13116 
13117 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13118 		tw32(NVRAM_CFG1, nvcfg1);
13119 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13120 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13121 		else
13122 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13123 		return;
13124 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
13125 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
13126 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
13127 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
13128 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
13129 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
13130 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
13131 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
13132 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
13133 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
13134 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
13135 	case FLASH_5720VENDOR_ATMEL_45USPT:
13136 		tp->nvram_jedecnum = JEDEC_ATMEL;
13137 		tg3_flag_set(tp, NVRAM_BUFFERED);
13138 		tg3_flag_set(tp, FLASH);
13139 
13140 		switch (nvmpinstrp) {
13141 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
13142 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
13143 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
13144 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13145 			break;
13146 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
13147 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
13148 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
13149 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13150 			break;
13151 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
13152 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
13153 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13154 			break;
13155 		default:
13156 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13157 			break;
13158 		}
13159 		break;
13160 	case FLASH_5720VENDOR_M_ST_M25PE10:
13161 	case FLASH_5720VENDOR_M_ST_M45PE10:
13162 	case FLASH_5720VENDOR_A_ST_M25PE10:
13163 	case FLASH_5720VENDOR_A_ST_M45PE10:
13164 	case FLASH_5720VENDOR_M_ST_M25PE20:
13165 	case FLASH_5720VENDOR_M_ST_M45PE20:
13166 	case FLASH_5720VENDOR_A_ST_M25PE20:
13167 	case FLASH_5720VENDOR_A_ST_M45PE20:
13168 	case FLASH_5720VENDOR_M_ST_M25PE40:
13169 	case FLASH_5720VENDOR_M_ST_M45PE40:
13170 	case FLASH_5720VENDOR_A_ST_M25PE40:
13171 	case FLASH_5720VENDOR_A_ST_M45PE40:
13172 	case FLASH_5720VENDOR_M_ST_M25PE80:
13173 	case FLASH_5720VENDOR_M_ST_M45PE80:
13174 	case FLASH_5720VENDOR_A_ST_M25PE80:
13175 	case FLASH_5720VENDOR_A_ST_M45PE80:
13176 	case FLASH_5720VENDOR_ST_25USPT:
13177 	case FLASH_5720VENDOR_ST_45USPT:
13178 		tp->nvram_jedecnum = JEDEC_ST;
13179 		tg3_flag_set(tp, NVRAM_BUFFERED);
13180 		tg3_flag_set(tp, FLASH);
13181 
13182 		switch (nvmpinstrp) {
13183 		case FLASH_5720VENDOR_M_ST_M25PE20:
13184 		case FLASH_5720VENDOR_M_ST_M45PE20:
13185 		case FLASH_5720VENDOR_A_ST_M25PE20:
13186 		case FLASH_5720VENDOR_A_ST_M45PE20:
13187 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13188 			break;
13189 		case FLASH_5720VENDOR_M_ST_M25PE40:
13190 		case FLASH_5720VENDOR_M_ST_M45PE40:
13191 		case FLASH_5720VENDOR_A_ST_M25PE40:
13192 		case FLASH_5720VENDOR_A_ST_M45PE40:
13193 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13194 			break;
13195 		case FLASH_5720VENDOR_M_ST_M25PE80:
13196 		case FLASH_5720VENDOR_M_ST_M45PE80:
13197 		case FLASH_5720VENDOR_A_ST_M25PE80:
13198 		case FLASH_5720VENDOR_A_ST_M45PE80:
13199 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13200 			break;
13201 		default:
13202 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13203 			break;
13204 		}
13205 		break;
13206 	default:
13207 		tg3_flag_set(tp, NO_NVRAM);
13208 		return;
13209 	}
13210 
13211 	tg3_nvram_get_pagesize(tp, nvcfg1);
13212 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13213 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13214 }
13215 
13216 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13217 static void __devinit tg3_nvram_init(struct tg3 *tp)
13218 {
13219 	tw32_f(GRC_EEPROM_ADDR,
13220 	     (EEPROM_ADDR_FSM_RESET |
13221 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
13222 	       EEPROM_ADDR_CLKPERD_SHIFT)));
13223 
13224 	msleep(1);
13225 
13226 	/* Enable serial EEPROM accesses. */
13227 	tw32_f(GRC_LOCAL_CTRL,
13228 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13229 	udelay(100);
13230 
13231 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13232 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13233 		tg3_flag_set(tp, NVRAM);
13234 
13235 		if (tg3_nvram_lock(tp)) {
13236 			netdev_warn(tp->dev,
13237 				    "Cannot get nvram lock, %s failed\n",
13238 				    __func__);
13239 			return;
13240 		}
13241 		tg3_enable_nvram_access(tp);
13242 
13243 		tp->nvram_size = 0;
13244 
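		/* Each per-ASIC helper below fills in the JEDEC number,
		 * page size and, where it is known up front, the nvram
		 * size; a size still zero afterwards is probed with
		 * tg3_get_nvram_size().
		 */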
13245 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13246 			tg3_get_5752_nvram_info(tp);
13247 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13248 			tg3_get_5755_nvram_info(tp);
13249 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13250 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13251 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13252 			tg3_get_5787_nvram_info(tp);
13253 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13254 			tg3_get_5761_nvram_info(tp);
13255 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13256 			tg3_get_5906_nvram_info(tp);
13257 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13258 			 tg3_flag(tp, 57765_CLASS))
13259 			tg3_get_57780_nvram_info(tp);
13260 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13261 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13262 			tg3_get_5717_nvram_info(tp);
13263 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13264 			tg3_get_5720_nvram_info(tp);
13265 		else
13266 			tg3_get_nvram_info(tp);
13267 
13268 		if (tp->nvram_size == 0)
13269 			tg3_get_nvram_size(tp);
13270 
13271 		tg3_disable_nvram_access(tp);
13272 		tg3_nvram_unlock(tp);
13273 
13274 	} else {
13275 		tg3_flag_clear(tp, NVRAM);
13276 		tg3_flag_clear(tp, NVRAM_BUFFERED);
13277 
13278 		tg3_get_eeprom_size(tp);
13279 	}
13280 }
13281 
13282 struct subsys_tbl_ent {
13283 	u16 subsys_vendor, subsys_devid;
13284 	u32 phy_id;
13285 };
13286 
13287 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13288 	/* Broadcom boards. */
13289 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13290 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13291 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13292 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13293 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13294 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13295 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13296 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13297 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13298 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13299 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13300 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13301 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13302 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13303 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13304 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13305 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13306 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13307 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13308 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13309 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13310 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13311 
13312 	/* 3com boards. */
13313 	{ TG3PCI_SUBVENDOR_ID_3COM,
13314 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13315 	{ TG3PCI_SUBVENDOR_ID_3COM,
13316 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13317 	{ TG3PCI_SUBVENDOR_ID_3COM,
13318 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13319 	{ TG3PCI_SUBVENDOR_ID_3COM,
13320 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13321 	{ TG3PCI_SUBVENDOR_ID_3COM,
13322 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13323 
13324 	/* DELL boards. */
13325 	{ TG3PCI_SUBVENDOR_ID_DELL,
13326 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13327 	{ TG3PCI_SUBVENDOR_ID_DELL,
13328 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13329 	{ TG3PCI_SUBVENDOR_ID_DELL,
13330 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13331 	{ TG3PCI_SUBVENDOR_ID_DELL,
13332 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13333 
13334 	/* Compaq boards. */
13335 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13336 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13337 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13338 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13339 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13340 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13341 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13342 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13343 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13344 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13345 
13346 	/* IBM boards. */
13347 	{ TG3PCI_SUBVENDOR_ID_IBM,
13348 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13349 };
13350 
13351 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13352 {
13353 	int i;
13354 
13355 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13356 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13357 		     tp->pdev->subsystem_vendor) &&
13358 		    (subsys_id_to_phy_id[i].subsys_devid ==
13359 		     tp->pdev->subsystem_device))
13360 			return &subsys_id_to_phy_id[i];
13361 	}
13362 	return NULL;
13363 }
13364 
13365 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13366 {
13367 	u32 val;
13368 
13369 	tp->phy_id = TG3_PHY_ID_INVALID;
13370 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13371 
13372 	/* Assume an onboard, WOL-capable device by default. */
13373 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13374 	tg3_flag_set(tp, WOL_CAP);
13375 
13376 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13377 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13378 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13379 			tg3_flag_set(tp, IS_NIC);
13380 		}
13381 		val = tr32(VCPU_CFGSHDW);
13382 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13383 			tg3_flag_set(tp, ASPM_WORKAROUND);
13384 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13385 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13386 			tg3_flag_set(tp, WOL_ENABLE);
13387 			device_set_wakeup_enable(&tp->pdev->dev, true);
13388 		}
13389 		goto done;
13390 	}
13391 
13392 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13393 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13394 		u32 nic_cfg, led_cfg;
13395 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13396 		int eeprom_phy_serdes = 0;
13397 
13398 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13399 		tp->nic_sram_data_cfg = nic_cfg;
13400 
13401 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13402 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13403 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13404 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13405 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13406 		    (ver > 0) && (ver < 0x100))
13407 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13408 
13409 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13410 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13411 
13412 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13413 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13414 			eeprom_phy_serdes = 1;
13415 
13416 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13417 		if (nic_phy_id != 0) {
13418 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13419 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13420 
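			/* Fold the two NIC_SRAM PHY ID words into the
			 * driver's internal PHY ID layout; tg3_phy_probe()
			 * builds the same layout from MII_PHYSID1/2.
			 */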
13421 			eeprom_phy_id  = (id1 >> 16) << 10;
13422 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13423 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13424 		} else
13425 			eeprom_phy_id = 0;
13426 
13427 		tp->phy_id = eeprom_phy_id;
13428 		if (eeprom_phy_serdes) {
13429 			if (!tg3_flag(tp, 5705_PLUS))
13430 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13431 			else
13432 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13433 		}
13434 
13435 		if (tg3_flag(tp, 5750_PLUS))
13436 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13437 				    SHASTA_EXT_LED_MODE_MASK);
13438 		else
13439 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13440 
13441 		switch (led_cfg) {
13442 		default:
13443 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13444 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13445 			break;
13446 
13447 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13448 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13449 			break;
13450 
13451 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13452 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13453 
13454 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13455 			 * as happens with some older 5700/5701 bootcode.
13456 			 */
13457 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13458 			    ASIC_REV_5700 ||
13459 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13460 			    ASIC_REV_5701)
13461 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13462 
13463 			break;
13464 
13465 		case SHASTA_EXT_LED_SHARED:
13466 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13467 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13468 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13469 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13470 						 LED_CTRL_MODE_PHY_2);
13471 			break;
13472 
13473 		case SHASTA_EXT_LED_MAC:
13474 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13475 			break;
13476 
13477 		case SHASTA_EXT_LED_COMBO:
13478 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13479 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13480 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13481 						 LED_CTRL_MODE_PHY_2);
13482 			break;
13483 
13484 		}
13485 
13486 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13487 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13488 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13489 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13490 
13491 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13492 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13493 
13494 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13495 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13496 			if ((tp->pdev->subsystem_vendor ==
13497 			     PCI_VENDOR_ID_ARIMA) &&
13498 			    (tp->pdev->subsystem_device == 0x205a ||
13499 			     tp->pdev->subsystem_device == 0x2063))
13500 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13501 		} else {
13502 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13503 			tg3_flag_set(tp, IS_NIC);
13504 		}
13505 
13506 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13507 			tg3_flag_set(tp, ENABLE_ASF);
13508 			if (tg3_flag(tp, 5750_PLUS))
13509 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13510 		}
13511 
13512 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13513 		    tg3_flag(tp, 5750_PLUS))
13514 			tg3_flag_set(tp, ENABLE_APE);
13515 
13516 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13517 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13518 			tg3_flag_clear(tp, WOL_CAP);
13519 
13520 		if (tg3_flag(tp, WOL_CAP) &&
13521 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13522 			tg3_flag_set(tp, WOL_ENABLE);
13523 			device_set_wakeup_enable(&tp->pdev->dev, true);
13524 		}
13525 
13526 		if (cfg2 & (1 << 17))
13527 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13528 
13529 		/* SerDes signal pre-emphasis in register 0x590 is set
13530 		 * by the bootcode if bit 18 is set. */
13531 		if (cfg2 & (1 << 18))
13532 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13533 
13534 		if ((tg3_flag(tp, 57765_PLUS) ||
13535 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13536 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13537 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13538 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13539 
13540 		if (tg3_flag(tp, PCI_EXPRESS) &&
13541 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13542 		    !tg3_flag(tp, 57765_PLUS)) {
13543 			u32 cfg3;
13544 
13545 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13546 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13547 				tg3_flag_set(tp, ASPM_WORKAROUND);
13548 		}
13549 
13550 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13551 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13552 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13553 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13554 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13555 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13556 	}
13557 done:
13558 	if (tg3_flag(tp, WOL_CAP))
13559 		device_set_wakeup_enable(&tp->pdev->dev,
13560 					 tg3_flag(tp, WOL_ENABLE));
13561 	else
13562 		device_set_wakeup_capable(&tp->pdev->dev, false);
13563 }
13564 
13565 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13566 {
13567 	int i;
13568 	u32 val;
13569 
13570 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13571 	tw32(OTP_CTRL, cmd);
13572 
13573 	/* Wait for up to 1 ms for command to execute. */
13574 	for (i = 0; i < 100; i++) {
13575 		val = tr32(OTP_STATUS);
13576 		if (val & OTP_STATUS_CMD_DONE)
13577 			break;
13578 		udelay(10);
13579 	}
13580 
13581 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13582 }
13583 
13584 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13585  * configuration is a 32-bit value that straddles the alignment boundary.
13586  * We do two 32-bit reads and then shift and merge the results.
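 * For example, with made-up values: if the two OTP_READ_DATA words
 * were 0xaaaa1111 and then 0x2222bbbb, the merged result would be
 * ((0xaaaa1111 & 0x0000ffff) << 16) | (0x2222bbbb >> 16), i.e.
 * 0x11112222.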
13587  */
13588 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13589 {
13590 	u32 bhalf_otp, thalf_otp;
13591 
13592 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13593 
13594 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13595 		return 0;
13596 
13597 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13598 
13599 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13600 		return 0;
13601 
13602 	thalf_otp = tr32(OTP_READ_DATA);
13603 
13604 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13605 
13606 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13607 		return 0;
13608 
13609 	bhalf_otp = tr32(OTP_READ_DATA);
13610 
13611 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13612 }
13613 
13614 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13615 {
13616 	u32 adv = ADVERTISED_Autoneg;
13617 
13618 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13619 		adv |= ADVERTISED_1000baseT_Half |
13620 		       ADVERTISED_1000baseT_Full;
13621 
13622 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13623 		adv |= ADVERTISED_100baseT_Half |
13624 		       ADVERTISED_100baseT_Full |
13625 		       ADVERTISED_10baseT_Half |
13626 		       ADVERTISED_10baseT_Full |
13627 		       ADVERTISED_TP;
13628 	else
13629 		adv |= ADVERTISED_FIBRE;
13630 
13631 	tp->link_config.advertising = adv;
13632 	tp->link_config.speed = SPEED_UNKNOWN;
13633 	tp->link_config.duplex = DUPLEX_UNKNOWN;
13634 	tp->link_config.autoneg = AUTONEG_ENABLE;
13635 	tp->link_config.active_speed = SPEED_UNKNOWN;
13636 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13637 
13638 	tp->old_link = -1;
13639 }
13640 
13641 static int __devinit tg3_phy_probe(struct tg3 *tp)
13642 {
13643 	u32 hw_phy_id_1, hw_phy_id_2;
13644 	u32 hw_phy_id, hw_phy_id_masked;
13645 	int err;
13646 
13647 	/* flow control autonegotiation is default behavior */
13648 	tg3_flag_set(tp, PAUSE_AUTONEG);
13649 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13650 
13651 	if (tg3_flag(tp, USE_PHYLIB))
13652 		return tg3_phy_init(tp);
13653 
13654 	/* Reading the PHY ID register can conflict with ASF
13655 	 * firmware access to the PHY hardware.
13656 	 */
13657 	err = 0;
13658 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13659 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13660 	} else {
13661 		/* Now read the physical PHY_ID from the chip and verify
13662 		 * that it is sane.  If it doesn't look good, we fall back
13663 		 * first to the PHY_ID found in the eeprom area, and
13664 		 * failing that to the hard-coded subsystem device table.
13665 		 */
13666 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13667 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13668 
13669 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13670 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13671 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13672 
13673 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13674 	}
13675 
13676 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13677 		tp->phy_id = hw_phy_id;
13678 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13679 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13680 		else
13681 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13682 	} else {
13683 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13684 			/* Do nothing, phy ID already set up in
13685 			 * tg3_get_eeprom_hw_cfg().
13686 			 */
13687 		} else {
13688 			struct subsys_tbl_ent *p;
13689 
13690 			/* No eeprom signature?  Try the hardcoded
13691 			 * subsys device table.
13692 			 */
13693 			p = tg3_lookup_by_subsys(tp);
13694 			if (!p)
13695 				return -ENODEV;
13696 
13697 			tp->phy_id = p->phy_id;
13698 			if (!tp->phy_id ||
13699 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13700 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13701 		}
13702 	}
13703 
13704 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13705 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13706 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13707 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13708 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13709 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13710 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13711 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13712 
13713 	tg3_phy_init_link_config(tp);
13714 
13715 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13716 	    !tg3_flag(tp, ENABLE_APE) &&
13717 	    !tg3_flag(tp, ENABLE_ASF)) {
13718 		u32 bmsr, dummy;
13719 
13720 		tg3_readphy(tp, MII_BMSR, &bmsr);
13721 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13722 		    (bmsr & BMSR_LSTATUS))
13723 			goto skip_phy_reset;
13724 
13725 		err = tg3_phy_reset(tp);
13726 		if (err)
13727 			return err;
13728 
13729 		tg3_phy_set_wirespeed(tp);
13730 
13731 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13732 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13733 					    tp->link_config.flowctrl);
13734 
13735 			tg3_writephy(tp, MII_BMCR,
13736 				     BMCR_ANENABLE | BMCR_ANRESTART);
13737 		}
13738 	}
13739 
13740 skip_phy_reset:
13741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13742 		err = tg3_init_5401phy_dsp(tp);
13743 		if (err)
13744 			return err;
13745 
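		/* Note that the DSP init is intentionally issued a
		 * second time here for the 5401 PHY.
		 */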
13746 		err = tg3_init_5401phy_dsp(tp);
13747 	}
13748 
13749 	return err;
13750 }
13751 
13752 static void __devinit tg3_read_vpd(struct tg3 *tp)
13753 {
13754 	u8 *vpd_data;
13755 	unsigned int block_end, rosize, len;
13756 	u32 vpdlen;
13757 	int j, i = 0;
13758 
13759 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13760 	if (!vpd_data)
13761 		goto out_no_vpd;
13762 
13763 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13764 	if (i < 0)
13765 		goto out_not_found;
13766 
13767 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13768 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13769 	i += PCI_VPD_LRDT_TAG_SIZE;
13770 
13771 	if (block_end > vpdlen)
13772 		goto out_not_found;
13773 
13774 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13775 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13776 	if (j > 0) {
13777 		len = pci_vpd_info_field_size(&vpd_data[j]);
13778 
13779 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13780 		if (j + len > block_end || len != 4 ||
13781 		    memcmp(&vpd_data[j], "1028", 4))
13782 			goto partno;
13783 
13784 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13785 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13786 		if (j < 0)
13787 			goto partno;
13788 
13789 		len = pci_vpd_info_field_size(&vpd_data[j]);
13790 
13791 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13792 		if (j + len > block_end)
13793 			goto partno;
13794 
13795 		memcpy(tp->fw_ver, &vpd_data[j], min_t(u32, len, TG3_VER_SIZE - 1));
13796 		strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13797 	}
13798 
13799 partno:
13800 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13801 				      PCI_VPD_RO_KEYWORD_PARTNO);
13802 	if (i < 0)
13803 		goto out_not_found;
13804 
13805 	len = pci_vpd_info_field_size(&vpd_data[i]);
13806 
13807 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13808 	if (len > TG3_BPN_SIZE ||
13809 	    (len + i) > vpdlen)
13810 		goto out_not_found;
13811 
13812 	memcpy(tp->board_part_number, &vpd_data[i], len);
13813 
13814 out_not_found:
13815 	kfree(vpd_data);
13816 	if (tp->board_part_number[0])
13817 		return;
13818 
13819 out_no_vpd:
13820 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13821 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13822 			strcpy(tp->board_part_number, "BCM5717");
13823 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13824 			strcpy(tp->board_part_number, "BCM5718");
13825 		else
13826 			goto nomatch;
13827 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13828 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13829 			strcpy(tp->board_part_number, "BCM57780");
13830 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13831 			strcpy(tp->board_part_number, "BCM57760");
13832 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13833 			strcpy(tp->board_part_number, "BCM57790");
13834 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13835 			strcpy(tp->board_part_number, "BCM57788");
13836 		else
13837 			goto nomatch;
13838 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13839 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13840 			strcpy(tp->board_part_number, "BCM57761");
13841 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13842 			strcpy(tp->board_part_number, "BCM57765");
13843 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13844 			strcpy(tp->board_part_number, "BCM57781");
13845 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13846 			strcpy(tp->board_part_number, "BCM57785");
13847 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13848 			strcpy(tp->board_part_number, "BCM57791");
13849 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13850 			strcpy(tp->board_part_number, "BCM57795");
13851 		else
13852 			goto nomatch;
13853 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13854 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13855 			strcpy(tp->board_part_number, "BCM57762");
13856 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13857 			strcpy(tp->board_part_number, "BCM57766");
13858 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13859 			strcpy(tp->board_part_number, "BCM57782");
13860 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13861 			strcpy(tp->board_part_number, "BCM57786");
13862 		else
13863 			goto nomatch;
13864 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13865 		strcpy(tp->board_part_number, "BCM95906");
13866 	} else {
13867 nomatch:
13868 		strcpy(tp->board_part_number, "none");
13869 	}
13870 }
13871 
13872 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13873 {
13874 	u32 val;
13875 
13876 	if (tg3_nvram_read(tp, offset, &val) ||
13877 	    (val & 0xfc000000) != 0x0c000000 ||
13878 	    tg3_nvram_read(tp, offset + 4, &val) ||
13879 	    val != 0)
13880 		return 0;
13881 
13882 	return 1;
13883 }
13884 
13885 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13886 {
13887 	u32 val, offset, start, ver_offset;
13888 	int i, dst_off;
13889 	bool newver = false;
13890 
13891 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13892 	    tg3_nvram_read(tp, 0x4, &start))
13893 		return;
13894 
13895 	offset = tg3_nvram_logical_addr(tp, offset);
13896 
13897 	if (tg3_nvram_read(tp, offset, &val))
13898 		return;
13899 
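	/* 0x0c0000xx in the first image word plus a zero second word
	 * identifies the newer bootcode image format; this mirrors the
	 * signature test in tg3_fw_img_is_valid().
	 */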
13900 	if ((val & 0xfc000000) == 0x0c000000) {
13901 		if (tg3_nvram_read(tp, offset + 4, &val))
13902 			return;
13903 
13904 		if (val == 0)
13905 			newver = true;
13906 	}
13907 
13908 	dst_off = strlen(tp->fw_ver);
13909 
13910 	if (newver) {
13911 		if (TG3_VER_SIZE - dst_off < 16 ||
13912 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13913 			return;
13914 
13915 		offset = offset + ver_offset - start;
13916 		for (i = 0; i < 16; i += 4) {
13917 			__be32 v;
13918 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13919 				return;
13920 
13921 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13922 		}
13923 	} else {
13924 		u32 major, minor;
13925 
13926 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13927 			return;
13928 
13929 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13930 			TG3_NVM_BCVER_MAJSFT;
13931 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13932 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13933 			 "v%d.%02d", major, minor);
13934 	}
13935 }
13936 
13937 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13938 {
13939 	u32 val, major, minor;
13940 
13941 	/* Use native endian representation */
13942 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13943 		return;
13944 
13945 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13946 		TG3_NVM_HWSB_CFG1_MAJSFT;
13947 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13948 		TG3_NVM_HWSB_CFG1_MINSFT;
13949 
13950 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13951 }
13952 
13953 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13954 {
13955 	u32 offset, major, minor, build;
13956 
13957 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13958 
13959 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13960 		return;
13961 
13962 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13963 	case TG3_EEPROM_SB_REVISION_0:
13964 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13965 		break;
13966 	case TG3_EEPROM_SB_REVISION_2:
13967 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13968 		break;
13969 	case TG3_EEPROM_SB_REVISION_3:
13970 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13971 		break;
13972 	case TG3_EEPROM_SB_REVISION_4:
13973 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13974 		break;
13975 	case TG3_EEPROM_SB_REVISION_5:
13976 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13977 		break;
13978 	case TG3_EEPROM_SB_REVISION_6:
13979 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13980 		break;
13981 	default:
13982 		return;
13983 	}
13984 
13985 	if (tg3_nvram_read(tp, offset, &val))
13986 		return;
13987 
13988 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13989 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13990 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13991 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13992 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13993 
13994 	if (minor > 99 || build > 26)
13995 		return;
13996 
13997 	offset = strlen(tp->fw_ver);
13998 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13999 		 " v%d.%02d", major, minor);
14000 
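	/* A non-zero build number 1..26 is encoded as a trailing
	 * suffix letter 'a'..'z'.
	 */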
14001 	if (build > 0) {
14002 		offset = strlen(tp->fw_ver);
14003 		if (offset < TG3_VER_SIZE - 1)
14004 			tp->fw_ver[offset] = 'a' + build - 1;
14005 	}
14006 }
14007 
14008 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14009 {
14010 	u32 val, offset, start;
14011 	int i, vlen;
14012 
14013 	for (offset = TG3_NVM_DIR_START;
14014 	     offset < TG3_NVM_DIR_END;
14015 	     offset += TG3_NVM_DIRENT_SIZE) {
14016 		if (tg3_nvram_read(tp, offset, &val))
14017 			return;
14018 
14019 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14020 			break;
14021 	}
14022 
14023 	if (offset == TG3_NVM_DIR_END)
14024 		return;
14025 
14026 	if (!tg3_flag(tp, 5705_PLUS))
14027 		start = 0x08000000;
14028 	else if (tg3_nvram_read(tp, offset - 4, &start))
14029 		return;
14030 
14031 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
14032 	    !tg3_fw_img_is_valid(tp, offset) ||
14033 	    tg3_nvram_read(tp, offset + 8, &val))
14034 		return;
14035 
14036 	offset += val - start;
14037 
14038 	vlen = strlen(tp->fw_ver);
14039 
14040 	tp->fw_ver[vlen++] = ',';
14041 	tp->fw_ver[vlen++] = ' ';
14042 
14043 	for (i = 0; i < 4; i++) {
14044 		__be32 v;
14045 		if (tg3_nvram_read_be32(tp, offset, &v))
14046 			return;
14047 
14048 		offset += sizeof(v);
14049 
14050 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
14051 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14052 			break;
14053 		}
14054 
14055 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14056 		vlen += sizeof(v);
14057 	}
14058 }
14059 
14060 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14061 {
14062 	u32 apedata;
14063 
14064 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14065 	if (apedata != APE_SEG_SIG_MAGIC)
14066 		return;
14067 
14068 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14069 	if (!(apedata & APE_FW_STATUS_READY))
14070 		return;
14071 
14072 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14073 		tg3_flag_set(tp, APE_HAS_NCSI);
14074 }
14075 
14076 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14077 {
14078 	int vlen;
14079 	u32 apedata;
14080 	char *fwtype;
14081 
14082 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14083 
14084 	if (tg3_flag(tp, APE_HAS_NCSI))
14085 		fwtype = "NCSI";
14086 	else
14087 		fwtype = "DASH";
14088 
14089 	vlen = strlen(tp->fw_ver);
14090 
14091 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14092 		 fwtype,
14093 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14094 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14095 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14096 		 (apedata & APE_FW_VERSION_BLDMSK));
14097 }
14098 
14099 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14100 {
14101 	u32 val;
14102 	bool vpd_vers = false;
14103 
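	/* A non-empty fw_ver at this point means tg3_read_vpd() has
	 * already placed a version string from the VPD block there.
	 */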
14104 	if (tp->fw_ver[0] != 0)
14105 		vpd_vers = true;
14106 
14107 	if (tg3_flag(tp, NO_NVRAM)) {
14108 		strcat(tp->fw_ver, "sb");
14109 		return;
14110 	}
14111 
14112 	if (tg3_nvram_read(tp, 0, &val))
14113 		return;
14114 
14115 	if (val == TG3_EEPROM_MAGIC)
14116 		tg3_read_bc_ver(tp);
14117 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14118 		tg3_read_sb_ver(tp, val);
14119 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14120 		tg3_read_hwsb_ver(tp);
14121 
14122 	if (tg3_flag(tp, ENABLE_ASF)) {
14123 		if (tg3_flag(tp, ENABLE_APE)) {
14124 			tg3_probe_ncsi(tp);
14125 			if (!vpd_vers)
14126 				tg3_read_dash_ver(tp);
14127 		} else if (!vpd_vers) {
14128 			tg3_read_mgmtfw_ver(tp);
14129 		}
14130 	}
14131 
14132 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14133 }
14134 
14135 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14136 {
14137 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
14138 		return TG3_RX_RET_MAX_SIZE_5717;
14139 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14140 		return TG3_RX_RET_MAX_SIZE_5700;
14141 	else
14142 		return TG3_RX_RET_MAX_SIZE_5705;
14143 }
14144 
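/* Host bridges on this list are known to reorder PCI writes; the
 * driver later checks it with pci_dev_present() to decide whether
 * the mailbox write reordering workaround must be enabled.
 */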
14145 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14146 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14147 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14148 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14149 	{ },
14150 };
14151 
14152 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14153 {
14154 	struct pci_dev *peer;
14155 	unsigned int func, devnr = tp->pdev->devfn & ~7;
14156 
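	/* devfn & ~7 masks off the 3-bit function number, yielding
	 * function 0 of the same slot; scan all 8 functions for a
	 * sibling device.
	 */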
14157 	for (func = 0; func < 8; func++) {
14158 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
14159 		if (peer && peer != tp->pdev)
14160 			break;
14161 		pci_dev_put(peer);
14162 	}
14163 	/* 5704 can be configured in single-port mode; set peer to
14164 	 * tp->pdev in that case.
14165 	 */
14166 	if (!peer) {
14167 		peer = tp->pdev;
14168 		return peer;
14169 	}
14170 
14171 	/*
14172 	 * We don't need to keep the refcount elevated; there's no way
14173 	 * to remove one half of this device without removing the other.
14174 	 */
14175 	pci_dev_put(peer);
14176 
14177 	return peer;
14178 }
14179 
14180 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14181 {
14182 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14183 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14184 		u32 reg;
14185 
14186 		/* All devices that use the alternate
14187 		 * ASIC REV location have a CPMU.
14188 		 */
14189 		tg3_flag_set(tp, CPMU_PRESENT);
14190 
14191 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14192 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14193 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14194 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14195 			reg = TG3PCI_GEN2_PRODID_ASICREV;
14196 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14197 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14198 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14199 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14200 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14201 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14202 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14203 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14204 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14205 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14206 			reg = TG3PCI_GEN15_PRODID_ASICREV;
14207 		else
14208 			reg = TG3PCI_PRODID_ASICREV;
14209 
14210 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14211 	}
14212 
14213 	/* Wrong chip ID in 5752 A0. This code can be removed later
14214 	 * as A0 is not in production.
14215 	 */
14216 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14217 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14218 
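	/* The chip-class flags set below nest: 5717_PLUS and
	 * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS,
	 * which implies 5750_PLUS, which implies 5705_PLUS.
	 */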
14219 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14220 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14221 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14222 		tg3_flag_set(tp, 5717_PLUS);
14223 
14224 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14225 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14226 		tg3_flag_set(tp, 57765_CLASS);
14227 
14228 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14229 		tg3_flag_set(tp, 57765_PLUS);
14230 
14231 	/* Intentionally exclude ASIC_REV_5906 */
14232 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14233 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14234 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14235 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14236 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14237 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14238 	    tg3_flag(tp, 57765_PLUS))
14239 		tg3_flag_set(tp, 5755_PLUS);
14240 
14241 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14242 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14243 		tg3_flag_set(tp, 5780_CLASS);
14244 
14245 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14246 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14247 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14248 	    tg3_flag(tp, 5755_PLUS) ||
14249 	    tg3_flag(tp, 5780_CLASS))
14250 		tg3_flag_set(tp, 5750_PLUS);
14251 
14252 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14253 	    tg3_flag(tp, 5750_PLUS))
14254 		tg3_flag_set(tp, 5705_PLUS);
14255 }
14256 
14257 static int __devinit tg3_get_invariants(struct tg3 *tp)
14258 {
14259 	u32 misc_ctrl_reg;
14260 	u32 pci_state_reg, grc_misc_cfg;
14261 	u32 val;
14262 	u16 pci_cmd;
14263 	int err;
14264 
14265 	/* Force memory write invalidate off.  If we leave it on,
14266 	 * then on 5700_BX chips we have to enable a workaround.
14267 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14268 	 * to match the cacheline size.  The Broadcom driver has this
14269 	 * workaround but turns MWI off all the time, so it never uses
14270 	 * it.  This seems to suggest that the workaround is insufficient.
14271 	 */
14272 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14273 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14274 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14275 
14276 	/* Important! -- Make sure register accesses are byteswapped
14277 	 * correctly.  Also, for those chips that require it, make
14278 	 * sure that indirect register accesses are enabled before
14279 	 * the first operation.
14280 	 */
14281 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14282 			      &misc_ctrl_reg);
14283 	tp->misc_host_ctrl |= (misc_ctrl_reg &
14284 			       MISC_HOST_CTRL_CHIPREV);
14285 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14286 			       tp->misc_host_ctrl);
14287 
14288 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
14289 
14290 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14291 	 * we need to disable memory and use config. cycles
14292 	 * only to access all registers. The 5702/03 chips
14293 	 * can mistakenly decode the special cycles from the
14294 	 * ICH chipsets as memory write cycles, causing corruption
14295 	 * of register and memory space. Only certain ICH bridges
14296 	 * will drive special cycles with non-zero data during the
14297 	 * address phase which can fall within the 5703's address
14298 	 * range. This is not an ICH bug as the PCI spec allows
14299 	 * non-zero address during special cycles. However, only
14300 	 * these ICH bridges are known to drive non-zero addresses
14301 	 * during special cycles.
14302 	 *
14303 	 * Since special cycles do not cross PCI bridges, we only
14304 	 * enable this workaround if the 5703 is on the secondary
14305 	 * bus of these ICH bridges.
14306 	 */
14307 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14308 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14309 		static struct tg3_dev_id {
14310 			u32	vendor;
14311 			u32	device;
14312 			u32	rev;
14313 		} ich_chipsets[] = {
14314 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14315 			  PCI_ANY_ID },
14316 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14317 			  PCI_ANY_ID },
14318 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14319 			  0xa },
14320 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14321 			  PCI_ANY_ID },
14322 			{ },
14323 		};
14324 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
14325 		struct pci_dev *bridge = NULL;
14326 
14327 		while (pci_id->vendor != 0) {
14328 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
14329 						bridge);
14330 			if (!bridge) {
14331 				pci_id++;
14332 				continue;
14333 			}
14334 			if (pci_id->rev != PCI_ANY_ID) {
14335 				if (bridge->revision > pci_id->rev)
14336 					continue;
14337 			}
14338 			if (bridge->subordinate &&
14339 			    (bridge->subordinate->number ==
14340 			     tp->pdev->bus->number)) {
14341 				tg3_flag_set(tp, ICH_WORKAROUND);
14342 				pci_dev_put(bridge);
14343 				break;
14344 			}
14345 		}
14346 	}
14347 
14348 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14349 		static struct tg3_dev_id {
14350 			u32	vendor;
14351 			u32	device;
14352 		} bridge_chipsets[] = {
14353 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14354 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14355 			{ },
14356 		};
14357 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14358 		struct pci_dev *bridge = NULL;
14359 
14360 		while (pci_id->vendor != 0) {
14361 			bridge = pci_get_device(pci_id->vendor,
14362 						pci_id->device,
14363 						bridge);
14364 			if (!bridge) {
14365 				pci_id++;
14366 				continue;
14367 			}
14368 			if (bridge->subordinate &&
14369 			    (bridge->subordinate->number <=
14370 			     tp->pdev->bus->number) &&
14371 			    (bridge->subordinate->busn_res.end >=
14372 			     tp->pdev->bus->number)) {
14373 				tg3_flag_set(tp, 5701_DMA_BUG);
14374 				pci_dev_put(bridge);
14375 				break;
14376 			}
14377 		}
14378 	}
14379 
14380 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14381 	 * DMA addresses > 40-bit. This bridge may have other additional
14382 	 * 57xx devices behind it in some 4-port NIC designs for example.
14383 	 * Any tg3 device found behind the bridge will also need the 40-bit
14384 	 * DMA workaround.
14385 	 */
14386 	if (tg3_flag(tp, 5780_CLASS)) {
14387 		tg3_flag_set(tp, 40BIT_DMA_BUG);
14388 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14389 	} else {
14390 		struct pci_dev *bridge = NULL;
14391 
14392 		do {
14393 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14394 						PCI_DEVICE_ID_SERVERWORKS_EPB,
14395 						bridge);
14396 			if (bridge && bridge->subordinate &&
14397 			    (bridge->subordinate->number <=
14398 			     tp->pdev->bus->number) &&
14399 			    (bridge->subordinate->busn_res.end >=
14400 			     tp->pdev->bus->number)) {
14401 				tg3_flag_set(tp, 40BIT_DMA_BUG);
14402 				pci_dev_put(bridge);
14403 				break;
14404 			}
14405 		} while (bridge);
14406 	}
14407 
14408 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14409 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14410 		tp->pdev_peer = tg3_find_peer(tp);
14411 
14412 	/* Determine TSO capabilities */
14413 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14414 		; /* Do nothing. HW bug. */
14415 	else if (tg3_flag(tp, 57765_PLUS))
14416 		tg3_flag_set(tp, HW_TSO_3);
14417 	else if (tg3_flag(tp, 5755_PLUS) ||
14418 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14419 		tg3_flag_set(tp, HW_TSO_2);
14420 	else if (tg3_flag(tp, 5750_PLUS)) {
14421 		tg3_flag_set(tp, HW_TSO_1);
14422 		tg3_flag_set(tp, TSO_BUG);
14423 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14424 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14425 			tg3_flag_clear(tp, TSO_BUG);
14426 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14427 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14428 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14429 		tg3_flag_set(tp, TSO_BUG);
14430 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14431 			tp->fw_needed = FIRMWARE_TG3TSO5;
14432 		else
14433 			tp->fw_needed = FIRMWARE_TG3TSO;
14434 	}
14435 
14436 	/* Selectively allow TSO based on operating conditions */
14437 	if (tg3_flag(tp, HW_TSO_1) ||
14438 	    tg3_flag(tp, HW_TSO_2) ||
14439 	    tg3_flag(tp, HW_TSO_3) ||
14440 	    tp->fw_needed) {
14441 		/* For firmware TSO, assume ASF is disabled.
14442 		 * We'll disable TSO later if we discover ASF
14443 		 * is enabled in tg3_get_eeprom_hw_cfg().
14444 		 */
14445 		tg3_flag_set(tp, TSO_CAPABLE);
14446 	} else {
14447 		tg3_flag_clear(tp, TSO_CAPABLE);
14448 		tg3_flag_clear(tp, TSO_BUG);
14449 		tp->fw_needed = NULL;
14450 	}
14451 
14452 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14453 		tp->fw_needed = FIRMWARE_TG3;
14454 
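	/* Default to a single interrupt vector.  MSI and MSI-X support
	 * are enabled selectively below based on chip generation and
	 * known errata.
	 */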
14455 	tp->irq_max = 1;
14456 
14457 	if (tg3_flag(tp, 5750_PLUS)) {
14458 		tg3_flag_set(tp, SUPPORT_MSI);
14459 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14460 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14461 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14462 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14463 		     tp->pdev_peer == tp->pdev))
14464 			tg3_flag_clear(tp, SUPPORT_MSI);
14465 
14466 		if (tg3_flag(tp, 5755_PLUS) ||
14467 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14468 			tg3_flag_set(tp, 1SHOT_MSI);
14469 		}
14470 
14471 		if (tg3_flag(tp, 57765_PLUS)) {
14472 			tg3_flag_set(tp, SUPPORT_MSIX);
14473 			tp->irq_max = TG3_IRQ_MAX_VECS;
14474 			tg3_rss_init_dflt_indir_tbl(tp);
14475 		}
14476 	}
14477 
14478 	if (tg3_flag(tp, 5755_PLUS) ||
14479 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14480 		tg3_flag_set(tp, SHORT_DMA_BUG);
14481 
14482 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14483 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14484 
14485 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14486 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14487 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14488 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14489 
14490 	if (tg3_flag(tp, 57765_PLUS) &&
14491 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14492 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14493 
14494 	if (!tg3_flag(tp, 5705_PLUS) ||
14495 	    tg3_flag(tp, 5780_CLASS) ||
14496 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14497 		tg3_flag_set(tp, JUMBO_CAPABLE);
14498 
14499 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14500 			      &pci_state_reg);
14501 
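	/* Classify the host interface: native PCIe, the PCIe-like 5785
	 * (which lacks a PCIe capability block), or legacy PCI/PCI-X.
	 * Several register access workarounds below key off this.
	 */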
14502 	if (pci_is_pcie(tp->pdev)) {
14503 		u16 lnkctl;
14504 
14505 		tg3_flag_set(tp, PCI_EXPRESS);
14506 
14507 		pci_read_config_word(tp->pdev,
14508 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14509 				     &lnkctl);
14510 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14511 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14512 			    ASIC_REV_5906) {
14513 				tg3_flag_clear(tp, HW_TSO_2);
14514 				tg3_flag_clear(tp, TSO_CAPABLE);
14515 			}
14516 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14517 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14518 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14519 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14520 				tg3_flag_set(tp, CLKREQ_BUG);
14521 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14522 			tg3_flag_set(tp, L1PLLPD_EN);
14523 		}
14524 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14525 		/* BCM5785 devices are effectively PCIe devices, and should
14526 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14527 		 * section.
14528 		 */
14529 		tg3_flag_set(tp, PCI_EXPRESS);
14530 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14531 		   tg3_flag(tp, 5780_CLASS)) {
14532 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14533 		if (!tp->pcix_cap) {
14534 			dev_err(&tp->pdev->dev,
14535 				"Cannot find PCI-X capability, aborting\n");
14536 			return -EIO;
14537 		}
14538 
14539 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14540 			tg3_flag_set(tp, PCIX_MODE);
14541 	}
14542 
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering of mailbox register writes by the host
	 * controller can cause major trouble.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
14549 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14550 	    !tg3_flag(tp, PCI_EXPRESS))
14551 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14552 
14553 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14554 			     &tp->pci_cacheline_sz);
14555 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14556 			     &tp->pci_lat_timer);
14557 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14558 	    tp->pci_lat_timer < 64) {
14559 		tp->pci_lat_timer = 64;
14560 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14561 				      tp->pci_lat_timer);
14562 	}
14563 
14564 	/* Important! -- It is critical that the PCI-X hw workaround
14565 	 * situation is decided before the first MMIO register access.
14566 	 */
14567 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
14571 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14572 
14573 		/* If we are in PCI-X mode, enable register write workaround.
14574 		 *
14575 		 * The workaround is to use indirect register accesses
14576 		 * for all chip writes not to mailbox registers.
14577 		 */
14578 		if (tg3_flag(tp, PCIX_MODE)) {
14579 			u32 pm_reg;
14580 
14581 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14582 
			/* The chip can have its power management PCI config
14584 			 * space registers clobbered due to this bug.
14585 			 * So explicitly force the chip into D0 here.
14586 			 */
14587 			pci_read_config_dword(tp->pdev,
14588 					      tp->pm_cap + PCI_PM_CTRL,
14589 					      &pm_reg);
14590 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14591 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14592 			pci_write_config_dword(tp->pdev,
14593 					       tp->pm_cap + PCI_PM_CTRL,
14594 					       pm_reg);
14595 
14596 			/* Also, force SERR#/PERR# in PCI command. */
14597 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14598 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14599 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14600 		}
14601 	}
14602 
14603 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14604 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14605 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14606 		tg3_flag_set(tp, PCI_32BIT);
14607 
14608 	/* Chip-specific fixup from Broadcom driver */
14609 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14610 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14611 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14612 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14613 	}
14614 
14615 	/* Default fast path register access methods */
14616 	tp->read32 = tg3_read32;
14617 	tp->write32 = tg3_write32;
14618 	tp->read32_mbox = tg3_read32;
14619 	tp->write32_mbox = tg3_write32;
14620 	tp->write32_tx_mbox = tg3_write32;
14621 	tp->write32_rx_mbox = tg3_write32;
14622 
14623 	/* Various workaround register access methods */
14624 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14625 		tp->write32 = tg3_write_indirect_reg32;
14626 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14627 		 (tg3_flag(tp, PCI_EXPRESS) &&
14628 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14629 		/*
		 * Back to back register writes can cause problems on these
		 * chips; the workaround is to read back all register writes
		 * except those to mailbox registers.
		 *
		 * See tg3_write_flush_reg32().
14635 		 */
14636 		tp->write32 = tg3_write_flush_reg32;
14637 	}
14638 
14639 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14640 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14641 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14642 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14643 	}
14644 
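	/* The ICH workaround routes all register and mailbox accesses
	 * through PCI config space, so the memory-mapped BAR is no
	 * longer needed; unmap it and disable memory-space decoding.
	 */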
14645 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14646 		tp->read32 = tg3_read_indirect_reg32;
14647 		tp->write32 = tg3_write_indirect_reg32;
14648 		tp->read32_mbox = tg3_read_indirect_mbox;
14649 		tp->write32_mbox = tg3_write_indirect_mbox;
14650 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14651 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14652 
14653 		iounmap(tp->regs);
14654 		tp->regs = NULL;
14655 
14656 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14657 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14658 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14659 	}
14660 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14661 		tp->read32_mbox = tg3_read32_mbox_5906;
14662 		tp->write32_mbox = tg3_write32_mbox_5906;
14663 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14664 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14665 	}
14666 
14667 	if (tp->write32 == tg3_write_indirect_reg32 ||
14668 	    (tg3_flag(tp, PCIX_MODE) &&
14669 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14670 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14671 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14672 
14673 	/* The memory arbiter has to be enabled in order for SRAM accesses
14674 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14675 	 * sure it is enabled, but other entities such as system netboot
14676 	 * code might disable it.
14677 	 */
14678 	val = tr32(MEMARB_MODE);
14679 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14680 
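	/* Determine the PCI function number.  devfn is sufficient on
	 * most chips, but PCI-X dual-port parts report it in the PCI-X
	 * status register, and 5717/5719/5720 expose it via a CPMU
	 * status word in SRAM.
	 */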
14681 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14682 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14683 	    tg3_flag(tp, 5780_CLASS)) {
14684 		if (tg3_flag(tp, PCIX_MODE)) {
14685 			pci_read_config_dword(tp->pdev,
14686 					      tp->pcix_cap + PCI_X_STATUS,
14687 					      &val);
14688 			tp->pci_fn = val & 0x7;
14689 		}
14690 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14691 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14692 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14693 		    NIC_SRAM_CPMUSTAT_SIG) {
14694 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14695 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14696 		}
14697 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14698 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14699 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14700 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14701 		    NIC_SRAM_CPMUSTAT_SIG) {
14702 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14703 				     TG3_CPMU_STATUS_FSHFT_5719;
14704 		}
14705 	}
14706 
14707 	/* Get eeprom hw config before calling tg3_set_power_state().
14708 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14709 	 * determined before calling tg3_set_power_state() so that
14710 	 * we know whether or not to switch out of Vaux power.
14711 	 * When the flag is set, it means that GPIO1 is used for eeprom
14712 	 * write protect and also implies that it is a LOM where GPIOs
14713 	 * are not used to switch power.
14714 	 */
14715 	tg3_get_eeprom_hw_cfg(tp);
14716 
14717 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14718 		tg3_flag_clear(tp, TSO_CAPABLE);
14719 		tg3_flag_clear(tp, TSO_BUG);
14720 		tp->fw_needed = NULL;
14721 	}
14722 
14723 	if (tg3_flag(tp, ENABLE_APE)) {
14724 		/* Allow reads and writes to the
14725 		 * APE register and memory space.
14726 		 */
14727 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14728 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14729 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14730 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14731 				       pci_state_reg);
14732 
14733 		tg3_ape_lock_init(tp);
14734 	}
14735 
14736 	/* Set up tp->grc_local_ctrl before calling
14737 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14738 	 * will bring 5700's external PHY out of reset.
14739 	 * It is also used as eeprom write protect on LOMs.
14740 	 */
14741 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14742 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14743 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14744 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14745 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14746 	/* Unused GPIO3 must be driven as output on 5752 because there
14747 	 * are no pull-up resistors on unused GPIO pins.
14748 	 */
14749 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14750 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14751 
14752 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14753 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14754 	    tg3_flag(tp, 57765_CLASS))
14755 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14756 
14757 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14758 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14759 		/* Turn off the debug UART. */
14760 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14761 		if (tg3_flag(tp, IS_NIC))
14762 			/* Keep VMain power. */
14763 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14764 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14765 	}
14766 
14767 	/* Switch out of Vaux if it is a NIC */
14768 	tg3_pwrsrc_switch_to_vmain(tp);
14769 
14770 	/* Derive initial jumbo mode from MTU assigned in
14771 	 * ether_setup() via the alloc_etherdev() call
14772 	 */
14773 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14774 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14775 
14776 	/* Determine WakeOnLan speed to use. */
14777 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14778 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14779 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14780 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14781 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14782 	} else {
14783 		tg3_flag_set(tp, WOL_SPEED_100MB);
14784 	}
14785 
14786 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14787 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14788 
	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
14790 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14791 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14792 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14793 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14794 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14795 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14796 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14797 
14798 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14799 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14800 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14801 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14802 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14803 
14804 	if (tg3_flag(tp, 5705_PLUS) &&
14805 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14806 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14807 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14808 	    !tg3_flag(tp, 57765_PLUS)) {
14809 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14810 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14811 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14812 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14813 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14814 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14815 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14816 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14817 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14818 		} else
14819 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14820 	}
14821 
14822 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14823 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14824 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14825 		if (tp->phy_otp == 0)
14826 			tp->phy_otp = TG3_OTP_DEFAULT;
14827 	}
14828 
14829 	if (tg3_flag(tp, CPMU_PRESENT))
14830 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14831 	else
14832 		tp->mi_mode = MAC_MI_MODE_BASE;
14833 
14834 	tp->coalesce_mode = 0;
14835 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14836 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14837 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14838 
14839 	/* Set these bits to enable statistics workaround. */
14840 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14841 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14842 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14843 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14844 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14845 	}
14846 
14847 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14848 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14849 		tg3_flag_set(tp, USE_PHYLIB);
14850 
14851 	err = tg3_mdio_init(tp);
14852 	if (err)
14853 		return err;
14854 
14855 	/* Initialize data/descriptor byte/word swapping. */
14856 	val = tr32(GRC_MODE);
14857 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14858 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14859 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14860 			GRC_MODE_B2HRX_ENABLE |
14861 			GRC_MODE_HTX2B_ENABLE |
14862 			GRC_MODE_HOST_STACKUP);
14863 	else
14864 		val &= GRC_MODE_HOST_STACKUP;
14865 
14866 	tw32(GRC_MODE, val | tp->grc_mode);
14867 
14868 	tg3_switch_clocks(tp);
14869 
14870 	/* Clear this out for sanity. */
14871 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14872 
14873 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14874 			      &pci_state_reg);
14875 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14876 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14877 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14878 
14879 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14880 		    chiprevid == CHIPREV_ID_5701_B0 ||
14881 		    chiprevid == CHIPREV_ID_5701_B2 ||
14882 		    chiprevid == CHIPREV_ID_5701_B5) {
14883 			void __iomem *sram_base;
14884 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * readback is bad, force enable the PCI-X workaround.
			 */
14889 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14890 
14891 			writel(0x00000000, sram_base);
14892 			writel(0x00000000, sram_base + 4);
14893 			writel(0xffffffff, sram_base + 4);
14894 			if (readl(sram_base) != 0x00000000)
14895 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14896 		}
14897 	}
14898 
14899 	udelay(50);
14900 	tg3_nvram_init(tp);
14901 
14902 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14903 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14904 
14905 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14906 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14907 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14908 		tg3_flag_set(tp, IS_5788);
14909 
14910 	if (!tg3_flag(tp, IS_5788) &&
14911 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14912 		tg3_flag_set(tp, TAGGED_STATUS);
14913 	if (tg3_flag(tp, TAGGED_STATUS)) {
14914 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14915 				      HOSTCC_MODE_CLRTICK_TXBD);
14916 
14917 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14918 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14919 				       tp->misc_host_ctrl);
14920 	}
14921 
14922 	/* Preserve the APE MAC_MODE bits */
14923 	if (tg3_flag(tp, ENABLE_APE))
14924 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14925 	else
14926 		tp->mac_mode = 0;
14927 
	/* These chips are limited to 10/100 operation only */
14929 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14930 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14931 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14932 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14933 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14934 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14935 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14936 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14937 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14938 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14939 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14940 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14941 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14942 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14943 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14944 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14945 
14946 	err = tg3_phy_probe(tp);
14947 	if (err) {
14948 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14949 		/* ... but do not return immediately ... */
14950 		tg3_mdio_fini(tp);
14951 	}
14952 
14953 	tg3_read_vpd(tp);
14954 	tg3_read_fw_ver(tp);
14955 
14956 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14957 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14958 	} else {
14959 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14960 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14961 		else
14962 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14963 	}
14964 
14965 	/* 5700 {AX,BX} chips have a broken status block link
14966 	 * change bit implementation, so we must use the
14967 	 * status register in those cases.
14968 	 */
14969 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14970 		tg3_flag_set(tp, USE_LINKCHG_REG);
14971 	else
14972 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14973 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
14978 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14979 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14980 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14981 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14982 		tg3_flag_set(tp, USE_LINKCHG_REG);
14983 	}
14984 
14985 	/* For all SERDES we poll the MAC status register. */
14986 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14987 		tg3_flag_set(tp, POLL_SERDES);
14988 	else
14989 		tg3_flag_clear(tp, POLL_SERDES);
14990 
14991 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14992 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14993 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14994 	    tg3_flag(tp, PCIX_MODE)) {
14995 		tp->rx_offset = NET_SKB_PAD;
14996 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14997 		tp->rx_copy_thresh = ~(u16)0;
14998 #endif
14999 	}
15000 
15001 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15002 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15003 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15004 
15005 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15006 
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
15010 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15011 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15012 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15013 		tp->rx_std_max_post = 8;
15014 
15015 	if (tg3_flag(tp, ASPM_WORKAROUND))
15016 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15017 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
15018 
15019 	return err;
15020 }
15021 
15022 #ifdef CONFIG_SPARC
15023 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15024 {
15025 	struct net_device *dev = tp->dev;
15026 	struct pci_dev *pdev = tp->pdev;
15027 	struct device_node *dp = pci_device_to_OF_node(pdev);
15028 	const unsigned char *addr;
15029 	int len;
15030 
15031 	addr = of_get_property(dp, "local-mac-address", &len);
15032 	if (addr && len == 6) {
15033 		memcpy(dev->dev_addr, addr, 6);
15034 		memcpy(dev->perm_addr, dev->dev_addr, 6);
15035 		return 0;
15036 	}
15037 	return -ENODEV;
15038 }
15039 
15040 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15041 {
15042 	struct net_device *dev = tp->dev;
15043 
15044 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15045 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15046 	return 0;
15047 }
15048 #endif
15049 
15050 static int __devinit tg3_get_device_address(struct tg3 *tp)
15051 {
15052 	struct net_device *dev = tp->dev;
15053 	u32 hi, lo, mac_offset;
15054 	int addr_ok = 0;
15055 
15056 #ifdef CONFIG_SPARC
15057 	if (!tg3_get_macaddr_sparc(tp))
15058 		return 0;
15059 #endif
15060 
15061 	mac_offset = 0x7c;
15062 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15063 	    tg3_flag(tp, 5780_CLASS)) {
15064 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15065 			mac_offset = 0xcc;
15066 		if (tg3_nvram_lock(tp))
15067 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15068 		else
15069 			tg3_nvram_unlock(tp);
15070 	} else if (tg3_flag(tp, 5717_PLUS)) {
15071 		if (tp->pci_fn & 1)
15072 			mac_offset = 0xcc;
15073 		if (tp->pci_fn > 1)
15074 			mac_offset += 0x18c;
15075 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15076 		mac_offset = 0x10;
15077 
	/* First try to get it from the MAC address mailbox. */
15079 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
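	/* A valid address is tagged with an ASCII "HK" (0x484b)
	 * signature in the upper 16 bits of the high mailbox word.
	 */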
15080 	if ((hi >> 16) == 0x484b) {
15081 		dev->dev_addr[0] = (hi >>  8) & 0xff;
15082 		dev->dev_addr[1] = (hi >>  0) & 0xff;
15083 
15084 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15085 		dev->dev_addr[2] = (lo >> 24) & 0xff;
15086 		dev->dev_addr[3] = (lo >> 16) & 0xff;
15087 		dev->dev_addr[4] = (lo >>  8) & 0xff;
15088 		dev->dev_addr[5] = (lo >>  0) & 0xff;
15089 
15090 		/* Some old bootcode may report a 0 MAC address in SRAM */
15091 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15092 	}
15093 	if (!addr_ok) {
15094 		/* Next, try NVRAM. */
15095 		if (!tg3_flag(tp, NO_NVRAM) &&
15096 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15097 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
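			/* NVRAM words are big-endian; the address occupies
			 * the low two bytes of the first word and all four
			 * bytes of the second.
			 */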
15098 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15099 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15100 		}
15101 		/* Finally just fetch it out of the MAC control regs. */
15102 		else {
15103 			hi = tr32(MAC_ADDR_0_HIGH);
15104 			lo = tr32(MAC_ADDR_0_LOW);
15105 
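			/* The MAC address registers hold the address with
			 * bytes 0-1 in ADDR_0_HIGH and bytes 2-5 in
			 * ADDR_0_LOW, most significant byte first.
			 */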
15106 			dev->dev_addr[5] = lo & 0xff;
15107 			dev->dev_addr[4] = (lo >> 8) & 0xff;
15108 			dev->dev_addr[3] = (lo >> 16) & 0xff;
15109 			dev->dev_addr[2] = (lo >> 24) & 0xff;
15110 			dev->dev_addr[1] = hi & 0xff;
15111 			dev->dev_addr[0] = (hi >> 8) & 0xff;
15112 		}
15113 	}
15114 
15115 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15116 #ifdef CONFIG_SPARC
15117 		if (!tg3_get_default_macaddr_sparc(tp))
15118 			return 0;
15119 #endif
15120 		return -EINVAL;
15121 	}
15122 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15123 	return 0;
15124 }
15125 
15126 #define BOUNDARY_SINGLE_CACHELINE	1
15127 #define BOUNDARY_MULTI_CACHELINE	2
15128 
15129 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15130 {
15131 	int cacheline_size;
15132 	u8 byte;
15133 	int goal;
15134 
15135 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15136 	if (byte == 0)
15137 		cacheline_size = 1024;
15138 	else
15139 		cacheline_size = (int) byte * 4;
15140 
15141 	/* On 5703 and later chips, the boundary bits have no
15142 	 * effect.
15143 	 */
15144 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15145 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15146 	    !tg3_flag(tp, PCI_EXPRESS))
15147 		goto out;
15148 
15149 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15150 	goal = BOUNDARY_MULTI_CACHELINE;
15151 #else
15152 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15153 	goal = BOUNDARY_SINGLE_CACHELINE;
15154 #else
15155 	goal = 0;
15156 #endif
15157 #endif
15158 
15159 	if (tg3_flag(tp, 57765_PLUS)) {
15160 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15161 		goto out;
15162 	}
15163 
15164 	if (!goal)
15165 		goto out;
15166 
15167 	/* PCI controllers on most RISC systems tend to disconnect
15168 	 * when a device tries to burst across a cache-line boundary.
15169 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15170 	 *
15171 	 * Unfortunately, for PCI-E there are only limited
15172 	 * write-side controls for this, and thus for reads
15173 	 * we will still get the disconnects.  We'll also waste
15174 	 * these PCI cycles for both read and write for chips
15175 	 * other than 5700 and 5701 which do not implement the
15176 	 * boundary bits.
15177 	 */
15178 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15179 		switch (cacheline_size) {
15180 		case 16:
15181 		case 32:
15182 		case 64:
15183 		case 128:
15184 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15185 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15186 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15187 			} else {
15188 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15189 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15190 			}
15191 			break;
15192 
15193 		case 256:
15194 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15195 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15196 			break;
15197 
15198 		default:
15199 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15200 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15201 			break;
15202 		}
15203 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
15204 		switch (cacheline_size) {
15205 		case 16:
15206 		case 32:
15207 		case 64:
15208 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15209 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15210 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15211 				break;
15212 			}
15213 			/* fallthrough */
15214 		case 128:
15215 		default:
15216 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15217 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15218 			break;
15219 		}
15220 	} else {
15221 		switch (cacheline_size) {
15222 		case 16:
15223 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15224 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
15225 					DMA_RWCTRL_WRITE_BNDRY_16);
15226 				break;
15227 			}
15228 			/* fallthrough */
15229 		case 32:
15230 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15231 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
15232 					DMA_RWCTRL_WRITE_BNDRY_32);
15233 				break;
15234 			}
15235 			/* fallthrough */
15236 		case 64:
15237 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15238 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
15239 					DMA_RWCTRL_WRITE_BNDRY_64);
15240 				break;
15241 			}
15242 			/* fallthrough */
15243 		case 128:
15244 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15245 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
15246 					DMA_RWCTRL_WRITE_BNDRY_128);
15247 				break;
15248 			}
15249 			/* fallthrough */
15250 		case 256:
15251 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
15252 				DMA_RWCTRL_WRITE_BNDRY_256);
15253 			break;
15254 		case 512:
15255 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
15256 				DMA_RWCTRL_WRITE_BNDRY_512);
15257 			break;
15258 		case 1024:
15259 		default:
15260 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15261 				DMA_RWCTRL_WRITE_BNDRY_1024);
15262 			break;
15263 		}
15264 	}
15265 
15266 out:
15267 	return val;
15268 }
15269 
15270 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15271 {
15272 	struct tg3_internal_buffer_desc test_desc;
15273 	u32 sram_dma_descs;
15274 	int i, ret;
15275 
15276 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15277 
15278 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15279 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15280 	tw32(RDMAC_STATUS, 0);
15281 	tw32(WDMAC_STATUS, 0);
15282 
15283 	tw32(BUFMGR_MODE, 0);
15284 	tw32(FTQ_RESET, 0);
15285 
15286 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
15287 	test_desc.addr_lo = buf_dma & 0xffffffff;
15288 	test_desc.nic_mbuf = 0x00002100;
15289 	test_desc.len = size;
15290 
15291 	/*
	 * HP ZX1 systems saw test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an initial
	 * scan.
15295 	 *
15296 	 * Broadcom tells me:
15297 	 *   ...the DMA engine is connected to the GRC block and a DMA
15298 	 *   reset may affect the GRC block in some unpredictable way...
15299 	 *   The behavior of resets to individual blocks has not been tested.
15300 	 *
15301 	 * Broadcom noted the GRC reset will also reset all sub-components.
15302 	 */
15303 	if (to_device) {
15304 		test_desc.cqid_sqid = (13 << 8) | 2;
15305 
15306 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15307 		udelay(40);
15308 	} else {
15309 		test_desc.cqid_sqid = (16 << 8) | 7;
15310 
15311 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15312 		udelay(40);
15313 	}
15314 	test_desc.flags = 0x00000005;
15315 
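	/* Copy the test descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI config space memory window registers.
	 */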
15316 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15317 		u32 val;
15318 
15319 		val = *(((u32 *)&test_desc) + i);
15320 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15321 				       sram_dma_descs + (i * sizeof(u32)));
15322 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15323 	}
15324 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15325 
15326 	if (to_device)
15327 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15328 	else
15329 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15330 
15331 	ret = -ENODEV;
15332 	for (i = 0; i < 40; i++) {
15333 		u32 val;
15334 
15335 		if (to_device)
15336 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15337 		else
15338 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15339 		if ((val & 0xffff) == sram_dma_descs) {
15340 			ret = 0;
15341 			break;
15342 		}
15343 
15344 		udelay(100);
15345 	}
15346 
15347 	return ret;
15348 }
15349 
15350 #define TEST_BUFFER_SIZE	0x2000
15351 
15352 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15353 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15354 	{ },
15355 };
15356 
15357 static int __devinit tg3_test_dma(struct tg3 *tp)
15358 {
15359 	dma_addr_t buf_dma;
15360 	u32 *buf, saved_dma_rwctrl;
15361 	int ret = 0;
15362 
15363 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15364 				 &buf_dma, GFP_KERNEL);
15365 	if (!buf) {
15366 		ret = -ENOMEM;
15367 		goto out_nofree;
15368 	}
15369 
15370 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15371 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15372 
15373 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15374 
15375 	if (tg3_flag(tp, 57765_PLUS))
15376 		goto out;
15377 
15378 	if (tg3_flag(tp, PCI_EXPRESS)) {
15379 		/* DMA read watermark not used on PCIE */
15380 		tp->dma_rwctrl |= 0x00180000;
15381 	} else if (!tg3_flag(tp, PCIX_MODE)) {
15382 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15383 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15384 			tp->dma_rwctrl |= 0x003f0000;
15385 		else
15386 			tp->dma_rwctrl |= 0x003f000f;
15387 	} else {
15388 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15389 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15390 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15391 			u32 read_water = 0x7;
15392 
15393 			/* If the 5704 is behind the EPB bridge, we can
15394 			 * do the less restrictive ONE_DMA workaround for
15395 			 * better performance.
15396 			 */
15397 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15398 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15399 				tp->dma_rwctrl |= 0x8000;
15400 			else if (ccval == 0x6 || ccval == 0x7)
15401 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15402 
15403 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15404 				read_water = 4;
15405 			/* Set bit 23 to enable PCIX hw bug fix */
15406 			tp->dma_rwctrl |=
15407 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15408 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15409 				(1 << 23);
15410 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15411 			/* 5780 always in PCIX mode */
15412 			tp->dma_rwctrl |= 0x00144000;
15413 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15414 			/* 5714 always in PCIX mode */
15415 			tp->dma_rwctrl |= 0x00148000;
15416 		} else {
15417 			tp->dma_rwctrl |= 0x001b000f;
15418 		}
15419 	}
15420 
15421 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15422 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15423 		tp->dma_rwctrl &= 0xfffffff0;
15424 
15425 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15426 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15427 		/* Remove this if it causes problems for some boards. */
15428 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15429 
15430 		/* On 5700/5701 chips, we need to set this bit.
15431 		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory without all of the byte
		 * enables asserted.  This is an error on several
15434 		 * RISC PCI controllers, in particular sparc64.
15435 		 *
15436 		 * On 5703/5704 chips, this bit has been reassigned
15437 		 * a different meaning.  In particular, it is used
15438 		 * on those chips to enable a PCI-X workaround.
15439 		 */
15440 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15441 	}
15442 
15443 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15444 
15445 #if 0
15446 	/* Unneeded, already done by tg3_get_invariants.  */
15447 	tg3_switch_clocks(tp);
15448 #endif
15449 
15450 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15451 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15452 		goto out;
15453 
	/* It is best to perform the DMA test with the maximum write burst
	 * size to expose the 5700/5701 write DMA bug.
	 */
15457 	saved_dma_rwctrl = tp->dma_rwctrl;
15458 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15459 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15460 
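	/* Repeatedly write, read back, and verify the test pattern.  On
	 * the first corruption, drop the write boundary to 16 bytes and
	 * retry; corruption at the 16-byte boundary is a hard failure.
	 */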
15461 	while (1) {
15462 		u32 *p = buf, i;
15463 
15464 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15465 			p[i] = i;
15466 
15467 		/* Send the buffer to the chip. */
15468 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15469 		if (ret) {
15470 			dev_err(&tp->pdev->dev,
15471 				"%s: Buffer write failed. err = %d\n",
15472 				__func__, ret);
15473 			break;
15474 		}
15475 
15476 #if 0
15477 		/* validate data reached card RAM correctly. */
15478 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15479 			u32 val;
15480 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15481 			if (le32_to_cpu(val) != p[i]) {
15482 				dev_err(&tp->pdev->dev,
15483 					"%s: Buffer corrupted on device! "
15484 					"(%d != %d)\n", __func__, val, i);
15485 				/* ret = -ENODEV here? */
15486 			}
15487 			p[i] = 0;
15488 		}
15489 #endif
15490 		/* Now read it back. */
15491 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15492 		if (ret) {
15493 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15494 				"err = %d\n", __func__, ret);
15495 			break;
15496 		}
15497 
15498 		/* Verify it. */
15499 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15500 			if (p[i] == i)
15501 				continue;
15502 
15503 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15504 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15505 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15506 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15507 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15508 				break;
15509 			} else {
15510 				dev_err(&tp->pdev->dev,
15511 					"%s: Buffer corrupted on read back! "
15512 					"(%d != %d)\n", __func__, p[i], i);
15513 				ret = -ENODEV;
15514 				goto out;
15515 			}
15516 		}
15517 
15518 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15519 			/* Success. */
15520 			ret = 0;
15521 			break;
15522 		}
15523 	}
15524 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15525 	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* The DMA test passed without adjusting the DMA boundary;
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
15530 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15531 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15532 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15533 		} else {
15534 			/* Safe to use the calculated DMA boundary. */
15535 			tp->dma_rwctrl = saved_dma_rwctrl;
15536 		}
15537 
15538 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15539 	}
15540 
15541 out:
15542 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15543 out_nofree:
15544 	return ret;
15545 }
15546 
15547 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15548 {
15549 	if (tg3_flag(tp, 57765_PLUS)) {
15550 		tp->bufmgr_config.mbuf_read_dma_low_water =
15551 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15552 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15553 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15554 		tp->bufmgr_config.mbuf_high_water =
15555 			DEFAULT_MB_HIGH_WATER_57765;
15556 
15557 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15558 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15559 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15560 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15561 		tp->bufmgr_config.mbuf_high_water_jumbo =
15562 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15563 	} else if (tg3_flag(tp, 5705_PLUS)) {
15564 		tp->bufmgr_config.mbuf_read_dma_low_water =
15565 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15566 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15567 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15568 		tp->bufmgr_config.mbuf_high_water =
15569 			DEFAULT_MB_HIGH_WATER_5705;
15570 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15571 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15572 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15573 			tp->bufmgr_config.mbuf_high_water =
15574 				DEFAULT_MB_HIGH_WATER_5906;
15575 		}
15576 
15577 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15578 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15579 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15580 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15581 		tp->bufmgr_config.mbuf_high_water_jumbo =
15582 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15583 	} else {
15584 		tp->bufmgr_config.mbuf_read_dma_low_water =
15585 			DEFAULT_MB_RDMA_LOW_WATER;
15586 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15587 			DEFAULT_MB_MACRX_LOW_WATER;
15588 		tp->bufmgr_config.mbuf_high_water =
15589 			DEFAULT_MB_HIGH_WATER;
15590 
15591 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15592 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15593 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15594 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15595 		tp->bufmgr_config.mbuf_high_water_jumbo =
15596 			DEFAULT_MB_HIGH_WATER_JUMBO;
15597 	}
15598 
15599 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15600 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15601 }
15602 
15603 static char * __devinit tg3_phy_string(struct tg3 *tp)
15604 {
15605 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15606 	case TG3_PHY_ID_BCM5400:	return "5400";
15607 	case TG3_PHY_ID_BCM5401:	return "5401";
15608 	case TG3_PHY_ID_BCM5411:	return "5411";
15609 	case TG3_PHY_ID_BCM5701:	return "5701";
15610 	case TG3_PHY_ID_BCM5703:	return "5703";
15611 	case TG3_PHY_ID_BCM5704:	return "5704";
15612 	case TG3_PHY_ID_BCM5705:	return "5705";
15613 	case TG3_PHY_ID_BCM5750:	return "5750";
15614 	case TG3_PHY_ID_BCM5752:	return "5752";
15615 	case TG3_PHY_ID_BCM5714:	return "5714";
15616 	case TG3_PHY_ID_BCM5780:	return "5780";
15617 	case TG3_PHY_ID_BCM5755:	return "5755";
15618 	case TG3_PHY_ID_BCM5787:	return "5787";
15619 	case TG3_PHY_ID_BCM5784:	return "5784";
15620 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15621 	case TG3_PHY_ID_BCM5906:	return "5906";
15622 	case TG3_PHY_ID_BCM5761:	return "5761";
15623 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15624 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15625 	case TG3_PHY_ID_BCM57765:	return "57765";
15626 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15627 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15628 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15629 	case 0:			return "serdes";
15630 	default:		return "unknown";
15631 	}
15632 }
15633 
15634 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15635 {
15636 	if (tg3_flag(tp, PCI_EXPRESS)) {
15637 		strcpy(str, "PCI Express");
15638 		return str;
15639 	} else if (tg3_flag(tp, PCIX_MODE)) {
15640 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15641 
15642 		strcpy(str, "PCIX:");
15643 
15644 		if ((clock_ctrl == 7) ||
15645 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15646 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15647 			strcat(str, "133MHz");
15648 		else if (clock_ctrl == 0)
15649 			strcat(str, "33MHz");
15650 		else if (clock_ctrl == 2)
15651 			strcat(str, "50MHz");
15652 		else if (clock_ctrl == 4)
15653 			strcat(str, "66MHz");
15654 		else if (clock_ctrl == 6)
15655 			strcat(str, "100MHz");
15656 	} else {
15657 		strcpy(str, "PCI:");
15658 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15659 			strcat(str, "66MHz");
15660 		else
15661 			strcat(str, "33MHz");
15662 	}
15663 	if (tg3_flag(tp, PCI_32BIT))
15664 		strcat(str, ":32-bit");
15665 	else
15666 		strcat(str, ":64-bit");
15667 	return str;
15668 }
15669 
15670 static void __devinit tg3_init_coal(struct tg3 *tp)
15671 {
15672 	struct ethtool_coalesce *ec = &tp->coal;
15673 
15674 	memset(ec, 0, sizeof(*ec));
15675 	ec->cmd = ETHTOOL_GCOALESCE;
15676 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15677 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15678 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15679 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15680 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15681 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15682 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15683 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15684 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15685 
15686 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15687 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15688 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15689 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15690 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15691 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15692 	}
15693 
15694 	if (tg3_flag(tp, 5705_PLUS)) {
15695 		ec->rx_coalesce_usecs_irq = 0;
15696 		ec->tx_coalesce_usecs_irq = 0;
15697 		ec->stats_block_coalesce_usecs = 0;
15698 	}
15699 }
15700 
15701 static int __devinit tg3_init_one(struct pci_dev *pdev,
15702 				  const struct pci_device_id *ent)
15703 {
15704 	struct net_device *dev;
15705 	struct tg3 *tp;
15706 	int i, err, pm_cap;
15707 	u32 sndmbx, rcvmbx, intmbx;
15708 	char str[40];
15709 	u64 dma_mask, persist_dma_mask;
15710 	netdev_features_t features = 0;
15711 
15712 	printk_once(KERN_INFO "%s\n", version);
15713 
15714 	err = pci_enable_device(pdev);
15715 	if (err) {
15716 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15717 		return err;
15718 	}
15719 
15720 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15721 	if (err) {
15722 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15723 		goto err_out_disable_pdev;
15724 	}
15725 
15726 	pci_set_master(pdev);
15727 
15728 	/* Find power-management capability. */
15729 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15730 	if (pm_cap == 0) {
15731 		dev_err(&pdev->dev,
15732 			"Cannot find Power Management capability, aborting\n");
15733 		err = -EIO;
15734 		goto err_out_free_res;
15735 	}
15736 
15737 	err = pci_set_power_state(pdev, PCI_D0);
15738 	if (err) {
15739 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15740 		goto err_out_free_res;
15741 	}
15742 
15743 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15744 	if (!dev) {
15745 		err = -ENOMEM;
15746 		goto err_out_power_down;
15747 	}
15748 
15749 	SET_NETDEV_DEV(dev, &pdev->dev);
15750 
15751 	tp = netdev_priv(dev);
15752 	tp->pdev = pdev;
15753 	tp->dev = dev;
15754 	tp->pm_cap = pm_cap;
15755 	tp->rx_mode = TG3_DEF_RX_MODE;
15756 	tp->tx_mode = TG3_DEF_TX_MODE;
15757 
15758 	if (tg3_debug > 0)
15759 		tp->msg_enable = tg3_debug;
15760 	else
15761 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15762 
	/* The word/byte swap controls here govern register access byte
15764 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15765 	 * setting below.
15766 	 */
15767 	tp->misc_host_ctrl =
15768 		MISC_HOST_CTRL_MASK_PCI_INT |
15769 		MISC_HOST_CTRL_WORD_SWAP |
15770 		MISC_HOST_CTRL_INDIR_ACCESS |
15771 		MISC_HOST_CTRL_PCISTATE_RW;
15772 
	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries and anything else that isn't packet data.
15775 	 *
15776 	 * The StrongARM chips on the board (one for tx, one for rx)
15777 	 * are running in big-endian mode.
15778 	 */
15779 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15780 			GRC_MODE_WSWAP_NONFRM_DATA);
15781 #ifdef __BIG_ENDIAN
15782 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15783 #endif
15784 	spin_lock_init(&tp->lock);
15785 	spin_lock_init(&tp->indirect_lock);
15786 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15787 
15788 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15789 	if (!tp->regs) {
15790 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15791 		err = -ENOMEM;
15792 		goto err_out_free_dev;
15793 	}
15794 
15795 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15796 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15797 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15798 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15799 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15800 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15801 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15802 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15803 		tg3_flag_set(tp, ENABLE_APE);
15804 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15805 		if (!tp->aperegs) {
15806 			dev_err(&pdev->dev,
15807 				"Cannot map APE registers, aborting\n");
15808 			err = -ENOMEM;
15809 			goto err_out_iounmap;
15810 		}
15811 	}
15812 
15813 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15814 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15815 
15816 	dev->ethtool_ops = &tg3_ethtool_ops;
15817 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15818 	dev->netdev_ops = &tg3_netdev_ops;
15819 	dev->irq = pdev->irq;
15820 
15821 	err = tg3_get_invariants(tp);
15822 	if (err) {
15823 		dev_err(&pdev->dev,
15824 			"Problem fetching invariants of chip, aborting\n");
15825 		goto err_out_apeunmap;
15826 	}
15827 
15828 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15829 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15830 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15831 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15832 	 * do DMA address check in tg3_start_xmit().
15833 	 */
15834 	if (tg3_flag(tp, IS_5788))
15835 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15836 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15837 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15838 #ifdef CONFIG_HIGHMEM
15839 		dma_mask = DMA_BIT_MASK(64);
15840 #endif
15841 	} else
15842 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15843 
15844 	/* Configure DMA attributes. */
15845 	if (dma_mask > DMA_BIT_MASK(32)) {
15846 		err = pci_set_dma_mask(pdev, dma_mask);
15847 		if (!err) {
15848 			features |= NETIF_F_HIGHDMA;
15849 			err = pci_set_consistent_dma_mask(pdev,
15850 							  persist_dma_mask);
15851 			if (err < 0) {
15852 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15853 					"DMA for consistent allocations\n");
15854 				goto err_out_apeunmap;
15855 			}
15856 		}
15857 	}
15858 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15859 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15860 		if (err) {
15861 			dev_err(&pdev->dev,
15862 				"No usable DMA configuration, aborting\n");
15863 			goto err_out_apeunmap;
15864 		}
15865 	}
15866 
15867 	tg3_init_bufmgr_config(tp);
15868 
15869 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15870 
15871 	/* 5700 B0 chips do not support checksumming correctly due
15872 	 * to hardware bugs.
15873 	 */
15874 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15875 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15876 
15877 		if (tg3_flag(tp, 5755_PLUS))
15878 			features |= NETIF_F_IPV6_CSUM;
15879 	}
15880 
15881 	/* TSO is on by default on chips that support hardware TSO.
15882 	 * Firmware TSO on older chips gives lower performance, so it
15883 	 * is off by default, but can be enabled using ethtool.
15884 	 */
15885 	if ((tg3_flag(tp, HW_TSO_1) ||
15886 	     tg3_flag(tp, HW_TSO_2) ||
15887 	     tg3_flag(tp, HW_TSO_3)) &&
15888 	    (features & NETIF_F_IP_CSUM))
15889 		features |= NETIF_F_TSO;
15890 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15891 		if (features & NETIF_F_IPV6_CSUM)
15892 			features |= NETIF_F_TSO6;
15893 		if (tg3_flag(tp, HW_TSO_3) ||
15894 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15895 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15896 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15897 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15898 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15899 			features |= NETIF_F_TSO_ECN;
15900 	}
15901 
15902 	dev->features |= features;
15903 	dev->vlan_features |= features;
15904 
15905 	/*
15906 	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
15908 	 * loopback for the remaining devices.
15909 	 */
15910 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15911 	    !tg3_flag(tp, CPMU_PRESENT))
15912 		/* Add the loopback capability */
15913 		features |= NETIF_F_LOOPBACK;
15914 
15915 	dev->hw_features |= features;
15916 
15917 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15918 	    !tg3_flag(tp, TSO_CAPABLE) &&
15919 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15920 		tg3_flag_set(tp, MAX_RXPEND_64);
15921 		tp->rx_pending = 63;
15922 	}
15923 
15924 	err = tg3_get_device_address(tp);
15925 	if (err) {
15926 		dev_err(&pdev->dev,
15927 			"Could not obtain valid ethernet address, aborting\n");
15928 		goto err_out_apeunmap;
15929 	}
15930 
15931 	/*
	 * Reset chip in case a UNDI or EFI driver did not shut down
	 * DMA.  The DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
15935 	 */
15936 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15937 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15938 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15939 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15940 	}
15941 
15942 	err = tg3_test_dma(tp);
15943 	if (err) {
15944 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15945 		goto err_out_apeunmap;
15946 	}
15947 
15948 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15949 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15950 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15951 	for (i = 0; i < tp->irq_max; i++) {
15952 		struct tg3_napi *tnapi = &tp->napi[i];
15953 
15954 		tnapi->tp = tp;
15955 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15956 
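		/* The low interrupt mailboxes are spaced 8 bytes apart;
		 * later vectors fall back to 4-byte spacing.
		 */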
15957 		tnapi->int_mbox = intmbx;
15958 		if (i <= 4)
15959 			intmbx += 0x8;
15960 		else
15961 			intmbx += 0x4;
15962 
15963 		tnapi->consmbox = rcvmbx;
15964 		tnapi->prodmbox = sndmbx;
15965 
15966 		if (i)
15967 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15968 		else
15969 			tnapi->coal_now = HOSTCC_MODE_NOW;
15970 
15971 		if (!tg3_flag(tp, SUPPORT_MSIX))
15972 			break;
15973 
15974 		/*
15975 		 * If we support MSIX, we'll be using RSS.  If we're using
15976 		 * RSS, the first vector only handles link interrupts and the
15977 		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
15980 		 */
15981 		if (!i)
15982 			continue;
15983 
15984 		rcvmbx += 0x8;
15985 
15986 		if (sndmbx & 0x4)
15987 			sndmbx -= 0x4;
15988 		else
15989 			sndmbx += 0xc;
15990 	}
15991 
15992 	tg3_init_coal(tp);
15993 
15994 	pci_set_drvdata(pdev, dev);
15995 
15996 	if (tg3_flag(tp, 5717_PLUS)) {
15997 		/* Resume a low-power mode */
15998 		tg3_frob_aux_power(tp, false);
15999 	}
16000 
16001 	tg3_timer_init(tp);
16002 
16003 	err = register_netdev(dev);
16004 	if (err) {
16005 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16006 		goto err_out_apeunmap;
16007 	}
16008 
16009 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16010 		    tp->board_part_number,
16011 		    tp->pci_chip_rev_id,
16012 		    tg3_bus_string(tp, str),
16013 		    dev->dev_addr);
16014 
16015 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16016 		struct phy_device *phydev;
16017 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16018 		netdev_info(dev,
16019 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16020 			    phydev->drv->name, dev_name(&phydev->dev));
16021 	} else {
16022 		char *ethtype;
16023 
16024 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16025 			ethtype = "10/100Base-TX";
16026 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16027 			ethtype = "1000Base-SX";
16028 		else
16029 			ethtype = "10/100/1000Base-T";
16030 
16031 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16032 			    "(WireSpeed[%d], EEE[%d])\n",
16033 			    tg3_phy_string(tp), ethtype,
16034 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16035 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16036 	}
16037 
16038 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16039 		    (dev->features & NETIF_F_RXCSUM) != 0,
16040 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
16041 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16042 		    tg3_flag(tp, ENABLE_ASF) != 0,
16043 		    tg3_flag(tp, TSO_CAPABLE) != 0);
16044 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16045 		    tp->dma_rwctrl,
16046 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16047 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16048 
16049 	pci_save_state(pdev);
16050 
16051 	return 0;
16052 
16053 err_out_apeunmap:
16054 	if (tp->aperegs) {
16055 		iounmap(tp->aperegs);
16056 		tp->aperegs = NULL;
16057 	}
16058 
16059 err_out_iounmap:
16060 	if (tp->regs) {
16061 		iounmap(tp->regs);
16062 		tp->regs = NULL;
16063 	}
16064 
16065 err_out_free_dev:
16066 	free_netdev(dev);
16067 
16068 err_out_power_down:
16069 	pci_set_power_state(pdev, PCI_D3hot);
16070 
16071 err_out_free_res:
16072 	pci_release_regions(pdev);
16073 
16074 err_out_disable_pdev:
16075 	pci_disable_device(pdev);
16076 	pci_set_drvdata(pdev, NULL);
16077 	return err;
16078 }
16079 
16080 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16081 {
16082 	struct net_device *dev = pci_get_drvdata(pdev);
16083 
16084 	if (dev) {
16085 		struct tg3 *tp = netdev_priv(dev);
16086 
16087 		release_firmware(tp->fw);
16088 
16089 		tg3_reset_task_cancel(tp);
16090 
16091 		if (tg3_flag(tp, USE_PHYLIB)) {
16092 			tg3_phy_fini(tp);
16093 			tg3_mdio_fini(tp);
16094 		}
16095 
16096 		unregister_netdev(dev);
16097 		if (tp->aperegs) {
16098 			iounmap(tp->aperegs);
16099 			tp->aperegs = NULL;
16100 		}
16101 		if (tp->regs) {
16102 			iounmap(tp->regs);
16103 			tp->regs = NULL;
16104 		}
16105 		free_netdev(dev);
16106 		pci_release_regions(pdev);
16107 		pci_disable_device(pdev);
16108 		pci_set_drvdata(pdev, NULL);
16109 	}
16110 }
16111 
16112 #ifdef CONFIG_PM_SLEEP
16113 static int tg3_suspend(struct device *device)
16114 {
16115 	struct pci_dev *pdev = to_pci_dev(device);
16116 	struct net_device *dev = pci_get_drvdata(pdev);
16117 	struct tg3 *tp = netdev_priv(dev);
16118 	int err;
16119 
16120 	if (!netif_running(dev))
16121 		return 0;
16122 
16123 	tg3_reset_task_cancel(tp);
16124 	tg3_phy_stop(tp);
16125 	tg3_netif_stop(tp);
16126 
16127 	tg3_timer_stop(tp);
16128 
16129 	tg3_full_lock(tp, 1);
16130 	tg3_disable_ints(tp);
16131 	tg3_full_unlock(tp);
16132 
16133 	netif_device_detach(dev);
16134 
16135 	tg3_full_lock(tp, 0);
16136 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16137 	tg3_flag_clear(tp, INIT_COMPLETE);
16138 	tg3_full_unlock(tp);
16139 
16140 	err = tg3_power_down_prepare(tp);
16141 	if (err) {
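		/*
		 * Power-down preparation failed; restart the hardware so
		 * the device is left in a usable state.
		 */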
16142 		int err2;
16143 
16144 		tg3_full_lock(tp, 0);
16145 
16146 		tg3_flag_set(tp, INIT_COMPLETE);
16147 		err2 = tg3_restart_hw(tp, 1);
16148 		if (err2)
16149 			goto out;
16150 
16151 		tg3_timer_start(tp);
16152 
16153 		netif_device_attach(dev);
16154 		tg3_netif_start(tp);
16155 
16156 out:
16157 		tg3_full_unlock(tp);
16158 
16159 		if (!err2)
16160 			tg3_phy_start(tp);
16161 	}
16162 
16163 	return err;
16164 }
16165 
16166 static int tg3_resume(struct device *device)
16167 {
16168 	struct pci_dev *pdev = to_pci_dev(device);
16169 	struct net_device *dev = pci_get_drvdata(pdev);
16170 	struct tg3 *tp = netdev_priv(dev);
16171 	int err;
16172 
16173 	if (!netif_running(dev))
16174 		return 0;
16175 
16176 	netif_device_attach(dev);
16177 
16178 	tg3_full_lock(tp, 0);
16179 
16180 	tg3_flag_set(tp, INIT_COMPLETE);
16181 	err = tg3_restart_hw(tp, 1);
16182 	if (err)
16183 		goto out;
16184 
16185 	tg3_timer_start(tp);
16186 
16187 	tg3_netif_start(tp);
16188 
16189 out:
16190 	tg3_full_unlock(tp);
16191 
16192 	if (!err)
16193 		tg3_phy_start(tp);
16194 
16195 	return err;
16196 }
16197 
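/* Wire the suspend/resume callbacks into a dev_pm_ops for the PCI core */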
16198 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16199 #define TG3_PM_OPS (&tg3_pm_ops)
16200 
16201 #else
16202 
16203 #define TG3_PM_OPS NULL
16204 
16205 #endif /* CONFIG_PM_SLEEP */
16206 
16207 /**
 * tg3_io_error_detected - called when a PCI error is detected
16209  * @pdev: Pointer to PCI device
16210  * @state: The current pci connection state
16211  *
16212  * This function is called after a PCI bus error affecting
16213  * this device has been detected.
16214  */
16215 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16216 					      pci_channel_state_t state)
16217 {
16218 	struct net_device *netdev = pci_get_drvdata(pdev);
16219 	struct tg3 *tp = netdev_priv(netdev);
16220 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16221 
16222 	netdev_info(netdev, "PCI I/O error detected\n");
16223 
16224 	rtnl_lock();
16225 
16226 	if (!netif_running(netdev))
16227 		goto done;
16228 
16229 	tg3_phy_stop(tp);
16230 
16231 	tg3_netif_stop(tp);
16232 
16233 	tg3_timer_stop(tp);
16234 
	/* Make sure that the reset task doesn't run */
16236 	tg3_reset_task_cancel(tp);
16237 
16238 	netif_device_detach(netdev);
16239 
16240 	/* Clean up software state, even if MMIO is blocked */
16241 	tg3_full_lock(tp, 0);
16242 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16243 	tg3_full_unlock(tp);
16244 
16245 done:
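	/*
	 * If the error is permanent, tell the PCI core to give up on the
	 * device; otherwise disable it so tg3_io_slot_reset() can bring
	 * it back from a clean state.
	 */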
16246 	if (state == pci_channel_io_perm_failure)
16247 		err = PCI_ERS_RESULT_DISCONNECT;
16248 	else
16249 		pci_disable_device(pdev);
16250 
16251 	rtnl_unlock();
16252 
16253 	return err;
16254 }
16255 
16256 /**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
16258  * @pdev: Pointer to PCI device
16259  *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
16263  * set up identically to what it was at cold boot.
16264  */
16265 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16266 {
16267 	struct net_device *netdev = pci_get_drvdata(pdev);
16268 	struct tg3 *tp = netdev_priv(netdev);
16269 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16270 	int err;
16271 
16272 	rtnl_lock();
16273 
16274 	if (pci_enable_device(pdev)) {
16275 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16276 		goto done;
16277 	}
16278 
16279 	pci_set_master(pdev);
16280 	pci_restore_state(pdev);
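	/* Re-save the restored config space for use by later restores */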
16281 	pci_save_state(pdev);
16282 
16283 	if (!netif_running(netdev)) {
16284 		rc = PCI_ERS_RESULT_RECOVERED;
16285 		goto done;
16286 	}
16287 
16288 	err = tg3_power_up(tp);
16289 	if (err)
16290 		goto done;
16291 
16292 	rc = PCI_ERS_RESULT_RECOVERED;
16293 
16294 done:
16295 	rtnl_unlock();
16296 
16297 	return rc;
16298 }
16299 
16300 /**
16301  * tg3_io_resume - called when traffic can start flowing again.
16302  * @pdev: Pointer to PCI device
16303  *
16304  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
16306  */
16307 static void tg3_io_resume(struct pci_dev *pdev)
16308 {
16309 	struct net_device *netdev = pci_get_drvdata(pdev);
16310 	struct tg3 *tp = netdev_priv(netdev);
16311 	int err;
16312 
16313 	rtnl_lock();
16314 
16315 	if (!netif_running(netdev))
16316 		goto done;
16317 
16318 	tg3_full_lock(tp, 0);
16319 	tg3_flag_set(tp, INIT_COMPLETE);
16320 	err = tg3_restart_hw(tp, 1);
16321 	tg3_full_unlock(tp);
16322 	if (err) {
16323 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16324 		goto done;
16325 	}
16326 
16327 	netif_device_attach(netdev);
16328 
16329 	tg3_timer_start(tp);
16330 
16331 	tg3_netif_start(tp);
16332 
16333 	tg3_phy_start(tp);
16334 
16335 done:
16336 	rtnl_unlock();
16337 }
16338 
16339 static struct pci_error_handlers tg3_err_handler = {
16340 	.error_detected	= tg3_io_error_detected,
16341 	.slot_reset	= tg3_io_slot_reset,
16342 	.resume		= tg3_io_resume
16343 };
16344 
16345 static struct pci_driver tg3_driver = {
16346 	.name		= DRV_MODULE_NAME,
16347 	.id_table	= tg3_pci_tbl,
16348 	.probe		= tg3_init_one,
16349 	.remove		= __devexit_p(tg3_remove_one),
16350 	.err_handler	= &tg3_err_handler,
16351 	.driver.pm	= TG3_PM_OPS,
16352 };
16353 
16354 static int __init tg3_init(void)
16355 {
16356 	return pci_register_driver(&tg3_driver);
16357 }
16358 
16359 static void __exit tg3_cleanup(void)
16360 {
16361 	pci_unregister_driver(&tg3_driver);
16362 }
16363 
16364 module_init(tg3_init);
16365 module_exit(tg3_cleanup);
16366