/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
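
/* With TG3_TX_RING_SIZE fixed at a power of two (512), the mask above
 * turns a modulo into an AND, e.g. NEXT_TX(511) == (512 & 511) == 0,
 * so the producer index wraps without a hardware divide.
 */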

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
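
/* Typical use of the flush variants: tw32_wait_f() is for registers such
 * as TG3PCI_CLOCK_CTRL where a delay must elapse before the write can be
 * trusted, e.g. tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) as done
 * in tg3_switch_clocks() below.
 */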

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
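		/* fall through: non-5761 chips arbitrate the GPIO lock
		 * with the per-function request bits below.
		 */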
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
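		/* fall through: non-5761 chips release the GPIO lock
		 * with the per-function grant bits below.
		 */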
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

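/* Poll until the APE has cleared APE_EVENT_STATUS_EVENT_PENDING; a
 * nonzero return value means timeout_us expired first.
 */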
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
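
/* Together with the udelay(10) polls below, this bounds one MDIO
 * transaction to roughly 5000 * 10 usec = 50 msec before -EBUSY.
 */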

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;
	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
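	/* ~50 msec worst case: 5000 polls at 10 usec apiece. */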
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
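	/* Carve the remaining budget into the 8 usec steps used below. */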
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

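	/* Each word handed to the firmware packs an MII register pair:
	 * the first register of the pair in the high 16 bits, its
	 * partner in the low 16 bits.
	 */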
1646 	val = 0;
1647 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1648 		val = reg << 16;
1649 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1650 		val |= (reg & 0xffff);
1651 	*data++ = val;
1652 
1653 	val = 0;
1654 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1655 		val = reg << 16;
1656 	if (!tg3_readphy(tp, MII_LPA, &reg))
1657 		val |= (reg & 0xffff);
1658 	*data++ = val;
1659 
1660 	val = 0;
1661 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1663 			val = reg << 16;
1664 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665 			val |= (reg & 0xffff);
1666 	}
1667 	*data++ = val;
1668 
1669 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1670 		val = reg << 16;
1671 	else
1672 		val = 0;
1673 	*data++ = val;
1674 }
1675 
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679 	u32 data[4];
1680 
1681 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682 		return;
1683 
1684 	tg3_phy_gather_ump_data(tp, data);
1685 
1686 	tg3_wait_for_event_ack(tp);
1687 
1688 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694 
1695 	tg3_generate_fw_event(tp);
1696 }
1697 
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3 *tp)
1700 {
1701 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702 		/* Wait for RX cpu to ACK the previous event. */
1703 		tg3_wait_for_event_ack(tp);
1704 
1705 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1706 
1707 		tg3_generate_fw_event(tp);
1708 
1709 		/* Wait for RX cpu to ACK this event. */
1710 		tg3_wait_for_event_ack(tp);
1711 	}
1712 }
1713 
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719 
1720 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721 		switch (kind) {
1722 		case RESET_KIND_INIT:
1723 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724 				      DRV_STATE_START);
1725 			break;
1726 
1727 		case RESET_KIND_SHUTDOWN:
1728 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729 				      DRV_STATE_UNLOAD);
1730 			break;
1731 
1732 		case RESET_KIND_SUSPEND:
1733 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 				      DRV_STATE_SUSPEND);
1735 			break;
1736 
1737 		default:
1738 			break;
1739 		}
1740 	}
1741 
1742 	if (kind == RESET_KIND_INIT ||
1743 	    kind == RESET_KIND_SUSPEND)
1744 		tg3_ape_driver_state_change(tp, kind);
1745 }
1746 
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 		switch (kind) {
1752 		case RESET_KIND_INIT:
1753 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 				      DRV_STATE_START_DONE);
1755 			break;
1756 
1757 		case RESET_KIND_SHUTDOWN:
1758 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 				      DRV_STATE_UNLOAD_DONE);
1760 			break;
1761 
1762 		default:
1763 			break;
1764 		}
1765 	}
1766 
1767 	if (kind == RESET_KIND_SHUTDOWN)
1768 		tg3_ape_driver_state_change(tp, kind);
1769 }
1770 
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774 	if (tg3_flag(tp, ENABLE_ASF)) {
1775 		switch (kind) {
1776 		case RESET_KIND_INIT:
1777 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778 				      DRV_STATE_START);
1779 			break;
1780 
1781 		case RESET_KIND_SHUTDOWN:
1782 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 				      DRV_STATE_UNLOAD);
1784 			break;
1785 
1786 		case RESET_KIND_SUSPEND:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_SUSPEND);
1789 			break;
1790 
1791 		default:
1792 			break;
1793 		}
1794 	}
1795 }
1796 
1797 static int tg3_poll_fw(struct tg3 *tp)
1798 {
1799 	int i;
1800 	u32 val;
1801 
1802 	if (tg3_flag(tp, IS_SSB_CORE)) {
1803 		/* We don't use firmware. */
1804 		return 0;
1805 	}
1806 
1807 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 		/* Wait up to 20ms for init done. */
1809 		for (i = 0; i < 200; i++) {
1810 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 				return 0;
1812 			udelay(100);
1813 		}
1814 		return -ENODEV;
1815 	}
1816 
1817 	/* Wait for firmware initialization to complete. */
1818 	for (i = 0; i < 100000; i++) {
1819 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1821 			break;
1822 		udelay(10);
1823 	}
1824 
1825 	/* Chip might not be fitted with firmware.  Some Sun onboard
1826 	 * parts are configured like that.  So don't signal the timeout
1827 	 * of the above loop as an error, but do report the lack of
1828 	 * running firmware once.
1829 	 */
1830 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1832 
1833 		netdev_info(tp->dev, "No firmware running\n");
1834 	}
1835 
1836 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1837 		/* The 57765 A0 needs a little more
1838 		 * time to do some important work.
1839 		 */
1840 		mdelay(10);
1841 	}
1842 
1843 	return 0;
1844 }
1845 
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848 	if (!netif_carrier_ok(tp->dev)) {
1849 		netif_info(tp, link, tp->dev, "Link is down\n");
1850 		tg3_ump_link_report(tp);
1851 	} else if (netif_msg_link(tp)) {
1852 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853 			    (tp->link_config.active_speed == SPEED_1000 ?
1854 			     1000 :
1855 			     (tp->link_config.active_speed == SPEED_100 ?
1856 			      100 : 10)),
1857 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1858 			     "full" : "half"));
1859 
1860 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862 			    "on" : "off",
1863 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864 			    "on" : "off");
1865 
1866 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867 			netdev_info(tp->dev, "EEE is %s\n",
1868 				    tp->setlpicnt ? "enabled" : "disabled");
1869 
1870 		tg3_ump_link_report(tp);
1871 	}
1872 
1873 	tp->link_up = netif_carrier_ok(tp->dev);
1874 }
1875 
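/* Map the requested FLOW_CTRL_TX/FLOW_CTRL_RX bits onto the 1000BASE-X
 * pause advertisement bits.  Note the asymmetry: RX-only flow control
 * must advertise both the symmetric and asymmetric pause bits so the
 * link partner can resolve either combination (IEEE 802.3 Annex 28B).
 */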
1876 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1877 {
1878 	u16 miireg;
1879 
1880 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1881 		miireg = ADVERTISE_1000XPAUSE;
1882 	else if (flow_ctrl & FLOW_CTRL_TX)
1883 		miireg = ADVERTISE_1000XPSE_ASYM;
1884 	else if (flow_ctrl & FLOW_CTRL_RX)
1885 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1886 	else
1887 		miireg = 0;
1888 
1889 	return miireg;
1890 }
1891 
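/* Resolve the flow control outcome from the local and remote 1000BASE-X
 * pause advertisements, following the resolution rules of IEEE 802.3
 * Annex 28B.
 */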
1892 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1893 {
1894 	u8 cap = 0;
1895 
1896 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1897 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1898 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1899 		if (lcladv & ADVERTISE_1000XPAUSE)
1900 			cap = FLOW_CTRL_RX;
1901 		if (rmtadv & ADVERTISE_1000XPAUSE)
1902 			cap = FLOW_CTRL_TX;
1903 	}
1904 
1905 	return cap;
1906 }
1907 
1908 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1909 {
1910 	u8 autoneg;
1911 	u8 flowctrl = 0;
1912 	u32 old_rx_mode = tp->rx_mode;
1913 	u32 old_tx_mode = tp->tx_mode;
1914 
1915 	if (tg3_flag(tp, USE_PHYLIB))
1916 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1917 	else
1918 		autoneg = tp->link_config.autoneg;
1919 
1920 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1921 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1922 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1923 		else
1924 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1925 	} else
1926 		flowctrl = tp->link_config.flowctrl;
1927 
1928 	tp->link_config.active_flowctrl = flowctrl;
1929 
1930 	if (flowctrl & FLOW_CTRL_RX)
1931 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1932 	else
1933 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1934 
1935 	if (old_rx_mode != tp->rx_mode)
1936 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1937 
1938 	if (flowctrl & FLOW_CTRL_TX)
1939 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1940 	else
1941 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1942 
1943 	if (old_tx_mode != tp->tx_mode)
1944 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1945 }
1946 
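/* phylib adjust_link callback (registered in tg3_phy_init() below):
 * reprogram the MAC for the speed, duplex and flow control the PHY
 * reports, and emit a link report if anything changed.
 */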
1947 static void tg3_adjust_link(struct net_device *dev)
1948 {
1949 	u8 oldflowctrl, linkmesg = 0;
1950 	u32 mac_mode, lcl_adv, rmt_adv;
1951 	struct tg3 *tp = netdev_priv(dev);
1952 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1953 
1954 	spin_lock_bh(&tp->lock);
1955 
1956 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1957 				    MAC_MODE_HALF_DUPLEX);
1958 
1959 	oldflowctrl = tp->link_config.active_flowctrl;
1960 
1961 	if (phydev->link) {
1962 		lcl_adv = 0;
1963 		rmt_adv = 0;
1964 
1965 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1966 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1967 		else if (phydev->speed == SPEED_1000 ||
1968 			 tg3_asic_rev(tp) != ASIC_REV_5785)
1969 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1970 		else
1971 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1972 
1973 		if (phydev->duplex == DUPLEX_HALF)
1974 			mac_mode |= MAC_MODE_HALF_DUPLEX;
1975 		else {
1976 			lcl_adv = mii_advertise_flowctrl(
1977 				  tp->link_config.flowctrl);
1978 
1979 			if (phydev->pause)
1980 				rmt_adv = LPA_PAUSE_CAP;
1981 			if (phydev->asym_pause)
1982 				rmt_adv |= LPA_PAUSE_ASYM;
1983 		}
1984 
1985 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1986 	} else
1987 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1988 
1989 	if (mac_mode != tp->mac_mode) {
1990 		tp->mac_mode = mac_mode;
1991 		tw32_f(MAC_MODE, tp->mac_mode);
1992 		udelay(40);
1993 	}
1994 
1995 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1996 		if (phydev->speed == SPEED_10)
1997 			tw32(MAC_MI_STAT,
1998 			     MAC_MI_STAT_10MBPS_MODE |
1999 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000 		else
2001 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2002 	}
2003 
2004 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2005 		tw32(MAC_TX_LENGTHS,
2006 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2007 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2008 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2009 	else
2010 		tw32(MAC_TX_LENGTHS,
2011 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2012 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2013 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2014 
2015 	if (phydev->link != tp->old_link ||
2016 	    phydev->speed != tp->link_config.active_speed ||
2017 	    phydev->duplex != tp->link_config.active_duplex ||
2018 	    oldflowctrl != tp->link_config.active_flowctrl)
2019 		linkmesg = 1;
2020 
2021 	tp->old_link = phydev->link;
2022 	tp->link_config.active_speed = phydev->speed;
2023 	tp->link_config.active_duplex = phydev->duplex;
2024 
2025 	spin_unlock_bh(&tp->lock);
2026 
2027 	if (linkmesg)
2028 		tg3_link_report(tp);
2029 }
2030 
2031 static int tg3_phy_init(struct tg3 *tp)
2032 {
2033 	struct phy_device *phydev;
2034 
2035 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2036 		return 0;
2037 
2038 	/* Bring the PHY back to a known state. */
2039 	tg3_bmcr_reset(tp);
2040 
2041 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2042 
2043 	/* Attach the MAC to the PHY. */
2044 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2045 			     tg3_adjust_link, phydev->interface);
2046 	if (IS_ERR(phydev)) {
2047 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2048 		return PTR_ERR(phydev);
2049 	}
2050 
2051 	/* Mask with MAC supported features. */
2052 	switch (phydev->interface) {
2053 	case PHY_INTERFACE_MODE_GMII:
2054 	case PHY_INTERFACE_MODE_RGMII:
2055 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2056 			phydev->supported &= (PHY_GBIT_FEATURES |
2057 					      SUPPORTED_Pause |
2058 					      SUPPORTED_Asym_Pause);
2059 			break;
2060 		}
2061 		/* fallthru */
2062 	case PHY_INTERFACE_MODE_MII:
2063 		phydev->supported &= (PHY_BASIC_FEATURES |
2064 				      SUPPORTED_Pause |
2065 				      SUPPORTED_Asym_Pause);
2066 		break;
2067 	default:
2068 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2069 		return -EINVAL;
2070 	}
2071 
2072 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2073 
2074 	phydev->advertising = phydev->supported;
2075 
2076 	return 0;
2077 }
2078 
2079 static void tg3_phy_start(struct tg3 *tp)
2080 {
2081 	struct phy_device *phydev;
2082 
2083 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2084 		return;
2085 
2086 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2087 
2088 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2089 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2090 		phydev->speed = tp->link_config.speed;
2091 		phydev->duplex = tp->link_config.duplex;
2092 		phydev->autoneg = tp->link_config.autoneg;
2093 		phydev->advertising = tp->link_config.advertising;
2094 	}
2095 
2096 	phy_start(phydev);
2097 
2098 	phy_start_aneg(phydev);
2099 }
2100 
2101 static void tg3_phy_stop(struct tg3 *tp)
2102 {
2103 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2104 		return;
2105 
2106 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2107 }
2108 
2109 static void tg3_phy_fini(struct tg3 *tp)
2110 {
2111 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2112 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2113 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2114 	}
2115 }
2116 
2117 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2118 {
2119 	int err;
2120 	u32 val;
2121 
2122 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2123 		return 0;
2124 
2125 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2126 		/* Cannot do read-modify-write on 5401 */
2127 		err = tg3_phy_auxctl_write(tp,
2128 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2129 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2130 					   0x4c20);
2131 		goto done;
2132 	}
2133 
2134 	err = tg3_phy_auxctl_read(tp,
2135 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2136 	if (err)
2137 		return err;
2138 
2139 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2140 	err = tg3_phy_auxctl_write(tp,
2141 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2142 
2143 done:
2144 	return err;
2145 }
2146 
2147 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2148 {
2149 	u32 phytest;
2150 
2151 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2152 		u32 phy;
2153 
2154 		tg3_writephy(tp, MII_TG3_FET_TEST,
2155 			     phytest | MII_TG3_FET_SHADOW_EN);
2156 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2157 			if (enable)
2158 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 			else
2160 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2161 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2162 		}
2163 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2164 	}
2165 }
2166 
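/* Enable or disable the PHY's auto power-down (APD) mode via the misc
 * shadow registers, programming an 84 ms wake timer and setting the
 * enable bit only when APD is requested.
 */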
2167 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2168 {
2169 	u32 reg;
2170 
2171 	if (!tg3_flag(tp, 5705_PLUS) ||
2172 	    (tg3_flag(tp, 5717_PLUS) &&
2173 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2174 		return;
2175 
2176 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2177 		tg3_phy_fet_toggle_apd(tp, enable);
2178 		return;
2179 	}
2180 
2181 	reg = MII_TG3_MISC_SHDW_WREN |
2182 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2183 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2184 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2185 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2186 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2187 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2188 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2189 
2190 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2191 
2193 	reg = MII_TG3_MISC_SHDW_WREN |
2194 	      MII_TG3_MISC_SHDW_APD_SEL |
2195 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2196 	if (enable)
2197 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2198 
2199 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2200 }
2201 
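/* Force automatic MDI/MDI-X crossover on or off.  FET-style PHYs keep
 * the control bit behind the MII_TG3_FET_TEST shadow latch; other PHYs
 * use the AUXCTL misc shadow register.
 */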
2202 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2203 {
2204 	u32 phy;
2205 
2206 	if (!tg3_flag(tp, 5705_PLUS) ||
2207 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2208 		return;
2209 
2210 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2211 		u32 ephy;
2212 
2213 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2214 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2215 
2216 			tg3_writephy(tp, MII_TG3_FET_TEST,
2217 				     ephy | MII_TG3_FET_SHADOW_EN);
2218 			if (!tg3_readphy(tp, reg, &phy)) {
2219 				if (enable)
2220 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 				else
2222 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2223 				tg3_writephy(tp, reg, phy);
2224 			}
2225 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2226 		}
2227 	} else {
2228 		int ret;
2229 
2230 		ret = tg3_phy_auxctl_read(tp,
2231 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2232 		if (!ret) {
2233 			if (enable)
2234 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 			else
2236 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2237 			tg3_phy_auxctl_write(tp,
2238 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2239 		}
2240 	}
2241 }
2242 
2243 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2244 {
2245 	int ret;
2246 	u32 val;
2247 
2248 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2249 		return;
2250 
2251 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2252 	if (!ret)
2253 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2254 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2255 }
2256 
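/* Fan out the factory-programmed OTP word into the DSP tap and
 * expansion registers it parameterizes (AGC target, HPF, LPF, VDAC,
 * 10BT amplitude and offsets).
 */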
2257 static void tg3_phy_apply_otp(struct tg3 *tp)
2258 {
2259 	u32 otp, phy;
2260 
2261 	if (!tp->phy_otp)
2262 		return;
2263 
2264 	otp = tp->phy_otp;
2265 
2266 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2267 		return;
2268 
2269 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2270 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2271 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2272 
2273 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2274 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2275 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2276 
2277 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2278 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2279 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2280 
2281 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2282 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2283 
2284 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2285 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2286 
2287 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2288 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2289 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2290 
2291 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2292 }
2293 
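/* Re-evaluate EEE state after a link change: pick the LPI exit timer
 * for the negotiated speed, and only leave LPI armed when the link
 * partner resolved to EEE at 100TX or 1000T full duplex.
 */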
2294 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2295 {
2296 	u32 val;
2297 
2298 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2299 		return;
2300 
2301 	tp->setlpicnt = 0;
2302 
2303 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2304 	    current_link_up == 1 &&
2305 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2306 	    (tp->link_config.active_speed == SPEED_100 ||
2307 	     tp->link_config.active_speed == SPEED_1000)) {
2308 		u32 eeectl;
2309 
2310 		if (tp->link_config.active_speed == SPEED_1000)
2311 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2312 		else
2313 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2314 
2315 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2316 
2317 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2318 				  TG3_CL45_D7_EEERES_STAT, &val);
2319 
2320 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2321 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2322 			tp->setlpicnt = 2;
2323 	}
2324 
2325 	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2328 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2329 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2330 		}
2331 
2332 		val = tr32(TG3_CPMU_EEE_MODE);
2333 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2334 	}
2335 }
2336 
2337 static void tg3_phy_eee_enable(struct tg3 *tp)
2338 {
2339 	u32 val;
2340 
2341 	if (tp->link_config.active_speed == SPEED_1000 &&
2342 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2343 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2344 	     tg3_flag(tp, 57765_CLASS)) &&
2345 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2346 		val = MII_TG3_DSP_TAP26_ALNOKO |
2347 		      MII_TG3_DSP_TAP26_RMRXSTO;
2348 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2349 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2350 	}
2351 
2352 	val = tr32(TG3_CPMU_EEE_MODE);
2353 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2354 }
2355 
2356 static int tg3_wait_macro_done(struct tg3 *tp)
2357 {
2358 	int limit = 100;
2359 
2360 	while (limit--) {
2361 		u32 tmp32;
2362 
2363 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2364 			if ((tmp32 & 0x1000) == 0)
2365 				break;
2366 		}
2367 	}
2368 	if (limit < 0)
2369 		return -EBUSY;
2370 
2371 	return 0;
2372 }
2373 
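/* Write a known test pattern into each of the four DSP TX channels and
 * read it back to verify the PHY took it; used by the 5703/5704/5705
 * PHY reset workaround below.
 */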
2374 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2375 {
2376 	static const u32 test_pat[4][6] = {
2377 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2378 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2379 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2380 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2381 	};
2382 	int chan;
2383 
2384 	for (chan = 0; chan < 4; chan++) {
2385 		int i;
2386 
2387 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2388 			     (chan * 0x2000) | 0x0200);
2389 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2390 
2391 		for (i = 0; i < 6; i++)
2392 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2393 				     test_pat[chan][i]);
2394 
2395 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2396 		if (tg3_wait_macro_done(tp)) {
2397 			*resetp = 1;
2398 			return -EBUSY;
2399 		}
2400 
2401 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2402 			     (chan * 0x2000) | 0x0200);
2403 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2404 		if (tg3_wait_macro_done(tp)) {
2405 			*resetp = 1;
2406 			return -EBUSY;
2407 		}
2408 
2409 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2410 		if (tg3_wait_macro_done(tp)) {
2411 			*resetp = 1;
2412 			return -EBUSY;
2413 		}
2414 
2415 		for (i = 0; i < 6; i += 2) {
2416 			u32 low, high;
2417 
2418 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2419 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2420 			    tg3_wait_macro_done(tp)) {
2421 				*resetp = 1;
2422 				return -EBUSY;
2423 			}
2424 			low &= 0x7fff;
2425 			high &= 0x000f;
2426 			if (low != test_pat[chan][i] ||
2427 			    high != test_pat[chan][i+1]) {
2428 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2429 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2430 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2431 
2432 				return -EBUSY;
2433 			}
2434 		}
2435 	}
2436 
2437 	return 0;
2438 }
2439 
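/* Zero out the DSP channel test patterns written above. */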
2440 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2441 {
2442 	int chan;
2443 
2444 	for (chan = 0; chan < 4; chan++) {
2445 		int i;
2446 
2447 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2448 			     (chan * 0x2000) | 0x0200);
2449 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2450 		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2452 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2453 		if (tg3_wait_macro_done(tp))
2454 			return -EBUSY;
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2461 {
2462 	u32 reg32, phy9_orig;
2463 	int retries, do_phy_reset, err;
2464 
2465 	retries = 10;
2466 	do_phy_reset = 1;
2467 	do {
2468 		if (do_phy_reset) {
2469 			err = tg3_bmcr_reset(tp);
2470 			if (err)
2471 				return err;
2472 			do_phy_reset = 0;
2473 		}
2474 
2475 		/* Disable transmitter and interrupt.  */
2476 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2477 			continue;
2478 
2479 		reg32 |= 0x3000;
2480 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481 
2482 		/* Set full-duplex, 1000 mbps.  */
2483 		tg3_writephy(tp, MII_BMCR,
2484 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2485 
2486 		/* Set to master mode.  */
2487 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2488 			continue;
2489 
2490 		tg3_writephy(tp, MII_CTRL1000,
2491 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2492 
2493 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2494 		if (err)
2495 			return err;
2496 
2497 		/* Block the PHY control access.  */
2498 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2499 
2500 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2501 		if (!err)
2502 			break;
2503 	} while (--retries);
2504 
2505 	err = tg3_phy_reset_chanpat(tp);
2506 	if (err)
2507 		return err;
2508 
2509 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2510 
2511 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2512 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2513 
2514 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2515 
2516 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2517 
2518 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2519 		reg32 &= ~0x3000;
2520 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2521 	} else if (!err)
2522 		err = -EBUSY;
2523 
2524 	return err;
2525 }
2526 
2527 static void tg3_carrier_off(struct tg3 *tp)
2528 {
2529 	netif_carrier_off(tp->dev);
2530 	tp->link_up = false;
2531 }
2532 
/* Reset the tigon3 PHY and apply the chip- and PHY-specific workarounds
 * that must follow a reset.
 */
2536 static int tg3_phy_reset(struct tg3 *tp)
2537 {
2538 	u32 val, cpmuctrl;
2539 	int err;
2540 
2541 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2542 		val = tr32(GRC_MISC_CFG);
2543 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2544 		udelay(40);
2545 	}
2546 	err  = tg3_readphy(tp, MII_BMSR, &val);
2547 	err |= tg3_readphy(tp, MII_BMSR, &val);
2548 	if (err != 0)
2549 		return -EBUSY;
2550 
2551 	if (netif_running(tp->dev) && tp->link_up) {
2552 		netif_carrier_off(tp->dev);
2553 		tg3_link_report(tp);
2554 	}
2555 
2556 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2557 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2558 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2559 		err = tg3_phy_reset_5703_4_5(tp);
2560 		if (err)
2561 			return err;
2562 		goto out;
2563 	}
2564 
2565 	cpmuctrl = 0;
2566 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2567 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2568 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2569 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2570 			tw32(TG3_CPMU_CTRL,
2571 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2572 	}
2573 
2574 	err = tg3_bmcr_reset(tp);
2575 	if (err)
2576 		return err;
2577 
2578 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2579 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2580 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2581 
2582 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2583 	}
2584 
2585 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2586 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2587 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2588 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2589 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2590 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2591 			udelay(40);
2592 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2593 		}
2594 	}
2595 
2596 	if (tg3_flag(tp, 5717_PLUS) &&
2597 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2598 		return 0;
2599 
2600 	tg3_phy_apply_otp(tp);
2601 
2602 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2603 		tg3_phy_toggle_apd(tp, true);
2604 	else
2605 		tg3_phy_toggle_apd(tp, false);
2606 
2607 out:
2608 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2609 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2610 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2611 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2612 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2613 	}
2614 
2615 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2616 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2617 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2618 	}
2619 
2620 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2621 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2622 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2623 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2624 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2625 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2626 		}
2627 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2628 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2629 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2630 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2631 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2632 				tg3_writephy(tp, MII_TG3_TEST1,
2633 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2634 			} else
2635 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2636 
2637 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2638 		}
2639 	}
2640 
	/* Set the extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2643 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2644 		/* Cannot do read-modify-write on 5401 */
2645 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2646 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2647 		/* Set bit 14 with read-modify-write to preserve other bits */
2648 		err = tg3_phy_auxctl_read(tp,
2649 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2650 		if (!err)
2651 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2652 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2653 	}
2654 
2655 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2656 	 * jumbo frames transmission.
2657 	 */
2658 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2659 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2660 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2661 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2662 	}
2663 
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2665 		/* adjust output voltage */
2666 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2667 	}
2668 
2669 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2670 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2671 
2672 	tg3_phy_toggle_automdix(tp, 1);
2673 	tg3_phy_set_wirespeed(tp);
2674 	return 0;
2675 }
2676 
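/* Power-source handshake between PCI functions: each function owns a
 * 4-bit nibble of the shared status word, so the "ALL" masks below
 * replicate a flag at bit offsets 0, 4, 8 and 12.
 */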
2677 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2678 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2679 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2680 					  TG3_GPIO_MSG_NEED_VAUX)
2681 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2682 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2683 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2684 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2685 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2686 
2687 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2688 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2689 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2690 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2691 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2692 
2693 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2694 {
2695 	u32 status, shift;
2696 
2697 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2698 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2699 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2700 	else
2701 		status = tr32(TG3_CPMU_DRV_STATUS);
2702 
2703 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2704 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2705 	status |= (newstat << shift);
2706 
2707 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2708 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2709 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2710 	else
2711 		tw32(TG3_CPMU_DRV_STATUS, status);
2712 
2713 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2714 }
2715 
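/* Switch the NIC back to the main power source.  On 5717/5719/5720 the
 * GPIOs are shared between functions, so the update is serialized with
 * the APE GPIO lock and advertised via the function status nibble.
 */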
2716 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2717 {
2718 	if (!tg3_flag(tp, IS_NIC))
2719 		return 0;
2720 
2721 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2722 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2723 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2724 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2725 			return -EIO;
2726 
2727 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2728 
2729 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2730 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2731 
2732 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2733 	} else {
2734 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2735 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2736 	}
2737 
2738 	return 0;
2739 }
2740 
2741 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2742 {
2743 	u32 grc_local_ctrl;
2744 
2745 	if (!tg3_flag(tp, IS_NIC) ||
2746 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2747 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2748 		return;
2749 
2750 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2751 
2752 	tw32_wait_f(GRC_LOCAL_CTRL,
2753 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2754 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2755 
2756 	tw32_wait_f(GRC_LOCAL_CTRL,
2757 		    grc_local_ctrl,
2758 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2759 
2760 	tw32_wait_f(GRC_LOCAL_CTRL,
2761 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2763 }
2764 
2765 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2766 {
2767 	if (!tg3_flag(tp, IS_NIC))
2768 		return;
2769 
2770 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2771 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2772 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2773 			    (GRC_LCLCTRL_GPIO_OE0 |
2774 			     GRC_LCLCTRL_GPIO_OE1 |
2775 			     GRC_LCLCTRL_GPIO_OE2 |
2776 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2777 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2778 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2779 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2780 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2781 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2782 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2783 				     GRC_LCLCTRL_GPIO_OE1 |
2784 				     GRC_LCLCTRL_GPIO_OE2 |
2785 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2786 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2787 				     tp->grc_local_ctrl;
2788 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2789 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2790 
2791 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2792 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2794 
2795 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2796 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2798 	} else {
2799 		u32 no_gpio2;
2800 		u32 grc_local_ctrl = 0;
2801 
		/* Workaround to prevent the part from drawing too much
		 * current.
		 */
2803 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2804 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2805 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2806 				    grc_local_ctrl,
2807 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2808 		}
2809 
2810 		/* On 5753 and variants, GPIO2 cannot be used. */
2811 		no_gpio2 = tp->nic_sram_data_cfg &
2812 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2813 
2814 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2815 				  GRC_LCLCTRL_GPIO_OE1 |
2816 				  GRC_LCLCTRL_GPIO_OE2 |
2817 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2818 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2819 		if (no_gpio2) {
2820 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2821 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2822 		}
2823 		tw32_wait_f(GRC_LOCAL_CTRL,
2824 			    tp->grc_local_ctrl | grc_local_ctrl,
2825 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 
2827 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2828 
2829 		tw32_wait_f(GRC_LOCAL_CTRL,
2830 			    tp->grc_local_ctrl | grc_local_ctrl,
2831 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 
2833 		if (!no_gpio2) {
2834 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2835 			tw32_wait_f(GRC_LOCAL_CTRL,
2836 				    tp->grc_local_ctrl | grc_local_ctrl,
2837 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2838 		}
2839 	}
2840 }
2841 
2842 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2843 {
2844 	u32 msg = 0;
2845 
2846 	/* Serialize power state transitions */
2847 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2848 		return;
2849 
2850 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2851 		msg = TG3_GPIO_MSG_NEED_VAUX;
2852 
2853 	msg = tg3_set_function_status(tp, msg);
2854 
2855 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2856 		goto done;
2857 
2858 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2859 		tg3_pwrsrc_switch_to_vaux(tp);
2860 	else
2861 		tg3_pwrsrc_die_with_vmain(tp);
2862 
2863 done:
2864 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2865 }
2866 
2867 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2868 {
2869 	bool need_vaux = false;
2870 
2871 	/* The GPIOs do something completely different on 57765. */
2872 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2873 		return;
2874 
2875 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2876 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2877 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2878 		tg3_frob_aux_power_5717(tp, include_wol ?
2879 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2880 		return;
2881 	}
2882 
2883 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2884 		struct net_device *dev_peer;
2885 
2886 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2887 
2888 		/* remove_one() may have been run on the peer. */
2889 		if (dev_peer) {
2890 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2891 
2892 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2893 				return;
2894 
2895 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2896 			    tg3_flag(tp_peer, ENABLE_ASF))
2897 				need_vaux = true;
2898 		}
2899 	}
2900 
2901 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2902 	    tg3_flag(tp, ENABLE_ASF))
2903 		need_vaux = true;
2904 
2905 	if (need_vaux)
2906 		tg3_pwrsrc_switch_to_vaux(tp);
2907 	else
2908 		tg3_pwrsrc_die_with_vmain(tp);
2909 }
2910 
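/* Returns nonzero when the link polarity must be inverted for the given
 * speed on 5700-class devices; the answer depends on the LED control
 * mode and on whether a BCM5411 PHY is installed.
 */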
2911 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2912 {
2913 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2914 		return 1;
2915 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2916 		if (speed != SPEED_10)
2917 			return 1;
2918 	} else if (speed == SPEED_10)
2919 		return 1;
2920 
2921 	return 0;
2922 }
2923 
2924 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2925 {
2926 	u32 val;
2927 
2928 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2929 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2930 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2931 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2932 
2933 			sg_dig_ctrl |=
2934 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2935 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2936 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2937 		}
2938 		return;
2939 	}
2940 
2941 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2942 		tg3_bmcr_reset(tp);
2943 		val = tr32(GRC_MISC_CFG);
2944 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2945 		udelay(40);
2946 		return;
2947 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2948 		u32 phytest;
2949 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2950 			u32 phy;
2951 
2952 			tg3_writephy(tp, MII_ADVERTISE, 0);
2953 			tg3_writephy(tp, MII_BMCR,
2954 				     BMCR_ANENABLE | BMCR_ANRESTART);
2955 
2956 			tg3_writephy(tp, MII_TG3_FET_TEST,
2957 				     phytest | MII_TG3_FET_SHADOW_EN);
2958 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2959 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2960 				tg3_writephy(tp,
2961 					     MII_TG3_FET_SHDW_AUXMODE4,
2962 					     phy);
2963 			}
2964 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2965 		}
2966 		return;
2967 	} else if (do_low_power) {
2968 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2969 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2970 
2971 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2972 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2973 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2974 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2975 	}
2976 
2977 	/* The PHY should not be powered down on some chips because
2978 	 * of bugs.
2979 	 */
2980 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2981 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2982 	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2983 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2984 	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2985 	     !tp->pci_fn))
2986 		return;
2987 
2988 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2989 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2990 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2991 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2992 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2993 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2994 	}
2995 
2996 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2997 }
2998 
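/* Acquire the NVRAM software arbitration semaphore.  The lock is
 * reference counted, and the hardware grant is polled for up to 160 ms
 * (8000 polls of 20 usec) before giving up.
 */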
2999 /* tp->lock is held. */
3000 static int tg3_nvram_lock(struct tg3 *tp)
3001 {
3002 	if (tg3_flag(tp, NVRAM)) {
3003 		int i;
3004 
3005 		if (tp->nvram_lock_cnt == 0) {
3006 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3007 			for (i = 0; i < 8000; i++) {
3008 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3009 					break;
3010 				udelay(20);
3011 			}
3012 			if (i == 8000) {
3013 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3014 				return -ENODEV;
3015 			}
3016 		}
3017 		tp->nvram_lock_cnt++;
3018 	}
3019 	return 0;
3020 }
3021 
3022 /* tp->lock is held. */
3023 static void tg3_nvram_unlock(struct tg3 *tp)
3024 {
3025 	if (tg3_flag(tp, NVRAM)) {
3026 		if (tp->nvram_lock_cnt > 0)
3027 			tp->nvram_lock_cnt--;
3028 		if (tp->nvram_lock_cnt == 0)
3029 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3030 	}
3031 }
3032 
3033 /* tp->lock is held. */
3034 static void tg3_enable_nvram_access(struct tg3 *tp)
3035 {
3036 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3037 		u32 nvaccess = tr32(NVRAM_ACCESS);
3038 
3039 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3040 	}
3041 }
3042 
3043 /* tp->lock is held. */
3044 static void tg3_disable_nvram_access(struct tg3 *tp)
3045 {
3046 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3047 		u32 nvaccess = tr32(NVRAM_ACCESS);
3048 
3049 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3050 	}
3051 }
3052 
3053 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3054 					u32 offset, u32 *val)
3055 {
3056 	u32 tmp;
3057 	int i;
3058 
3059 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3060 		return -EINVAL;
3061 
3062 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3063 					EEPROM_ADDR_DEVID_MASK |
3064 					EEPROM_ADDR_READ);
3065 	tw32(GRC_EEPROM_ADDR,
3066 	     tmp |
3067 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3068 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3069 	      EEPROM_ADDR_ADDR_MASK) |
3070 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3071 
3072 	for (i = 0; i < 1000; i++) {
3073 		tmp = tr32(GRC_EEPROM_ADDR);
3074 
3075 		if (tmp & EEPROM_ADDR_COMPLETE)
3076 			break;
3077 		msleep(1);
3078 	}
3079 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3080 		return -EBUSY;
3081 
3082 	tmp = tr32(GRC_EEPROM_DATA);
3083 
3084 	/*
3085 	 * The data will always be opposite the native endian
3086 	 * format.  Perform a blind byteswap to compensate.
3087 	 */
3088 	*val = swab32(tmp);
3089 
3090 	return 0;
3091 }
3092 
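/* Number of 10 usec polls for NVRAM_CMD_DONE, i.e. a worst-case wait of
 * about 100 ms per NVRAM command.
 */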
3093 #define NVRAM_CMD_TIMEOUT 10000
3094 
3095 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3096 {
3097 	int i;
3098 
3099 	tw32(NVRAM_CMD, nvram_cmd);
3100 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3101 		udelay(10);
3102 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3103 			udelay(10);
3104 			break;
3105 		}
3106 	}
3107 
3108 	if (i == NVRAM_CMD_TIMEOUT)
3109 		return -EBUSY;
3110 
3111 	return 0;
3112 }
3113 
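/* Translate a flat NVRAM byte offset into the page/byte form used by
 * buffered Atmel AT45DB0X1B-style flashes, whose pages are not a power
 * of two in size: the page index is shifted into the high bits and the
 * offset within the page occupies the low bits.
 */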
3114 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3115 {
3116 	if (tg3_flag(tp, NVRAM) &&
3117 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3118 	    tg3_flag(tp, FLASH) &&
3119 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3120 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3121 
3122 		addr = ((addr / tp->nvram_pagesize) <<
3123 			ATMEL_AT45DB0X1B_PAGE_POS) +
3124 		       (addr % tp->nvram_pagesize);
3125 
3126 	return addr;
3127 }
3128 
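/* The inverse of tg3_nvram_phys_addr(): collapse a page/byte style
 * address back into a flat byte offset.
 */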
3129 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3130 {
3131 	if (tg3_flag(tp, NVRAM) &&
3132 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3133 	    tg3_flag(tp, FLASH) &&
3134 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3135 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3136 
3137 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3138 			tp->nvram_pagesize) +
3139 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3140 
3141 	return addr;
3142 }
3143 
3144 /* NOTE: Data read in from NVRAM is byteswapped according to
3145  * the byteswapping settings for all other register accesses.
3146  * tg3 devices are BE devices, so on a BE machine, the data
3147  * returned will be exactly as it is seen in NVRAM.  On a LE
3148  * machine, the 32-bit value will be byteswapped.
3149  */
3150 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3151 {
3152 	int ret;
3153 
3154 	if (!tg3_flag(tp, NVRAM))
3155 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3156 
3157 	offset = tg3_nvram_phys_addr(tp, offset);
3158 
3159 	if (offset > NVRAM_ADDR_MSK)
3160 		return -EINVAL;
3161 
3162 	ret = tg3_nvram_lock(tp);
3163 	if (ret)
3164 		return ret;
3165 
3166 	tg3_enable_nvram_access(tp);
3167 
3168 	tw32(NVRAM_ADDR, offset);
3169 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3170 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3171 
3172 	if (ret == 0)
3173 		*val = tr32(NVRAM_RDDATA);
3174 
3175 	tg3_disable_nvram_access(tp);
3176 
3177 	tg3_nvram_unlock(tp);
3178 
3179 	return ret;
3180 }
3181 
3182 /* Ensures NVRAM data is in bytestream format. */
3183 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3184 {
3185 	u32 v;
3186 	int res = tg3_nvram_read(tp, offset, &v);
3187 	if (!res)
3188 		*val = cpu_to_be32(v);
3189 	return res;
3190 }
3191 
3192 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3193 				    u32 offset, u32 len, u8 *buf)
3194 {
3195 	int i, j, rc = 0;
3196 	u32 val;
3197 
3198 	for (i = 0; i < len; i += 4) {
3199 		u32 addr;
3200 		__be32 data;
3201 
3202 		addr = offset + i;
3203 
3204 		memcpy(&data, buf + i, 4);
3205 
3206 		/*
3207 		 * The SEEPROM interface expects the data to always be opposite
3208 		 * the native endian format.  We accomplish this by reversing
3209 		 * all the operations that would have been performed on the
3210 		 * data from a call to tg3_nvram_read_be32().
3211 		 */
3212 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3213 
3214 		val = tr32(GRC_EEPROM_ADDR);
3215 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3216 
3217 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3218 			EEPROM_ADDR_READ);
3219 		tw32(GRC_EEPROM_ADDR, val |
3220 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3221 			(addr & EEPROM_ADDR_ADDR_MASK) |
3222 			EEPROM_ADDR_START |
3223 			EEPROM_ADDR_WRITE);
3224 
3225 		for (j = 0; j < 1000; j++) {
3226 			val = tr32(GRC_EEPROM_ADDR);
3227 
3228 			if (val & EEPROM_ADDR_COMPLETE)
3229 				break;
3230 			msleep(1);
3231 		}
3232 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3233 			rc = -EBUSY;
3234 			break;
3235 		}
3236 	}
3237 
3238 	return rc;
3239 }
3240 
3241 /* offset and length are dword aligned */
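/* Unbuffered flash can only be programmed a full page at a time, so
 * each page touched by the write is read into a bounce buffer, merged
 * with the new data, erased, and programmed back word by word.
 */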
3242 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3243 		u8 *buf)
3244 {
3245 	int ret = 0;
3246 	u32 pagesize = tp->nvram_pagesize;
3247 	u32 pagemask = pagesize - 1;
3248 	u32 nvram_cmd;
3249 	u8 *tmp;
3250 
3251 	tmp = kmalloc(pagesize, GFP_KERNEL);
3252 	if (tmp == NULL)
3253 		return -ENOMEM;
3254 
3255 	while (len) {
3256 		int j;
3257 		u32 phy_addr, page_off, size;
3258 
3259 		phy_addr = offset & ~pagemask;
3260 
3261 		for (j = 0; j < pagesize; j += 4) {
3262 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3263 						  (__be32 *) (tmp + j));
3264 			if (ret)
3265 				break;
3266 		}
3267 		if (ret)
3268 			break;
3269 
3270 		page_off = offset & pagemask;
3271 		size = pagesize;
3272 		if (len < size)
3273 			size = len;
3274 
3275 		len -= size;
3276 
		memcpy(tmp + page_off, buf, size);

		/* Advance past the source bytes merged into this page. */
		buf += size;
		offset = offset + (pagesize - page_off);
3280 
3281 		tg3_enable_nvram_access(tp);
3282 
3283 		/*
3284 		 * Before we can erase the flash page, we need
3285 		 * to issue a special "write enable" command.
3286 		 */
3287 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3288 
3289 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3290 			break;
3291 
3292 		/* Erase the target page */
3293 		tw32(NVRAM_ADDR, phy_addr);
3294 
3295 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3296 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3297 
3298 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3299 			break;
3300 
3301 		/* Issue another write enable to start the write. */
3302 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3303 
3304 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3305 			break;
3306 
3307 		for (j = 0; j < pagesize; j += 4) {
3308 			__be32 data;
3309 
3310 			data = *((__be32 *) (tmp + j));
3311 
3312 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3313 
3314 			tw32(NVRAM_ADDR, phy_addr + j);
3315 
3316 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3317 				NVRAM_CMD_WR;
3318 
3319 			if (j == 0)
3320 				nvram_cmd |= NVRAM_CMD_FIRST;
3321 			else if (j == (pagesize - 4))
3322 				nvram_cmd |= NVRAM_CMD_LAST;
3323 
3324 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3325 			if (ret)
3326 				break;
3327 		}
3328 		if (ret)
3329 			break;
3330 	}
3331 
3332 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3333 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3334 
3335 	kfree(tmp);
3336 
3337 	return ret;
3338 }
3339 
3340 /* offset and length are dword aligned */
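/* Buffered flash is programmed a word at a time: NVRAM_CMD_FIRST opens
 * a page program cycle and NVRAM_CMD_LAST commits it at the end of the
 * page (or at the final word of the transfer).
 */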
3341 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3342 		u8 *buf)
3343 {
3344 	int i, ret = 0;
3345 
3346 	for (i = 0; i < len; i += 4, offset += 4) {
3347 		u32 page_off, phy_addr, nvram_cmd;
3348 		__be32 data;
3349 
3350 		memcpy(&data, buf + i, 4);
3351 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3352 
3353 		page_off = offset % tp->nvram_pagesize;
3354 
3355 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3356 
3357 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3358 
3359 		if (page_off == 0 || i == 0)
3360 			nvram_cmd |= NVRAM_CMD_FIRST;
3361 		if (page_off == (tp->nvram_pagesize - 4))
3362 			nvram_cmd |= NVRAM_CMD_LAST;
3363 
3364 		if (i == (len - 4))
3365 			nvram_cmd |= NVRAM_CMD_LAST;
3366 
3367 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3368 		    !tg3_flag(tp, FLASH) ||
3369 		    !tg3_flag(tp, 57765_PLUS))
3370 			tw32(NVRAM_ADDR, phy_addr);
3371 
3372 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3373 		    !tg3_flag(tp, 5755_PLUS) &&
3374 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3375 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3376 			u32 cmd;
3377 
3378 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3379 			ret = tg3_nvram_exec_cmd(tp, cmd);
3380 			if (ret)
3381 				break;
3382 		}
3383 		if (!tg3_flag(tp, FLASH)) {
3384 			/* We always do complete word writes to eeprom. */
3385 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3386 		}
3387 
3388 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3389 		if (ret)
3390 			break;
3391 	}
3392 	return ret;
3393 }
3394 
3395 /* offset and length are dword aligned */
3396 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3397 {
3398 	int ret;
3399 
3400 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3401 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3402 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3403 		udelay(40);
3404 	}
3405 
3406 	if (!tg3_flag(tp, NVRAM)) {
3407 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3408 	} else {
3409 		u32 grc_mode;
3410 
3411 		ret = tg3_nvram_lock(tp);
3412 		if (ret)
3413 			return ret;
3414 
3415 		tg3_enable_nvram_access(tp);
3416 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3417 			tw32(NVRAM_WRITE1, 0x406);
3418 
3419 		grc_mode = tr32(GRC_MODE);
3420 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3421 
3422 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3423 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3424 				buf);
3425 		} else {
3426 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3427 				buf);
3428 		}
3429 
3430 		grc_mode = tr32(GRC_MODE);
3431 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3432 
3433 		tg3_disable_nvram_access(tp);
3434 		tg3_nvram_unlock(tp);
3435 	}
3436 
3437 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3438 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3439 		udelay(40);
3440 	}
3441 
3442 	return ret;
3443 }
3444 
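/* On-chip scratch memory used to stage firmware images for the RX and
 * TX RISC processors, 16 kB apiece.
 */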
3445 #define RX_CPU_SCRATCH_BASE	0x30000
3446 #define RX_CPU_SCRATCH_SIZE	0x04000
3447 #define TX_CPU_SCRATCH_BASE	0x34000
3448 #define TX_CPU_SCRATCH_SIZE	0x04000
3449 
3450 /* tp->lock is held. */
3451 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3452 {
3453 	int i;
3454 
3455 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3456 
3457 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3458 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3459 
3460 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3461 		return 0;
3462 	}
3463 	if (offset == RX_CPU_BASE) {
3464 		for (i = 0; i < 10000; i++) {
3465 			tw32(offset + CPU_STATE, 0xffffffff);
3466 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3467 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3468 				break;
3469 		}
3470 
3471 		tw32(offset + CPU_STATE, 0xffffffff);
3472 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3473 		udelay(10);
3474 	} else {
3475 		/*
3476 		 * There is only an Rx CPU for the 5750 derivative in the
3477 		 * BCM4785.
3478 		 */
3479 		if (tg3_flag(tp, IS_SSB_CORE))
3480 			return 0;
3481 
3482 		for (i = 0; i < 10000; i++) {
3483 			tw32(offset + CPU_STATE, 0xffffffff);
3484 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3485 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3486 				break;
3487 		}
3488 	}
3489 
3490 	if (i >= 10000) {
3491 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3492 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3493 		return -ENODEV;
3494 	}
3495 
3496 	/* Clear firmware's nvram arbitration. */
3497 	if (tg3_flag(tp, NVRAM))
3498 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3499 	return 0;
3500 }
3501 
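/* Describes a firmware image: the load address taken from the blob
 * header (fw_base), the image length in bytes (fw_len), and the
 * big-endian payload words (fw_data).
 */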
3502 struct fw_info {
3503 	unsigned int fw_base;
3504 	unsigned int fw_len;
3505 	const __be32 *fw_data;
3506 };
3507 
3508 /* tp->lock is held. */
3509 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3510 				 u32 cpu_scratch_base, int cpu_scratch_size,
3511 				 struct fw_info *info)
3512 {
3513 	int err, lock_err, i;
3514 	void (*write_op)(struct tg3 *, u32, u32);
3515 
3516 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: trying to load TX cpu firmware on a 5705-class chip that has no TX cpu\n",
			   __func__);
3520 		return -EINVAL;
3521 	}
3522 
3523 	if (tg3_flag(tp, 5705_PLUS))
3524 		write_op = tg3_write_mem;
3525 	else
3526 		write_op = tg3_write_indirect_reg32;
3527 
3528 	/* It is possible that bootcode is still loading at this point.
3529 	 * Get the nvram lock first before halting the cpu.
3530 	 */
3531 	lock_err = tg3_nvram_lock(tp);
3532 	err = tg3_halt_cpu(tp, cpu_base);
3533 	if (!lock_err)
3534 		tg3_nvram_unlock(tp);
3535 	if (err)
3536 		goto out;
3537 
3538 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3539 		write_op(tp, cpu_scratch_base + i, 0);
3540 	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3542 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3543 		write_op(tp, (cpu_scratch_base +
3544 			      (info->fw_base & 0xffff) +
3545 			      (i * sizeof(u32))),
3546 			      be32_to_cpu(info->fw_data[i]));
3547 
3548 	err = 0;
3549 
3550 out:
3551 	return err;
3552 }
3553 
3554 /* tp->lock is held. */
3555 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3556 {
3557 	struct fw_info info;
3558 	const __be32 *fw_data;
3559 	int err, i;
3560 
3561 	fw_data = (void *)tp->fw->data;
3562 
	/* The firmware blob starts with version numbers, followed by the
	 * start address and the complete length, where
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the image to be loaded contiguously from the
	 * start address.
	 */
3568 
3569 	info.fw_base = be32_to_cpu(fw_data[1]);
3570 	info.fw_len = tp->fw->size - 12;
3571 	info.fw_data = &fw_data[3];
3572 
3573 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3574 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3575 				    &info);
3576 	if (err)
3577 		return err;
3578 
3579 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3580 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3581 				    &info);
3582 	if (err)
3583 		return err;
3584 
3585 	/* Now startup only the RX cpu. */
3586 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3587 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3588 
3589 	for (i = 0; i < 5; i++) {
3590 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3591 			break;
3592 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3593 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3594 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3595 		udelay(1000);
3596 	}
3597 	if (i >= 5) {
3598 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3599 			   "should be %08x\n", __func__,
3600 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3601 		return -ENODEV;
3602 	}
3603 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3605 
3606 	return 0;
3607 }
3608 
3609 /* tp->lock is held. */
3610 static int tg3_load_tso_firmware(struct tg3 *tp)
3611 {
3612 	struct fw_info info;
3613 	const __be32 *fw_data;
3614 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3615 	int err, i;
3616 
3617 	if (tg3_flag(tp, HW_TSO_1) ||
3618 	    tg3_flag(tp, HW_TSO_2) ||
3619 	    tg3_flag(tp, HW_TSO_3))
3620 		return 0;
3621 
3622 	fw_data = (void *)tp->fw->data;
3623 
	/* The firmware blob starts with version numbers, followed by the
	 * start address and the complete length, where
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the image to be loaded contiguously from the
	 * start address.
	 */
3629 
3630 	info.fw_base = be32_to_cpu(fw_data[1]);
3631 	cpu_scratch_size = tp->fw_len;
3632 	info.fw_len = tp->fw->size - 12;
3633 	info.fw_data = &fw_data[3];
3634 
3635 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3636 		cpu_base = RX_CPU_BASE;
3637 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3638 	} else {
3639 		cpu_base = TX_CPU_BASE;
3640 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3641 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3642 	}
3643 
3644 	err = tg3_load_firmware_cpu(tp, cpu_base,
3645 				    cpu_scratch_base, cpu_scratch_size,
3646 				    &info);
3647 	if (err)
3648 		return err;
3649 
3650 	/* Now startup the cpu. */
3651 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3652 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3653 
3654 	for (i = 0; i < 5; i++) {
3655 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3656 			break;
3657 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3658 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3659 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3660 		udelay(1000);
3661 	}
3662 	if (i >= 5) {
3663 		netdev_err(tp->dev,
3664 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3665 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3666 		return -ENODEV;
3667 	}
3668 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3669 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3670 	return 0;
3671 }
3672 
3673 
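/* Program the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704) and seed the TX backoff algorithm
 * from the byte sum of the address.
 */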
3674 /* tp->lock is held. */
3675 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3676 {
3677 	u32 addr_high, addr_low;
3678 	int i;
3679 
3680 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3681 		     tp->dev->dev_addr[1]);
3682 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3683 		    (tp->dev->dev_addr[3] << 16) |
3684 		    (tp->dev->dev_addr[4] <<  8) |
3685 		    (tp->dev->dev_addr[5] <<  0));
3686 	for (i = 0; i < 4; i++) {
3687 		if (i == 1 && skip_mac_1)
3688 			continue;
3689 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3690 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3691 	}
3692 
3693 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3694 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3695 		for (i = 0; i < 12; i++) {
3696 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3697 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3698 		}
3699 	}
3700 
3701 	addr_high = (tp->dev->dev_addr[0] +
3702 		     tp->dev->dev_addr[1] +
3703 		     tp->dev->dev_addr[2] +
3704 		     tp->dev->dev_addr[3] +
3705 		     tp->dev->dev_addr[4] +
3706 		     tp->dev->dev_addr[5]) &
3707 		TX_BACKOFF_SEED_MASK;
3708 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3709 }
3710 
3711 static void tg3_enable_register_access(struct tg3 *tp)
3712 {
3713 	/*
3714 	 * Make sure register accesses (indirect or otherwise) will function
3715 	 * correctly.
3716 	 */
3717 	pci_write_config_dword(tp->pdev,
3718 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3719 }
3720 
3721 static int tg3_power_up(struct tg3 *tp)
3722 {
3723 	int err;
3724 
3725 	tg3_enable_register_access(tp);
3726 
3727 	err = pci_set_power_state(tp->pdev, PCI_D0);
3728 	if (!err) {
3729 		/* Switch out of Vaux if it is a NIC */
3730 		tg3_pwrsrc_switch_to_vmain(tp);
3731 	} else {
3732 		netdev_err(tp->dev, "Transition to D0 failed\n");
3733 	}
3734 
3735 	return err;
3736 }
3737 
3738 static int tg3_setup_phy(struct tg3 *, int);
3739 
3740 static int tg3_power_down_prepare(struct tg3 *tp)
3741 {
3742 	u32 misc_host_ctrl;
3743 	bool device_should_wake, do_low_power;
3744 
3745 	tg3_enable_register_access(tp);
3746 
3747 	/* Restore the CLKREQ setting. */
3748 	if (tg3_flag(tp, CLKREQ_BUG))
3749 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3750 					 PCI_EXP_LNKCTL_CLKREQ_EN);
3751 
3752 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3753 	tw32(TG3PCI_MISC_HOST_CTRL,
3754 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3755 
3756 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3757 			     tg3_flag(tp, WOL_ENABLE);
3758 
3759 	if (tg3_flag(tp, USE_PHYLIB)) {
3760 		do_low_power = false;
3761 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3762 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3763 			struct phy_device *phydev;
3764 			u32 phyid, advertising;
3765 
3766 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3767 
3768 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3769 
3770 			tp->link_config.speed = phydev->speed;
3771 			tp->link_config.duplex = phydev->duplex;
3772 			tp->link_config.autoneg = phydev->autoneg;
3773 			tp->link_config.advertising = phydev->advertising;
3774 
3775 			advertising = ADVERTISED_TP |
3776 				      ADVERTISED_Pause |
3777 				      ADVERTISED_Autoneg |
3778 				      ADVERTISED_10baseT_Half;
3779 
3780 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3781 				if (tg3_flag(tp, WOL_SPEED_100MB))
3782 					advertising |=
3783 						ADVERTISED_100baseT_Half |
3784 						ADVERTISED_100baseT_Full |
3785 						ADVERTISED_10baseT_Full;
3786 				else
3787 					advertising |= ADVERTISED_10baseT_Full;
3788 			}
3789 
3790 			phydev->advertising = advertising;
3791 
3792 			phy_start_aneg(phydev);
3793 
3794 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3795 			if (phyid != PHY_ID_BCMAC131) {
3796 				phyid &= PHY_BCM_OUI_MASK;
3797 				if (phyid == PHY_BCM_OUI_1 ||
3798 				    phyid == PHY_BCM_OUI_2 ||
3799 				    phyid == PHY_BCM_OUI_3)
3800 					do_low_power = true;
3801 			}
3802 		}
3803 	} else {
3804 		do_low_power = true;
3805 
3806 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3807 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3808 
3809 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3810 			tg3_setup_phy(tp, 0);
3811 	}
3812 
3813 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3814 		u32 val;
3815 
3816 		val = tr32(GRC_VCPU_EXT_CTRL);
3817 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3818 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3819 		int i;
3820 		u32 val;
3821 
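		/* Give the firmware up to ~200 ms to post the
		 * complemented magic value in the ASF status mailbox
		 * before we continue powering down.
		 */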
3822 		for (i = 0; i < 200; i++) {
3823 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3824 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3825 				break;
3826 			msleep(1);
3827 		}
3828 	}
3829 	if (tg3_flag(tp, WOL_CAP))
3830 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3831 						     WOL_DRV_STATE_SHUTDOWN |
3832 						     WOL_DRV_WOL |
3833 						     WOL_SET_MAGIC_PKT);
3834 
3835 	if (device_should_wake) {
3836 		u32 mac_mode;
3837 
3838 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3839 			if (do_low_power &&
3840 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3841 				tg3_phy_auxctl_write(tp,
3842 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3843 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3844 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3845 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3846 				udelay(40);
3847 			}
3848 
3849 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3850 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3851 			else
3852 				mac_mode = MAC_MODE_PORT_MODE_MII;
3853 
3854 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3855 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3856 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3857 					     SPEED_100 : SPEED_10;
3858 				if (tg3_5700_link_polarity(tp, speed))
3859 					mac_mode |= MAC_MODE_LINK_POLARITY;
3860 				else
3861 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3862 			}
3863 		} else {
3864 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3865 		}
3866 
3867 		if (!tg3_flag(tp, 5750_PLUS))
3868 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3869 
3870 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3871 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3872 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3873 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3874 
3875 		if (tg3_flag(tp, ENABLE_APE))
3876 			mac_mode |= MAC_MODE_APE_TX_EN |
3877 				    MAC_MODE_APE_RX_EN |
3878 				    MAC_MODE_TDE_ENABLE;
3879 
3880 		tw32_f(MAC_MODE, mac_mode);
3881 		udelay(100);
3882 
3883 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3884 		udelay(10);
3885 	}
3886 
3887 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3888 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3889 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
3890 		u32 base_val;
3891 
3892 		base_val = tp->pci_clock_ctrl;
3893 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3894 			     CLOCK_CTRL_TXCLK_DISABLE);
3895 
3896 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3897 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3898 	} else if (tg3_flag(tp, 5780_CLASS) ||
3899 		   tg3_flag(tp, CPMU_PRESENT) ||
3900 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
3901 		/* do nothing */
3902 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3903 		u32 newbits1, newbits2;
3904 
3905 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3906 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
3907 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3908 				    CLOCK_CTRL_TXCLK_DISABLE |
3909 				    CLOCK_CTRL_ALTCLK);
3910 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3911 		} else if (tg3_flag(tp, 5705_PLUS)) {
3912 			newbits1 = CLOCK_CTRL_625_CORE;
3913 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3914 		} else {
3915 			newbits1 = CLOCK_CTRL_ALTCLK;
3916 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3917 		}
3918 
3919 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3920 			    40);
3921 
3922 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3923 			    40);
3924 
3925 		if (!tg3_flag(tp, 5705_PLUS)) {
3926 			u32 newbits3;
3927 
3928 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3929 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
3930 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3931 					    CLOCK_CTRL_TXCLK_DISABLE |
3932 					    CLOCK_CTRL_44MHZ_CORE);
3933 			} else {
3934 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3935 			}
3936 
3937 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3938 				    tp->pci_clock_ctrl | newbits3, 40);
3939 		}
3940 	}
3941 
	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3943 		tg3_power_down_phy(tp, do_low_power);
3944 
3945 	tg3_frob_aux_power(tp, true);
3946 
3947 	/* Workaround for unstable PLL clock */
3948 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3949 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3950 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3951 		u32 val = tr32(0x7d00);
3952 
3953 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3954 		tw32(0x7d00, val);
3955 		if (!tg3_flag(tp, ENABLE_ASF)) {
3956 			int err;
3957 
3958 			err = tg3_nvram_lock(tp);
3959 			tg3_halt_cpu(tp, RX_CPU_BASE);
3960 			if (!err)
3961 				tg3_nvram_unlock(tp);
3962 		}
3963 	}
3964 
3965 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3966 
3967 	return 0;
3968 }
3969 
3970 static void tg3_power_down(struct tg3 *tp)
3971 {
3972 	tg3_power_down_prepare(tp);
3973 
3974 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3975 	pci_set_power_state(tp->pdev, PCI_D3hot);
3976 }
3977 
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
3979 {
3980 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3981 	case MII_TG3_AUX_STAT_10HALF:
3982 		*speed = SPEED_10;
3983 		*duplex = DUPLEX_HALF;
3984 		break;
3985 
3986 	case MII_TG3_AUX_STAT_10FULL:
3987 		*speed = SPEED_10;
3988 		*duplex = DUPLEX_FULL;
3989 		break;
3990 
3991 	case MII_TG3_AUX_STAT_100HALF:
3992 		*speed = SPEED_100;
3993 		*duplex = DUPLEX_HALF;
3994 		break;
3995 
3996 	case MII_TG3_AUX_STAT_100FULL:
3997 		*speed = SPEED_100;
3998 		*duplex = DUPLEX_FULL;
3999 		break;
4000 
4001 	case MII_TG3_AUX_STAT_1000HALF:
4002 		*speed = SPEED_1000;
4003 		*duplex = DUPLEX_HALF;
4004 		break;
4005 
4006 	case MII_TG3_AUX_STAT_1000FULL:
4007 		*speed = SPEED_1000;
4008 		*duplex = DUPLEX_FULL;
4009 		break;
4010 
4011 	default:
4012 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4013 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4014 				 SPEED_10;
4015 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4016 				  DUPLEX_HALF;
4017 			break;
4018 		}
4019 		*speed = SPEED_UNKNOWN;
4020 		*duplex = DUPLEX_UNKNOWN;
4021 		break;
4022 	}
4023 }
4024 
4025 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4026 {
4027 	int err = 0;
4028 	u32 val, new_adv;
4029 
4030 	new_adv = ADVERTISE_CSMA;
4031 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4032 	new_adv |= mii_advertise_flowctrl(flowctrl);
4033 
4034 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4035 	if (err)
4036 		goto done;
4037 
4038 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4039 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4040 
4041 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4042 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4043 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4044 
4045 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4046 		if (err)
4047 			goto done;
4048 	}
4049 
4050 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4051 		goto done;
4052 
4053 	tw32(TG3_CPMU_EEE_MODE,
4054 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4055 
4056 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4057 	if (!err) {
4058 		u32 err2;
4059 
4060 		val = 0;
		/* Advertise 100BASE-TX EEE ability */
4062 		if (advertise & ADVERTISED_100baseT_Full)
4063 			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
4065 		if (advertise & ADVERTISED_1000baseT_Full)
4066 			val |= MDIO_AN_EEE_ADV_1000T;
4067 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4068 		if (err)
4069 			val = 0;
4070 
4071 		switch (tg3_asic_rev(tp)) {
4072 		case ASIC_REV_5717:
4073 		case ASIC_REV_57765:
4074 		case ASIC_REV_57766:
4075 		case ASIC_REV_5719:
			/* If we advertised any EEE abilities above... */
4077 			if (val)
4078 				val = MII_TG3_DSP_TAP26_ALNOKO |
4079 				      MII_TG3_DSP_TAP26_RMRXSTO |
4080 				      MII_TG3_DSP_TAP26_OPCSINPT;
4081 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4082 			/* Fall through */
4083 		case ASIC_REV_5720:
4084 		case ASIC_REV_5762:
4085 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4086 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4087 						 MII_TG3_DSP_CH34TP2_HIBW01);
4088 		}
4089 
4090 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4091 		if (!err)
4092 			err = err2;
4093 	}
4094 
4095 done:
4096 	return err;
4097 }
4098 
4099 static void tg3_phy_copper_begin(struct tg3 *tp)
4100 {
4101 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4102 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4103 		u32 adv, fc;
4104 
4105 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4106 			adv = ADVERTISED_10baseT_Half |
4107 			      ADVERTISED_10baseT_Full;
4108 			if (tg3_flag(tp, WOL_SPEED_100MB))
4109 				adv |= ADVERTISED_100baseT_Half |
4110 				       ADVERTISED_100baseT_Full;
4111 
4112 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4113 		} else {
4114 			adv = tp->link_config.advertising;
4115 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4116 				adv &= ~(ADVERTISED_1000baseT_Half |
4117 					 ADVERTISED_1000baseT_Full);
4118 
4119 			fc = tp->link_config.flowctrl;
4120 		}
4121 
4122 		tg3_phy_autoneg_cfg(tp, adv, fc);
4123 
4124 		tg3_writephy(tp, MII_BMCR,
4125 			     BMCR_ANENABLE | BMCR_ANRESTART);
4126 	} else {
4127 		int i;
4128 		u32 bmcr, orig_bmcr;
4129 
4130 		tp->link_config.active_speed = tp->link_config.speed;
4131 		tp->link_config.active_duplex = tp->link_config.duplex;
4132 
4133 		bmcr = 0;
4134 		switch (tp->link_config.speed) {
4135 		default:
4136 		case SPEED_10:
4137 			break;
4138 
4139 		case SPEED_100:
4140 			bmcr |= BMCR_SPEED100;
4141 			break;
4142 
4143 		case SPEED_1000:
4144 			bmcr |= BMCR_SPEED1000;
4145 			break;
4146 		}
4147 
4148 		if (tp->link_config.duplex == DUPLEX_FULL)
4149 			bmcr |= BMCR_FULLDPLX;
4150 
4151 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4152 		    (bmcr != orig_bmcr)) {
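			/* Momentarily force loopback so the link drops,
			 * then wait (up to ~15 ms) for the PHY to report
			 * link down before writing the new BMCR value.
			 */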
4153 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4154 			for (i = 0; i < 1500; i++) {
4155 				u32 tmp;
4156 
4157 				udelay(10);
4158 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4159 				    tg3_readphy(tp, MII_BMSR, &tmp))
4160 					continue;
4161 				if (!(tmp & BMSR_LSTATUS)) {
4162 					udelay(40);
4163 					break;
4164 				}
4165 			}
4166 			tg3_writephy(tp, MII_BMCR, bmcr);
4167 			udelay(40);
4168 		}
4169 	}
4170 }
4171 
4172 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4173 {
4174 	int err;
4175 
	/* Turn off tap power management and set the extended
	 * packet length bit.
	 */
4178 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4179 
4180 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4181 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4182 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4183 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4184 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4185 
4186 	udelay(40);
4187 
4188 	return err;
4189 }
4190 
4191 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4192 {
4193 	u32 advmsk, tgtadv, advertising;
4194 
4195 	advertising = tp->link_config.advertising;
4196 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4197 
4198 	advmsk = ADVERTISE_ALL;
4199 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4200 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4201 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4202 	}
4203 
4204 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4205 		return false;
4206 
4207 	if ((*lcladv & advmsk) != tgtadv)
4208 		return false;
4209 
4210 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4211 		u32 tg3_ctrl;
4212 
4213 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4214 
4215 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4216 			return false;
4217 
4218 		if (tgtadv &&
4219 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4220 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4221 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4222 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4223 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4224 		} else {
4225 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4226 		}
4227 
4228 		if (tg3_ctrl != tgtadv)
4229 			return false;
4230 	}
4231 
4232 	return true;
4233 }
4234 
4235 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4236 {
4237 	u32 lpeth = 0;
4238 
4239 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4240 		u32 val;
4241 
4242 		if (tg3_readphy(tp, MII_STAT1000, &val))
4243 			return false;
4244 
4245 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4246 	}
4247 
4248 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4249 		return false;
4250 
4251 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4252 	tp->link_config.rmt_adv = lpeth;
4253 
4254 	return true;
4255 }
4256 
4257 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4258 {
4259 	if (curr_link_up != tp->link_up) {
4260 		if (curr_link_up) {
4261 			netif_carrier_on(tp->dev);
4262 		} else {
4263 			netif_carrier_off(tp->dev);
4264 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4265 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266 		}
4267 
4268 		tg3_link_report(tp);
4269 		return true;
4270 	}
4271 
4272 	return false;
4273 }
4274 
4275 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4276 {
4277 	int current_link_up;
4278 	u32 bmsr, val;
4279 	u32 lcl_adv, rmt_adv;
4280 	u16 current_speed;
4281 	u8 current_duplex;
4282 	int i, err;
4283 
4284 	tw32(MAC_EVENT, 0);
4285 
4286 	tw32_f(MAC_STATUS,
4287 	     (MAC_STATUS_SYNC_CHANGED |
4288 	      MAC_STATUS_CFG_CHANGED |
4289 	      MAC_STATUS_MI_COMPLETION |
4290 	      MAC_STATUS_LNKSTATE_CHANGED));
4291 	udelay(40);
4292 
4293 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4294 		tw32_f(MAC_MI_MODE,
4295 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4296 		udelay(80);
4297 	}
4298 
4299 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4300 
4301 	/* Some third-party PHYs need to be reset on link going
4302 	 * down.
4303 	 */
4304 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4305 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4306 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4307 	    tp->link_up) {
4308 		tg3_readphy(tp, MII_BMSR, &bmsr);
4309 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4310 		    !(bmsr & BMSR_LSTATUS))
4311 			force_reset = 1;
4312 	}
4313 	if (force_reset)
4314 		tg3_phy_reset(tp);
4315 
4316 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4317 		tg3_readphy(tp, MII_BMSR, &bmsr);
4318 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4319 		    !tg3_flag(tp, INIT_COMPLETE))
4320 			bmsr = 0;
4321 
4322 		if (!(bmsr & BMSR_LSTATUS)) {
4323 			err = tg3_init_5401phy_dsp(tp);
4324 			if (err)
4325 				return err;
4326 
4327 			tg3_readphy(tp, MII_BMSR, &bmsr);
4328 			for (i = 0; i < 1000; i++) {
4329 				udelay(10);
4330 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4331 				    (bmsr & BMSR_LSTATUS)) {
4332 					udelay(40);
4333 					break;
4334 				}
4335 			}
4336 
4337 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4338 			    TG3_PHY_REV_BCM5401_B0 &&
4339 			    !(bmsr & BMSR_LSTATUS) &&
4340 			    tp->link_config.active_speed == SPEED_1000) {
4341 				err = tg3_phy_reset(tp);
4342 				if (!err)
4343 					err = tg3_init_5401phy_dsp(tp);
4344 				if (err)
4345 					return err;
4346 			}
4347 		}
4348 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4349 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4350 		/* 5701 {A0,B0} CRC bug workaround */
4351 		tg3_writephy(tp, 0x15, 0x0a75);
4352 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4353 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4354 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4355 	}
4356 
4357 	/* Clear pending interrupts... */
4358 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4359 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4360 
4361 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4362 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4363 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4364 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4365 
4366 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4367 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4368 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4369 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4370 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4371 		else
4372 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4373 	}
4374 
4375 	current_link_up = 0;
4376 	current_speed = SPEED_UNKNOWN;
4377 	current_duplex = DUPLEX_UNKNOWN;
4378 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4379 	tp->link_config.rmt_adv = 0;
4380 
4381 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4382 		err = tg3_phy_auxctl_read(tp,
4383 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4384 					  &val);
4385 		if (!err && !(val & (1 << 10))) {
4386 			tg3_phy_auxctl_write(tp,
4387 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4388 					     val | (1 << 10));
4389 			goto relink;
4390 		}
4391 	}
4392 
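	/* The link-status bit in BMSR is latched low, so read the
	 * register twice; the second read returns the current state.
	 */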
4393 	bmsr = 0;
4394 	for (i = 0; i < 100; i++) {
4395 		tg3_readphy(tp, MII_BMSR, &bmsr);
4396 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4397 		    (bmsr & BMSR_LSTATUS))
4398 			break;
4399 		udelay(40);
4400 	}
4401 
4402 	if (bmsr & BMSR_LSTATUS) {
4403 		u32 aux_stat, bmcr;
4404 
4405 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4406 		for (i = 0; i < 2000; i++) {
4407 			udelay(10);
4408 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4409 			    aux_stat)
4410 				break;
4411 		}
4412 
4413 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4414 					     &current_speed,
4415 					     &current_duplex);
4416 
4417 		bmcr = 0;
4418 		for (i = 0; i < 200; i++) {
4419 			tg3_readphy(tp, MII_BMCR, &bmcr);
4420 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4421 				continue;
4422 			if (bmcr && bmcr != 0x7fff)
4423 				break;
4424 			udelay(10);
4425 		}
4426 
4427 		lcl_adv = 0;
4428 		rmt_adv = 0;
4429 
4430 		tp->link_config.active_speed = current_speed;
4431 		tp->link_config.active_duplex = current_duplex;
4432 
4433 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4434 			if ((bmcr & BMCR_ANENABLE) &&
4435 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4436 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4437 				current_link_up = 1;
4438 		} else {
4439 			if (!(bmcr & BMCR_ANENABLE) &&
4440 			    tp->link_config.speed == current_speed &&
4441 			    tp->link_config.duplex == current_duplex &&
4442 			    tp->link_config.flowctrl ==
4443 			    tp->link_config.active_flowctrl) {
4444 				current_link_up = 1;
4445 			}
4446 		}
4447 
4448 		if (current_link_up == 1 &&
4449 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4450 			u32 reg, bit;
4451 
4452 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4453 				reg = MII_TG3_FET_GEN_STAT;
4454 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4455 			} else {
4456 				reg = MII_TG3_EXT_STAT;
4457 				bit = MII_TG3_EXT_STAT_MDIX;
4458 			}
4459 
4460 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4461 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4462 
4463 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4464 		}
4465 	}
4466 
4467 relink:
4468 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4469 		tg3_phy_copper_begin(tp);
4470 
4471 		if (tg3_flag(tp, ROBOSWITCH)) {
4472 			current_link_up = 1;
			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4474 			current_speed = SPEED_1000;
4475 			current_duplex = DUPLEX_FULL;
4476 			tp->link_config.active_speed = current_speed;
4477 			tp->link_config.active_duplex = current_duplex;
4478 		}
4479 
4480 		tg3_readphy(tp, MII_BMSR, &bmsr);
4481 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4482 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4483 			current_link_up = 1;
4484 	}
4485 
4486 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4487 	if (current_link_up == 1) {
4488 		if (tp->link_config.active_speed == SPEED_100 ||
4489 		    tp->link_config.active_speed == SPEED_10)
4490 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4491 		else
4492 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4493 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4494 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4495 	else
4496 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4497 
4498 	/* In order for the 5750 core in BCM4785 chip to work properly
4499 	 * in RGMII mode, the Led Control Register must be set up.
4500 	 */
4501 	if (tg3_flag(tp, RGMII_MODE)) {
4502 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4503 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4504 
4505 		if (tp->link_config.active_speed == SPEED_10)
4506 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4507 		else if (tp->link_config.active_speed == SPEED_100)
4508 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4509 				     LED_CTRL_100MBPS_ON);
4510 		else if (tp->link_config.active_speed == SPEED_1000)
4511 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4512 				     LED_CTRL_1000MBPS_ON);
4513 
4514 		tw32(MAC_LED_CTRL, led_ctrl);
4515 		udelay(40);
4516 	}
4517 
4518 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4519 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4520 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4521 
4522 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4523 		if (current_link_up == 1 &&
4524 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4525 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4526 		else
4527 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4528 	}
4529 
	/* ??? Without this setting the Netgear GA302T PHY does not
	 * ??? send or receive packets...
	 */
4533 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4534 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4535 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4536 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4537 		udelay(80);
4538 	}
4539 
4540 	tw32_f(MAC_MODE, tp->mac_mode);
4541 	udelay(40);
4542 
4543 	tg3_phy_eee_adjust(tp, current_link_up);
4544 
4545 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4546 		/* Polled via timer. */
4547 		tw32_f(MAC_EVENT, 0);
4548 	} else {
4549 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4550 	}
4551 	udelay(40);
4552 
4553 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4554 	    current_link_up == 1 &&
4555 	    tp->link_config.active_speed == SPEED_1000 &&
4556 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4557 		udelay(120);
4558 		tw32_f(MAC_STATUS,
4559 		     (MAC_STATUS_SYNC_CHANGED |
4560 		      MAC_STATUS_CFG_CHANGED));
4561 		udelay(40);
4562 		tg3_write_mem(tp,
4563 			      NIC_SRAM_FIRMWARE_MBOX,
4564 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4565 	}
4566 
4567 	/* Prevent send BD corruption. */
4568 	if (tg3_flag(tp, CLKREQ_BUG)) {
4569 		if (tp->link_config.active_speed == SPEED_100 ||
4570 		    tp->link_config.active_speed == SPEED_10)
4571 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4572 						   PCI_EXP_LNKCTL_CLKREQ_EN);
4573 		else
4574 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4575 						 PCI_EXP_LNKCTL_CLKREQ_EN);
4576 	}
4577 
4578 	tg3_test_and_report_link_chg(tp, current_link_up);
4579 
4580 	return 0;
4581 }
4582 
4583 struct tg3_fiber_aneginfo {
4584 	int state;
4585 #define ANEG_STATE_UNKNOWN		0
4586 #define ANEG_STATE_AN_ENABLE		1
4587 #define ANEG_STATE_RESTART_INIT		2
4588 #define ANEG_STATE_RESTART		3
4589 #define ANEG_STATE_DISABLE_LINK_OK	4
4590 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4591 #define ANEG_STATE_ABILITY_DETECT	6
4592 #define ANEG_STATE_ACK_DETECT_INIT	7
4593 #define ANEG_STATE_ACK_DETECT		8
4594 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4595 #define ANEG_STATE_COMPLETE_ACK		10
4596 #define ANEG_STATE_IDLE_DETECT_INIT	11
4597 #define ANEG_STATE_IDLE_DETECT		12
4598 #define ANEG_STATE_LINK_OK		13
4599 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4600 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4601 
4602 	u32 flags;
4603 #define MR_AN_ENABLE		0x00000001
4604 #define MR_RESTART_AN		0x00000002
4605 #define MR_AN_COMPLETE		0x00000004
4606 #define MR_PAGE_RX		0x00000008
4607 #define MR_NP_LOADED		0x00000010
4608 #define MR_TOGGLE_TX		0x00000020
4609 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4610 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4611 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4612 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4613 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4614 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4615 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4616 #define MR_TOGGLE_RX		0x00002000
4617 #define MR_NP_RX		0x00004000
4618 
4619 #define MR_LINK_OK		0x80000000
4620 
4621 	unsigned long link_time, cur_time;
4622 
4623 	u32 ability_match_cfg;
4624 	int ability_match_count;
4625 
4626 	char ability_match, idle_match, ack_match;
4627 
4628 	u32 txconfig, rxconfig;
4629 #define ANEG_CFG_NP		0x00000080
4630 #define ANEG_CFG_ACK		0x00000040
4631 #define ANEG_CFG_RF2		0x00000020
4632 #define ANEG_CFG_RF1		0x00000010
4633 #define ANEG_CFG_PS2		0x00000001
4634 #define ANEG_CFG_PS1		0x00008000
4635 #define ANEG_CFG_HD		0x00004000
4636 #define ANEG_CFG_FD		0x00002000
4637 #define ANEG_CFG_INVAL		0x00001f06
4638 
4639 };
4640 #define ANEG_OK		0
4641 #define ANEG_DONE	1
4642 #define ANEG_TIMER_ENAB	2
4643 #define ANEG_FAILED	-1
4644 
4645 #define ANEG_STATE_SETTLE_TIME	10000
4646 
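/* Typical successful progression through the state machine below:
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK.
 */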
4647 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4648 				   struct tg3_fiber_aneginfo *ap)
4649 {
4650 	u16 flowctrl;
4651 	unsigned long delta;
4652 	u32 rx_cfg_reg;
4653 	int ret;
4654 
4655 	if (ap->state == ANEG_STATE_UNKNOWN) {
4656 		ap->rxconfig = 0;
4657 		ap->link_time = 0;
4658 		ap->cur_time = 0;
4659 		ap->ability_match_cfg = 0;
4660 		ap->ability_match_count = 0;
4661 		ap->ability_match = 0;
4662 		ap->idle_match = 0;
4663 		ap->ack_match = 0;
4664 	}
4665 	ap->cur_time++;
4666 
4667 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4668 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4669 
4670 		if (rx_cfg_reg != ap->ability_match_cfg) {
4671 			ap->ability_match_cfg = rx_cfg_reg;
4672 			ap->ability_match = 0;
4673 			ap->ability_match_count = 0;
4674 		} else {
4675 			if (++ap->ability_match_count > 1) {
4676 				ap->ability_match = 1;
4677 				ap->ability_match_cfg = rx_cfg_reg;
4678 			}
4679 		}
4680 		if (rx_cfg_reg & ANEG_CFG_ACK)
4681 			ap->ack_match = 1;
4682 		else
4683 			ap->ack_match = 0;
4684 
4685 		ap->idle_match = 0;
4686 	} else {
4687 		ap->idle_match = 1;
4688 		ap->ability_match_cfg = 0;
4689 		ap->ability_match_count = 0;
4690 		ap->ability_match = 0;
4691 		ap->ack_match = 0;
4692 
4693 		rx_cfg_reg = 0;
4694 	}
4695 
4696 	ap->rxconfig = rx_cfg_reg;
4697 	ret = ANEG_OK;
4698 
4699 	switch (ap->state) {
4700 	case ANEG_STATE_UNKNOWN:
4701 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4702 			ap->state = ANEG_STATE_AN_ENABLE;
4703 
4704 		/* fallthru */
4705 	case ANEG_STATE_AN_ENABLE:
4706 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4707 		if (ap->flags & MR_AN_ENABLE) {
4708 			ap->link_time = 0;
4709 			ap->cur_time = 0;
4710 			ap->ability_match_cfg = 0;
4711 			ap->ability_match_count = 0;
4712 			ap->ability_match = 0;
4713 			ap->idle_match = 0;
4714 			ap->ack_match = 0;
4715 
4716 			ap->state = ANEG_STATE_RESTART_INIT;
4717 		} else {
4718 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4719 		}
4720 		break;
4721 
4722 	case ANEG_STATE_RESTART_INIT:
4723 		ap->link_time = ap->cur_time;
4724 		ap->flags &= ~(MR_NP_LOADED);
4725 		ap->txconfig = 0;
4726 		tw32(MAC_TX_AUTO_NEG, 0);
4727 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4728 		tw32_f(MAC_MODE, tp->mac_mode);
4729 		udelay(40);
4730 
4731 		ret = ANEG_TIMER_ENAB;
4732 		ap->state = ANEG_STATE_RESTART;
4733 
4734 		/* fallthru */
4735 	case ANEG_STATE_RESTART:
4736 		delta = ap->cur_time - ap->link_time;
4737 		if (delta > ANEG_STATE_SETTLE_TIME)
4738 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4739 		else
4740 			ret = ANEG_TIMER_ENAB;
4741 		break;
4742 
4743 	case ANEG_STATE_DISABLE_LINK_OK:
4744 		ret = ANEG_DONE;
4745 		break;
4746 
4747 	case ANEG_STATE_ABILITY_DETECT_INIT:
4748 		ap->flags &= ~(MR_TOGGLE_TX);
4749 		ap->txconfig = ANEG_CFG_FD;
4750 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4751 		if (flowctrl & ADVERTISE_1000XPAUSE)
4752 			ap->txconfig |= ANEG_CFG_PS1;
4753 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4754 			ap->txconfig |= ANEG_CFG_PS2;
4755 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4756 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4757 		tw32_f(MAC_MODE, tp->mac_mode);
4758 		udelay(40);
4759 
4760 		ap->state = ANEG_STATE_ABILITY_DETECT;
4761 		break;
4762 
4763 	case ANEG_STATE_ABILITY_DETECT:
4764 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4765 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4766 		break;
4767 
4768 	case ANEG_STATE_ACK_DETECT_INIT:
4769 		ap->txconfig |= ANEG_CFG_ACK;
4770 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4771 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4772 		tw32_f(MAC_MODE, tp->mac_mode);
4773 		udelay(40);
4774 
4775 		ap->state = ANEG_STATE_ACK_DETECT;
4776 
4777 		/* fallthru */
4778 	case ANEG_STATE_ACK_DETECT:
4779 		if (ap->ack_match != 0) {
4780 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4781 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4782 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4783 			} else {
4784 				ap->state = ANEG_STATE_AN_ENABLE;
4785 			}
4786 		} else if (ap->ability_match != 0 &&
4787 			   ap->rxconfig == 0) {
4788 			ap->state = ANEG_STATE_AN_ENABLE;
4789 		}
4790 		break;
4791 
4792 	case ANEG_STATE_COMPLETE_ACK_INIT:
4793 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4794 			ret = ANEG_FAILED;
4795 			break;
4796 		}
4797 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4798 			       MR_LP_ADV_HALF_DUPLEX |
4799 			       MR_LP_ADV_SYM_PAUSE |
4800 			       MR_LP_ADV_ASYM_PAUSE |
4801 			       MR_LP_ADV_REMOTE_FAULT1 |
4802 			       MR_LP_ADV_REMOTE_FAULT2 |
4803 			       MR_LP_ADV_NEXT_PAGE |
4804 			       MR_TOGGLE_RX |
4805 			       MR_NP_RX);
4806 		if (ap->rxconfig & ANEG_CFG_FD)
4807 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4808 		if (ap->rxconfig & ANEG_CFG_HD)
4809 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4810 		if (ap->rxconfig & ANEG_CFG_PS1)
4811 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4812 		if (ap->rxconfig & ANEG_CFG_PS2)
4813 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4814 		if (ap->rxconfig & ANEG_CFG_RF1)
4815 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4816 		if (ap->rxconfig & ANEG_CFG_RF2)
4817 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4818 		if (ap->rxconfig & ANEG_CFG_NP)
4819 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4820 
4821 		ap->link_time = ap->cur_time;
4822 
4823 		ap->flags ^= (MR_TOGGLE_TX);
4824 		if (ap->rxconfig & 0x0008)
4825 			ap->flags |= MR_TOGGLE_RX;
4826 		if (ap->rxconfig & ANEG_CFG_NP)
4827 			ap->flags |= MR_NP_RX;
4828 		ap->flags |= MR_PAGE_RX;
4829 
4830 		ap->state = ANEG_STATE_COMPLETE_ACK;
4831 		ret = ANEG_TIMER_ENAB;
4832 		break;
4833 
4834 	case ANEG_STATE_COMPLETE_ACK:
4835 		if (ap->ability_match != 0 &&
4836 		    ap->rxconfig == 0) {
4837 			ap->state = ANEG_STATE_AN_ENABLE;
4838 			break;
4839 		}
4840 		delta = ap->cur_time - ap->link_time;
4841 		if (delta > ANEG_STATE_SETTLE_TIME) {
4842 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4843 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4844 			} else {
4845 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4846 				    !(ap->flags & MR_NP_RX)) {
4847 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4848 				} else {
4849 					ret = ANEG_FAILED;
4850 				}
4851 			}
4852 		}
4853 		break;
4854 
4855 	case ANEG_STATE_IDLE_DETECT_INIT:
4856 		ap->link_time = ap->cur_time;
4857 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4858 		tw32_f(MAC_MODE, tp->mac_mode);
4859 		udelay(40);
4860 
4861 		ap->state = ANEG_STATE_IDLE_DETECT;
4862 		ret = ANEG_TIMER_ENAB;
4863 		break;
4864 
4865 	case ANEG_STATE_IDLE_DETECT:
4866 		if (ap->ability_match != 0 &&
4867 		    ap->rxconfig == 0) {
4868 			ap->state = ANEG_STATE_AN_ENABLE;
4869 			break;
4870 		}
4871 		delta = ap->cur_time - ap->link_time;
4872 		if (delta > ANEG_STATE_SETTLE_TIME) {
4873 			/* XXX another gem from the Broadcom driver :( */
4874 			ap->state = ANEG_STATE_LINK_OK;
4875 		}
4876 		break;
4877 
4878 	case ANEG_STATE_LINK_OK:
4879 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4880 		ret = ANEG_DONE;
4881 		break;
4882 
4883 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4884 		/* ??? unimplemented */
4885 		break;
4886 
4887 	case ANEG_STATE_NEXT_PAGE_WAIT:
4888 		/* ??? unimplemented */
4889 		break;
4890 
4891 	default:
4892 		ret = ANEG_FAILED;
4893 		break;
4894 	}
4895 
4896 	return ret;
4897 }
4898 
4899 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4900 {
4901 	int res = 0;
4902 	struct tg3_fiber_aneginfo aninfo;
4903 	int status = ANEG_FAILED;
4904 	unsigned int tick;
4905 	u32 tmp;
4906 
4907 	tw32_f(MAC_TX_AUTO_NEG, 0);
4908 
4909 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4910 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4911 	udelay(40);
4912 
4913 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4914 	udelay(40);
4915 
4916 	memset(&aninfo, 0, sizeof(aninfo));
4917 	aninfo.flags |= MR_AN_ENABLE;
4918 	aninfo.state = ANEG_STATE_UNKNOWN;
4919 	aninfo.cur_time = 0;
4920 	tick = 0;
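	/* Step the software autoneg state machine roughly once per
	 * microsecond, for up to ~195 ms.
	 */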
4921 	while (++tick < 195000) {
4922 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4923 		if (status == ANEG_DONE || status == ANEG_FAILED)
4924 			break;
4925 
4926 		udelay(1);
4927 	}
4928 
4929 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4930 	tw32_f(MAC_MODE, tp->mac_mode);
4931 	udelay(40);
4932 
4933 	*txflags = aninfo.txconfig;
4934 	*rxflags = aninfo.flags;
4935 
4936 	if (status == ANEG_DONE &&
4937 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4938 			     MR_LP_ADV_FULL_DUPLEX)))
4939 		res = 1;
4940 
4941 	return res;
4942 }
4943 
4944 static void tg3_init_bcm8002(struct tg3 *tp)
4945 {
4946 	u32 mac_status = tr32(MAC_STATUS);
4947 	int i;
4948 
	/* Reset when initializing for the first time, or when we
	 * have a link.
	 */
4950 	if (tg3_flag(tp, INIT_COMPLETE) &&
4951 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4952 		return;
4953 
4954 	/* Set PLL lock range. */
4955 	tg3_writephy(tp, 0x16, 0x8007);
4956 
4957 	/* SW reset */
4958 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4959 
4960 	/* Wait for reset to complete. */
4961 	/* XXX schedule_timeout() ... */
4962 	for (i = 0; i < 500; i++)
4963 		udelay(10);
4964 
4965 	/* Config mode; select PMA/Ch 1 regs. */
4966 	tg3_writephy(tp, 0x10, 0x8411);
4967 
4968 	/* Enable auto-lock and comdet, select txclk for tx. */
4969 	tg3_writephy(tp, 0x11, 0x0a10);
4970 
4971 	tg3_writephy(tp, 0x18, 0x00a0);
4972 	tg3_writephy(tp, 0x16, 0x41ff);
4973 
4974 	/* Assert and deassert POR. */
4975 	tg3_writephy(tp, 0x13, 0x0400);
4976 	udelay(40);
4977 	tg3_writephy(tp, 0x13, 0x0000);
4978 
4979 	tg3_writephy(tp, 0x11, 0x0a50);
4980 	udelay(40);
4981 	tg3_writephy(tp, 0x11, 0x0a10);
4982 
4983 	/* Wait for signal to stabilize */
4984 	/* XXX schedule_timeout() ... */
4985 	for (i = 0; i < 15000; i++)
4986 		udelay(10);
4987 
4988 	/* Deselect the channel register so we can read the PHYID
4989 	 * later.
4990 	 */
4991 	tg3_writephy(tp, 0x10, 0x8011);
4992 }
4993 
4994 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4995 {
4996 	u16 flowctrl;
4997 	u32 sg_dig_ctrl, sg_dig_status;
4998 	u32 serdes_cfg, expected_sg_dig_ctrl;
4999 	int workaround, port_a;
5000 	int current_link_up;
5001 
5002 	serdes_cfg = 0;
5003 	expected_sg_dig_ctrl = 0;
5004 	workaround = 0;
5005 	port_a = 1;
5006 	current_link_up = 0;
5007 
5008 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5009 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5010 		workaround = 1;
5011 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5012 			port_a = 0;
5013 
		/* Preserve bits 0-11, 13 and 14 for signal pre-emphasis,
		 * and bits 20-23 for the voltage regulator.
		 */
5016 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5017 	}
5018 
5019 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5020 
5021 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5022 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5023 			if (workaround) {
5024 				u32 val = serdes_cfg;
5025 
5026 				if (port_a)
5027 					val |= 0xc010000;
5028 				else
5029 					val |= 0x4010000;
5030 				tw32_f(MAC_SERDES_CFG, val);
5031 			}
5032 
5033 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5034 		}
5035 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5036 			tg3_setup_flow_control(tp, 0, 0);
5037 			current_link_up = 1;
5038 		}
5039 		goto out;
5040 	}
5041 
5042 	/* Want auto-negotiation.  */
5043 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5044 
5045 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5046 	if (flowctrl & ADVERTISE_1000XPAUSE)
5047 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5048 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5049 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5050 
5051 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5052 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5053 		    tp->serdes_counter &&
5054 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5055 				    MAC_STATUS_RCVD_CFG)) ==
5056 		     MAC_STATUS_PCS_SYNCED)) {
5057 			tp->serdes_counter--;
5058 			current_link_up = 1;
5059 			goto out;
5060 		}
5061 restart_autoneg:
5062 		if (workaround)
5063 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5064 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5065 		udelay(5);
5066 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5067 
5068 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5069 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5070 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5071 				 MAC_STATUS_SIGNAL_DET)) {
5072 		sg_dig_status = tr32(SG_DIG_STATUS);
5073 		mac_status = tr32(MAC_STATUS);
5074 
5075 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5076 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5077 			u32 local_adv = 0, remote_adv = 0;
5078 
5079 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5080 				local_adv |= ADVERTISE_1000XPAUSE;
5081 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5082 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5083 
5084 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5085 				remote_adv |= LPA_1000XPAUSE;
5086 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5087 				remote_adv |= LPA_1000XPAUSE_ASYM;
5088 
5089 			tp->link_config.rmt_adv =
5090 					   mii_adv_to_ethtool_adv_x(remote_adv);
5091 
5092 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5093 			current_link_up = 1;
5094 			tp->serdes_counter = 0;
5095 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5096 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5097 			if (tp->serdes_counter)
5098 				tp->serdes_counter--;
5099 			else {
5100 				if (workaround) {
5101 					u32 val = serdes_cfg;
5102 
5103 					if (port_a)
5104 						val |= 0xc010000;
5105 					else
5106 						val |= 0x4010000;
5107 
5108 					tw32_f(MAC_SERDES_CFG, val);
5109 				}
5110 
5111 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5112 				udelay(40);
5113 
				/* Link parallel detection: the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
5117 				mac_status = tr32(MAC_STATUS);
5118 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5119 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5120 					tg3_setup_flow_control(tp, 0, 0);
5121 					current_link_up = 1;
5122 					tp->phy_flags |=
5123 						TG3_PHYFLG_PARALLEL_DETECT;
5124 					tp->serdes_counter =
5125 						SERDES_PARALLEL_DET_TIMEOUT;
5126 				} else
5127 					goto restart_autoneg;
5128 			}
5129 		}
5130 	} else {
5131 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5132 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5133 	}
5134 
5135 out:
5136 	return current_link_up;
5137 }
5138 
5139 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5140 {
5141 	int current_link_up = 0;
5142 
5143 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5144 		goto out;
5145 
5146 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5147 		u32 txflags, rxflags;
5148 		int i;
5149 
5150 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5151 			u32 local_adv = 0, remote_adv = 0;
5152 
5153 			if (txflags & ANEG_CFG_PS1)
5154 				local_adv |= ADVERTISE_1000XPAUSE;
5155 			if (txflags & ANEG_CFG_PS2)
5156 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5157 
5158 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5159 				remote_adv |= LPA_1000XPAUSE;
5160 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5161 				remote_adv |= LPA_1000XPAUSE_ASYM;
5162 
5163 			tp->link_config.rmt_adv =
5164 					   mii_adv_to_ethtool_adv_x(remote_adv);
5165 
5166 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5167 
5168 			current_link_up = 1;
5169 		}
5170 		for (i = 0; i < 30; i++) {
5171 			udelay(20);
5172 			tw32_f(MAC_STATUS,
5173 			       (MAC_STATUS_SYNC_CHANGED |
5174 				MAC_STATUS_CFG_CHANGED));
5175 			udelay(40);
5176 			if ((tr32(MAC_STATUS) &
5177 			     (MAC_STATUS_SYNC_CHANGED |
5178 			      MAC_STATUS_CFG_CHANGED)) == 0)
5179 				break;
5180 		}
5181 
5182 		mac_status = tr32(MAC_STATUS);
5183 		if (current_link_up == 0 &&
5184 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5185 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5186 			current_link_up = 1;
5187 	} else {
5188 		tg3_setup_flow_control(tp, 0, 0);
5189 
5190 		/* Forcing 1000FD link up. */
5191 		current_link_up = 1;
5192 
5193 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5194 		udelay(40);
5195 
5196 		tw32_f(MAC_MODE, tp->mac_mode);
5197 		udelay(40);
5198 	}
5199 
5200 out:
5201 	return current_link_up;
5202 }
5203 
5204 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5205 {
5206 	u32 orig_pause_cfg;
5207 	u16 orig_active_speed;
5208 	u8 orig_active_duplex;
5209 	u32 mac_status;
5210 	int current_link_up;
5211 	int i;
5212 
5213 	orig_pause_cfg = tp->link_config.active_flowctrl;
5214 	orig_active_speed = tp->link_config.active_speed;
5215 	orig_active_duplex = tp->link_config.active_duplex;
5216 
5217 	if (!tg3_flag(tp, HW_AUTONEG) &&
5218 	    tp->link_up &&
5219 	    tg3_flag(tp, INIT_COMPLETE)) {
5220 		mac_status = tr32(MAC_STATUS);
5221 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5222 			       MAC_STATUS_SIGNAL_DET |
5223 			       MAC_STATUS_CFG_CHANGED |
5224 			       MAC_STATUS_RCVD_CFG);
5225 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5226 				   MAC_STATUS_SIGNAL_DET)) {
5227 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5228 					    MAC_STATUS_CFG_CHANGED));
5229 			return 0;
5230 		}
5231 	}
5232 
5233 	tw32_f(MAC_TX_AUTO_NEG, 0);
5234 
5235 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5236 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5237 	tw32_f(MAC_MODE, tp->mac_mode);
5238 	udelay(40);
5239 
5240 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5241 		tg3_init_bcm8002(tp);
5242 
	/* Enable link change events even when polling the serdes. */
5244 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5245 	udelay(40);
5246 
5247 	current_link_up = 0;
5248 	tp->link_config.rmt_adv = 0;
5249 	mac_status = tr32(MAC_STATUS);
5250 
5251 	if (tg3_flag(tp, HW_AUTONEG))
5252 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5253 	else
5254 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5255 
5256 	tp->napi[0].hw_status->status =
5257 		(SD_STATUS_UPDATED |
5258 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5259 
5260 	for (i = 0; i < 100; i++) {
5261 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5262 				    MAC_STATUS_CFG_CHANGED));
5263 		udelay(5);
5264 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5265 					 MAC_STATUS_CFG_CHANGED |
5266 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5267 			break;
5268 	}
5269 
5270 	mac_status = tr32(MAC_STATUS);
5271 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5272 		current_link_up = 0;
5273 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5274 		    tp->serdes_counter == 0) {
5275 			tw32_f(MAC_MODE, (tp->mac_mode |
5276 					  MAC_MODE_SEND_CONFIGS));
5277 			udelay(1);
5278 			tw32_f(MAC_MODE, tp->mac_mode);
5279 		}
5280 	}
5281 
5282 	if (current_link_up == 1) {
5283 		tp->link_config.active_speed = SPEED_1000;
5284 		tp->link_config.active_duplex = DUPLEX_FULL;
5285 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5286 				    LED_CTRL_LNKLED_OVERRIDE |
5287 				    LED_CTRL_1000MBPS_ON));
5288 	} else {
5289 		tp->link_config.active_speed = SPEED_UNKNOWN;
5290 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5291 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5292 				    LED_CTRL_LNKLED_OVERRIDE |
5293 				    LED_CTRL_TRAFFIC_OVERRIDE));
5294 	}
5295 
5296 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5297 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5298 		if (orig_pause_cfg != now_pause_cfg ||
5299 		    orig_active_speed != tp->link_config.active_speed ||
5300 		    orig_active_duplex != tp->link_config.active_duplex)
5301 			tg3_link_report(tp);
5302 	}
5303 
5304 	return 0;
5305 }
5306 
5307 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5308 {
5309 	int current_link_up, err = 0;
5310 	u32 bmsr, bmcr;
5311 	u16 current_speed;
5312 	u8 current_duplex;
5313 	u32 local_adv, remote_adv;
5314 
5315 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5316 	tw32_f(MAC_MODE, tp->mac_mode);
5317 	udelay(40);
5318 
5319 	tw32(MAC_EVENT, 0);
5320 
5321 	tw32_f(MAC_STATUS,
5322 	     (MAC_STATUS_SYNC_CHANGED |
5323 	      MAC_STATUS_CFG_CHANGED |
5324 	      MAC_STATUS_MI_COMPLETION |
5325 	      MAC_STATUS_LNKSTATE_CHANGED));
5326 	udelay(40);
5327 
5328 	if (force_reset)
5329 		tg3_phy_reset(tp);
5330 
5331 	current_link_up = 0;
5332 	current_speed = SPEED_UNKNOWN;
5333 	current_duplex = DUPLEX_UNKNOWN;
5334 	tp->link_config.rmt_adv = 0;
5335 
5336 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5337 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5338 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5339 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5340 			bmsr |= BMSR_LSTATUS;
5341 		else
5342 			bmsr &= ~BMSR_LSTATUS;
5343 	}
5344 
5345 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5346 
5347 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5348 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5349 		/* do nothing, just check for link up at the end */
5350 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5351 		u32 adv, newadv;
5352 
5353 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5354 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5355 				 ADVERTISE_1000XPAUSE |
5356 				 ADVERTISE_1000XPSE_ASYM |
5357 				 ADVERTISE_SLCT);
5358 
5359 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5360 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5361 
5362 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5363 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5364 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5365 			tg3_writephy(tp, MII_BMCR, bmcr);
5366 
5367 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5368 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5369 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5370 
5371 			return err;
5372 		}
5373 	} else {
5374 		u32 new_bmcr;
5375 
5376 		bmcr &= ~BMCR_SPEED1000;
5377 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5378 
5379 		if (tp->link_config.duplex == DUPLEX_FULL)
5380 			new_bmcr |= BMCR_FULLDPLX;
5381 
5382 		if (new_bmcr != bmcr) {
5383 			/* BMCR_SPEED1000 is a reserved bit that needs
5384 			 * to be set on write.
5385 			 */
5386 			new_bmcr |= BMCR_SPEED1000;
5387 
5388 			/* Force a linkdown */
5389 			if (tp->link_up) {
5390 				u32 adv;
5391 
5392 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5393 				adv &= ~(ADVERTISE_1000XFULL |
5394 					 ADVERTISE_1000XHALF |
5395 					 ADVERTISE_SLCT);
5396 				tg3_writephy(tp, MII_ADVERTISE, adv);
5397 				tg3_writephy(tp, MII_BMCR, bmcr |
5398 							   BMCR_ANRESTART |
5399 							   BMCR_ANENABLE);
5400 				udelay(10);
5401 				tg3_carrier_off(tp);
5402 			}
5403 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5404 			bmcr = new_bmcr;
5405 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5406 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5407 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5408 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5409 					bmsr |= BMSR_LSTATUS;
5410 				else
5411 					bmsr &= ~BMSR_LSTATUS;
5412 			}
5413 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5414 		}
5415 	}
5416 
5417 	if (bmsr & BMSR_LSTATUS) {
5418 		current_speed = SPEED_1000;
5419 		current_link_up = 1;
5420 		if (bmcr & BMCR_FULLDPLX)
5421 			current_duplex = DUPLEX_FULL;
5422 		else
5423 			current_duplex = DUPLEX_HALF;
5424 
5425 		local_adv = 0;
5426 		remote_adv = 0;
5427 
5428 		if (bmcr & BMCR_ANENABLE) {
5429 			u32 common;
5430 
5431 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5432 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5433 			common = local_adv & remote_adv;
5434 			if (common & (ADVERTISE_1000XHALF |
5435 				      ADVERTISE_1000XFULL)) {
5436 				if (common & ADVERTISE_1000XFULL)
5437 					current_duplex = DUPLEX_FULL;
5438 				else
5439 					current_duplex = DUPLEX_HALF;
5440 
5441 				tp->link_config.rmt_adv =
5442 					   mii_adv_to_ethtool_adv_x(remote_adv);
5443 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5444 				/* Link is up via parallel detect */
5445 			} else {
5446 				current_link_up = 0;
5447 			}
5448 		}
5449 	}
5450 
5451 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5452 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5453 
5454 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5455 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5456 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5457 
5458 	tw32_f(MAC_MODE, tp->mac_mode);
5459 	udelay(40);
5460 
5461 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5462 
5463 	tp->link_config.active_speed = current_speed;
5464 	tp->link_config.active_duplex = current_duplex;
5465 
5466 	tg3_test_and_report_link_chg(tp, current_link_up);
5467 	return err;
5468 }
5469 
5470 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5471 {
5472 	if (tp->serdes_counter) {
5473 		/* Give autoneg time to complete. */
5474 		tp->serdes_counter--;
5475 		return;
5476 	}
5477 
5478 	if (!tp->link_up &&
5479 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5480 		u32 bmcr;
5481 
5482 		tg3_readphy(tp, MII_BMCR, &bmcr);
5483 		if (bmcr & BMCR_ANENABLE) {
5484 			u32 phy1, phy2;
5485 
5486 			/* Select shadow register 0x1f */
5487 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5488 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5489 
5490 			/* Select expansion interrupt status register */
5491 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5492 					 MII_TG3_DSP_EXP1_INT_STAT);
5493 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5494 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5495 
5496 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5497 				/* We have signal detect and not receiving
5498 				 * config code words, link is up by parallel
5499 				 * detection.
5500 				 */
5501 
5502 				bmcr &= ~BMCR_ANENABLE;
5503 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5504 				tg3_writephy(tp, MII_BMCR, bmcr);
5505 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5506 			}
5507 		}
5508 	} else if (tp->link_up &&
5509 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5510 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5511 		u32 phy2;
5512 
5513 		/* Select expansion interrupt status register */
5514 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5515 				 MII_TG3_DSP_EXP1_INT_STAT);
5516 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5517 		if (phy2 & 0x20) {
5518 			u32 bmcr;
5519 
5520 			/* Config code words received, turn on autoneg. */
5521 			tg3_readphy(tp, MII_BMCR, &bmcr);
5522 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5523 
5524 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5525 
5526 		}
5527 	}
5528 }
5529 
5530 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5531 {
5532 	u32 val;
5533 	int err;
5534 
5535 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5536 		err = tg3_setup_fiber_phy(tp, force_reset);
5537 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5538 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5539 	else
5540 		err = tg3_setup_copper_phy(tp, force_reset);
5541 
5542 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5543 		u32 scale;
5544 
5545 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5546 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5547 			scale = 65;
5548 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5549 			scale = 6;
5550 		else
5551 			scale = 12;
5552 
5553 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5554 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5555 		tw32(GRC_MISC_CFG, val);
5556 	}
5557 
5558 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5559 	      (6 << TX_LENGTHS_IPG_SHIFT);
5560 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5561 	    tg3_asic_rev(tp) == ASIC_REV_5762)
5562 		val |= tr32(MAC_TX_LENGTHS) &
5563 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5564 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5565 
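	/* Half-duplex gigabit needs a much larger slot time for
	 * carrier extension; all other modes use the standard value.
	 */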
5566 	if (tp->link_config.active_speed == SPEED_1000 &&
5567 	    tp->link_config.active_duplex == DUPLEX_HALF)
5568 		tw32(MAC_TX_LENGTHS, val |
5569 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5570 	else
5571 		tw32(MAC_TX_LENGTHS, val |
5572 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5573 
5574 	if (!tg3_flag(tp, 5705_PLUS)) {
5575 		if (tp->link_up) {
5576 			tw32(HOSTCC_STAT_COAL_TICKS,
5577 			     tp->coal.stats_block_coalesce_usecs);
5578 		} else {
5579 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5580 		}
5581 	}
5582 
5583 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5584 		val = tr32(PCIE_PWR_MGMT_THRESH);
5585 		if (!tp->link_up)
5586 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5587 			      tp->pwrmgmt_thresh;
5588 		else
5589 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5590 		tw32(PCIE_PWR_MGMT_THRESH, val);
5591 	}
5592 
5593 	return err;
5594 }
5595 
5596 /* tp->lock must be held */
5597 static u64 tg3_refclk_read(struct tg3 *tp)
5598 {
5599 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5600 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5601 }
5602 
5603 /* tp->lock must be held */
5604 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5605 {
5606 	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5607 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5608 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5609 	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5610 }
5611 
5612 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5613 static inline void tg3_full_unlock(struct tg3 *tp);
5614 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5615 {
5616 	struct tg3 *tp = netdev_priv(dev);
5617 
5618 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5619 				SOF_TIMESTAMPING_RX_SOFTWARE |
5620 				SOF_TIMESTAMPING_SOFTWARE    |
5621 				SOF_TIMESTAMPING_TX_HARDWARE |
5622 				SOF_TIMESTAMPING_RX_HARDWARE |
5623 				SOF_TIMESTAMPING_RAW_HARDWARE;
5624 
5625 	if (tp->ptp_clock)
5626 		info->phc_index = ptp_clock_index(tp->ptp_clock);
5627 	else
5628 		info->phc_index = -1;
5629 
5630 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5631 
5632 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5633 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5634 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5635 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5636 	return 0;
5637 }
5638 
5639 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5640 {
5641 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5642 	bool neg_adj = false;
5643 	u32 correction = 0;
5644 
5645 	if (ppb < 0) {
5646 		neg_adj = true;
5647 		ppb = -ppb;
5648 	}
5649 
5650 	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clock, the
5652 	 * correction value gets added to the accumulator and when it
5653 	 * overflows, the time counter is incremented/decremented.
5654 	 *
5655 	 * So conversion from ppb to correction value is
5656 	 *		ppb * (1 << 24) / 1000000000
5657 	 */
5658 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5659 		     TG3_EAV_REF_CLK_CORRECT_MASK;
5660 
5661 	tg3_full_lock(tp, 0);
5662 
5663 	if (correction)
5664 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5665 		     TG3_EAV_REF_CLK_CORRECT_EN |
5666 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5667 	else
5668 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5669 
5670 	tg3_full_unlock(tp);
5671 
5672 	return 0;
5673 }
5674 
5675 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5676 {
5677 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5678 
5679 	tg3_full_lock(tp, 0);
5680 	tp->ptp_adjust += delta;
5681 	tg3_full_unlock(tp);
5682 
5683 	return 0;
5684 }
5685 
5686 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5687 {
5688 	u64 ns;
5689 	u32 remainder;
5690 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5691 
5692 	tg3_full_lock(tp, 0);
5693 	ns = tg3_refclk_read(tp);
5694 	ns += tp->ptp_adjust;
5695 	tg3_full_unlock(tp);
5696 
5697 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5698 	ts->tv_nsec = remainder;
5699 
5700 	return 0;
5701 }
5702 
5703 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5704 			   const struct timespec *ts)
5705 {
5706 	u64 ns;
5707 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5708 
5709 	ns = timespec_to_ns(ts);
5710 
5711 	tg3_full_lock(tp, 0);
5712 	tg3_refclk_write(tp, ns);
5713 	tp->ptp_adjust = 0;
5714 	tg3_full_unlock(tp);
5715 
5716 	return 0;
5717 }
5718 
5719 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5720 			  struct ptp_clock_request *rq, int on)
5721 {
5722 	return -EOPNOTSUPP;
5723 }
5724 
5725 static const struct ptp_clock_info tg3_ptp_caps = {
5726 	.owner		= THIS_MODULE,
5727 	.name		= "tg3 clock",
5728 	.max_adj	= 250000000,
5729 	.n_alarm	= 0,
5730 	.n_ext_ts	= 0,
5731 	.n_per_out	= 0,
5732 	.pps		= 0,
5733 	.adjfreq	= tg3_ptp_adjfreq,
5734 	.adjtime	= tg3_ptp_adjtime,
5735 	.gettime	= tg3_ptp_gettime,
5736 	.settime	= tg3_ptp_settime,
5737 	.enable		= tg3_ptp_enable,
5738 };
5739 
5740 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5741 				     struct skb_shared_hwtstamps *timestamp)
5742 {
5743 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5744 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5745 					   tp->ptp_adjust);
5746 }
5747 
5748 /* tp->lock must be held */
5749 static void tg3_ptp_init(struct tg3 *tp)
5750 {
5751 	if (!tg3_flag(tp, PTP_CAPABLE))
5752 		return;
5753 
5754 	/* Initialize the hardware clock to the system time. */
5755 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5756 	tp->ptp_adjust = 0;
5757 	tp->ptp_info = tg3_ptp_caps;
5758 }
5759 
5760 /* tp->lock must be held */
5761 static void tg3_ptp_resume(struct tg3 *tp)
5762 {
5763 	if (!tg3_flag(tp, PTP_CAPABLE))
5764 		return;
5765 
5766 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5767 	tp->ptp_adjust = 0;
5768 }
5769 
5770 static void tg3_ptp_fini(struct tg3 *tp)
5771 {
5772 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5773 		return;
5774 
5775 	ptp_clock_unregister(tp->ptp_clock);
5776 	tp->ptp_clock = NULL;
5777 	tp->ptp_adjust = 0;
5778 }
5779 
5780 static inline int tg3_irq_sync(struct tg3 *tp)
5781 {
5782 	return tp->irq_sync;
5783 }
5784 
5785 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5786 {
5787 	int i;
5788 
5789 	dst = (u32 *)((u8 *)dst + off);
5790 	for (i = 0; i < len; i += sizeof(u32))
5791 		*dst++ = tr32(off + i);
5792 }
5793 
5794 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5795 {
5796 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5797 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5798 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5799 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5800 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5801 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5802 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5803 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5804 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5805 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5806 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5807 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5808 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5809 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5810 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5811 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5812 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5813 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5814 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5815 
5816 	if (tg3_flag(tp, SUPPORT_MSIX))
5817 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5818 
5819 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5820 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5821 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5822 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5823 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5824 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5825 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5826 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5827 
5828 	if (!tg3_flag(tp, 5705_PLUS)) {
5829 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5830 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5831 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5832 	}
5833 
5834 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5835 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5836 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5837 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5838 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5839 
5840 	if (tg3_flag(tp, NVRAM))
5841 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5842 }
5843 
5844 static void tg3_dump_state(struct tg3 *tp)
5845 {
5846 	int i;
5847 	u32 *regs;
5848 
5849 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5850 	if (!regs)
5851 		return;
5852 
5853 	if (tg3_flag(tp, PCI_EXPRESS)) {
5854 		/* Read up to but not including private PCI registers */
5855 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5856 			regs[i / sizeof(u32)] = tr32(i);
5857 	} else
5858 		tg3_dump_legacy_regs(tp, regs);
5859 
5860 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5861 		if (!regs[i + 0] && !regs[i + 1] &&
5862 		    !regs[i + 2] && !regs[i + 3])
5863 			continue;
5864 
5865 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5866 			   i * 4,
5867 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5868 	}
5869 
5870 	kfree(regs);
5871 
5872 	for (i = 0; i < tp->irq_cnt; i++) {
5873 		struct tg3_napi *tnapi = &tp->napi[i];
5874 
5875 		/* SW status block */
5876 		netdev_err(tp->dev,
5877 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5878 			   i,
5879 			   tnapi->hw_status->status,
5880 			   tnapi->hw_status->status_tag,
5881 			   tnapi->hw_status->rx_jumbo_consumer,
5882 			   tnapi->hw_status->rx_consumer,
5883 			   tnapi->hw_status->rx_mini_consumer,
5884 			   tnapi->hw_status->idx[0].rx_producer,
5885 			   tnapi->hw_status->idx[0].tx_consumer);
5886 
5887 		netdev_err(tp->dev,
5888 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5889 			   i,
5890 			   tnapi->last_tag, tnapi->last_irq_tag,
5891 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5892 			   tnapi->rx_rcb_ptr,
5893 			   tnapi->prodring.rx_std_prod_idx,
5894 			   tnapi->prodring.rx_std_cons_idx,
5895 			   tnapi->prodring.rx_jmb_prod_idx,
5896 			   tnapi->prodring.rx_jmb_cons_idx);
5897 	}
5898 }
5899 
5900 /* This is called whenever we suspect that the system chipset is re-
5901  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5902  * is bogus tx completions. We try to recover by setting the
5903  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5904  * in the workqueue.
5905  */
5906 static void tg3_tx_recover(struct tg3 *tp)
5907 {
5908 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5909 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5910 
5911 	netdev_warn(tp->dev,
5912 		    "The system may be re-ordering memory-mapped I/O "
5913 		    "cycles to the network device, attempting to recover. "
5914 		    "Please report the problem to the driver maintainer "
5915 		    "and include system chipset information.\n");
5916 
5917 	spin_lock(&tp->lock);
5918 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5919 	spin_unlock(&tp->lock);
5920 }
5921 
5922 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5923 {
5924 	/* Tell compiler to fetch tx indices from memory. */
5925 	barrier();
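	/* Example (hypothetical values): on a 512-entry ring,
	 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7
	 * descriptors still in flight, leaving tx_pending - 7 free.
	 */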
5926 	return tnapi->tx_pending -
5927 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5928 }
5929 
5930 /* Tigon3 never reports partial packet sends.  So we do not
5931  * need special logic to handle SKBs that have not had all
5932  * of their frags sent yet, like SunGEM does.
5933  */
5934 static void tg3_tx(struct tg3_napi *tnapi)
5935 {
5936 	struct tg3 *tp = tnapi->tp;
5937 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5938 	u32 sw_idx = tnapi->tx_cons;
5939 	struct netdev_queue *txq;
5940 	int index = tnapi - tp->napi;
5941 	unsigned int pkts_compl = 0, bytes_compl = 0;
5942 
	/* With TSS, vector 0 does not service a TX ring, so the netdev
	 * TX queue index is one less than the tnapi index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;
5945 
5946 	txq = netdev_get_tx_queue(tp->dev, index);
5947 
5948 	while (sw_idx != hw_idx) {
5949 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5950 		struct sk_buff *skb = ri->skb;
5951 		int i, tx_bug = 0;
5952 
5953 		if (unlikely(skb == NULL)) {
5954 			tg3_tx_recover(tp);
5955 			return;
5956 		}
5957 
5958 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5959 			struct skb_shared_hwtstamps timestamp;
5960 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5961 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5962 
5963 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5964 
5965 			skb_tstamp_tx(skb, &timestamp);
5966 		}
5967 
5968 		pci_unmap_single(tp->pdev,
5969 				 dma_unmap_addr(ri, mapping),
5970 				 skb_headlen(skb),
5971 				 PCI_DMA_TODEVICE);
5972 
5973 		ri->skb = NULL;
5974 
5975 		while (ri->fragmented) {
5976 			ri->fragmented = false;
5977 			sw_idx = NEXT_TX(sw_idx);
5978 			ri = &tnapi->tx_buffers[sw_idx];
5979 		}
5980 
5981 		sw_idx = NEXT_TX(sw_idx);
5982 
5983 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5984 			ri = &tnapi->tx_buffers[sw_idx];
5985 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5986 				tx_bug = 1;
5987 
5988 			pci_unmap_page(tp->pdev,
5989 				       dma_unmap_addr(ri, mapping),
5990 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5991 				       PCI_DMA_TODEVICE);
5992 
5993 			while (ri->fragmented) {
5994 				ri->fragmented = false;
5995 				sw_idx = NEXT_TX(sw_idx);
5996 				ri = &tnapi->tx_buffers[sw_idx];
5997 			}
5998 
5999 			sw_idx = NEXT_TX(sw_idx);
6000 		}
6001 
6002 		pkts_compl++;
6003 		bytes_compl += skb->len;
6004 
6005 		dev_kfree_skb(skb);
6006 
6007 		if (unlikely(tx_bug)) {
6008 			tg3_tx_recover(tp);
6009 			return;
6010 		}
6011 	}
6012 
6013 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6014 
6015 	tnapi->tx_cons = sw_idx;
6016 
6017 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6018 	 * before checking for netif_queue_stopped().  Without the
6019 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6020 	 * will miss it and cause the queue to be stopped forever.
6021 	 */
6022 	smp_mb();
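	/* Without the barrier (illustrative race): tg3_start_xmit()
	 * could stop the queue on a stale tx_cons while the stopped
	 * check below was satisfied before our update became visible,
	 * so neither side would ever wake the queue.
	 */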
6023 
6024 	if (unlikely(netif_tx_queue_stopped(txq) &&
6025 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6026 		__netif_tx_lock(txq, smp_processor_id());
6027 		if (netif_tx_queue_stopped(txq) &&
6028 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6029 			netif_tx_wake_queue(txq);
6030 		__netif_tx_unlock(txq);
6031 	}
6032 }
6033 
6034 static void tg3_frag_free(bool is_frag, void *data)
6035 {
6036 	if (is_frag)
6037 		put_page(virt_to_head_page(data));
6038 	else
6039 		kfree(data);
6040 }
6041 
6042 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6043 {
6044 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6045 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6046 
6047 	if (!ri->data)
6048 		return;
6049 
6050 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6051 			 map_sz, PCI_DMA_FROMDEVICE);
6052 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6053 	ri->data = NULL;
6054 }
6055 
6056 
6057 /* Returns size of skb allocated or < 0 on error.
6058  *
6059  * We only need to fill in the address because the other members
 * of the RX descriptor are invariant; see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6063  * posting buffers we only dirty the first cache line of the RX
6064  * descriptor (containing the address).  Whereas for the RX status
6065  * buffers the cpu only reads the last cacheline of the RX descriptor
6066  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6067  */
6068 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6069 			     u32 opaque_key, u32 dest_idx_unmasked,
6070 			     unsigned int *frag_size)
6071 {
6072 	struct tg3_rx_buffer_desc *desc;
6073 	struct ring_info *map;
6074 	u8 *data;
6075 	dma_addr_t mapping;
6076 	int skb_size, data_size, dest_idx;
6077 
6078 	switch (opaque_key) {
6079 	case RXD_OPAQUE_RING_STD:
6080 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6081 		desc = &tpr->rx_std[dest_idx];
6082 		map = &tpr->rx_std_buffers[dest_idx];
6083 		data_size = tp->rx_pkt_map_sz;
6084 		break;
6085 
6086 	case RXD_OPAQUE_RING_JUMBO:
6087 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6088 		desc = &tpr->rx_jmb[dest_idx].std;
6089 		map = &tpr->rx_jmb_buffers[dest_idx];
6090 		data_size = TG3_RX_JMB_MAP_SZ;
6091 		break;
6092 
6093 	default:
6094 		return -EINVAL;
6095 	}
6096 
6097 	/* Do not overwrite any of the map or rp information
6098 	 * until we are sure we can commit to a new buffer.
6099 	 *
6100 	 * Callers depend upon this behavior and assume that
6101 	 * we leave everything unchanged if we fail.
6102 	 */
6103 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6104 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6105 	if (skb_size <= PAGE_SIZE) {
6106 		data = netdev_alloc_frag(skb_size);
6107 		*frag_size = skb_size;
6108 	} else {
6109 		data = kmalloc(skb_size, GFP_ATOMIC);
6110 		*frag_size = 0;
6111 	}
6112 	if (!data)
6113 		return -ENOMEM;
6114 
6115 	mapping = pci_map_single(tp->pdev,
6116 				 data + TG3_RX_OFFSET(tp),
6117 				 data_size,
6118 				 PCI_DMA_FROMDEVICE);
6119 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6120 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6121 		return -EIO;
6122 	}
6123 
6124 	map->data = data;
6125 	dma_unmap_addr_set(map, mapping, mapping);
6126 
6127 	desc->addr_hi = ((u64)mapping >> 32);
6128 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6129 
6130 	return data_size;
6131 }
6132 
/* We only need to copy the address over because the other
6134  * members of the RX descriptor are invariant.  See notes above
6135  * tg3_alloc_rx_data for full details.
6136  */
6137 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6138 			   struct tg3_rx_prodring_set *dpr,
6139 			   u32 opaque_key, int src_idx,
6140 			   u32 dest_idx_unmasked)
6141 {
6142 	struct tg3 *tp = tnapi->tp;
6143 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6144 	struct ring_info *src_map, *dest_map;
6145 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6146 	int dest_idx;
6147 
6148 	switch (opaque_key) {
6149 	case RXD_OPAQUE_RING_STD:
6150 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6151 		dest_desc = &dpr->rx_std[dest_idx];
6152 		dest_map = &dpr->rx_std_buffers[dest_idx];
6153 		src_desc = &spr->rx_std[src_idx];
6154 		src_map = &spr->rx_std_buffers[src_idx];
6155 		break;
6156 
6157 	case RXD_OPAQUE_RING_JUMBO:
6158 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6159 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6160 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6161 		src_desc = &spr->rx_jmb[src_idx].std;
6162 		src_map = &spr->rx_jmb_buffers[src_idx];
6163 		break;
6164 
6165 	default:
6166 		return;
6167 	}
6168 
6169 	dest_map->data = src_map->data;
6170 	dma_unmap_addr_set(dest_map, mapping,
6171 			   dma_unmap_addr(src_map, mapping));
6172 	dest_desc->addr_hi = src_desc->addr_hi;
6173 	dest_desc->addr_lo = src_desc->addr_lo;
6174 
6175 	/* Ensure that the update to the skb happens after the physical
6176 	 * addresses have been transferred to the new BD location.
6177 	 */
6178 	smp_wmb();
6179 
6180 	src_map->data = NULL;
6181 }
6182 
6183 /* The RX ring scheme is composed of multiple rings which post fresh
6184  * buffers to the chip, and one special ring the chip uses to report
6185  * status back to the host.
6186  *
6187  * The special ring reports the status of received packets to the
6188  * host.  The chip does not write into the original descriptor the
6189  * RX buffer was obtained from.  The chip simply takes the original
6190  * descriptor as provided by the host, updates the status and length
6191  * field, then writes this into the next status ring entry.
6192  *
6193  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6195  * it is first placed into the on-chip ram.  When the packet's length
6196  * is known, it walks down the TG3_BDINFO entries to select the ring.
6197  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6198  * which is within the range of the new packet's length is chosen.
6199  *
 * The "separate ring for rx status" scheme may seem odd, but it makes
6201  * sense from a cache coherency perspective.  If only the host writes
6202  * to the buffer post rings, and only the chip writes to the rx status
6203  * rings, then cache lines never move beyond shared-modified state.
6204  * If both the host and chip were to write into the same ring, cache line
6205  * eviction could occur since both entities want it in an exclusive state.
6206  */
6207 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6208 {
6209 	struct tg3 *tp = tnapi->tp;
6210 	u32 work_mask, rx_std_posted = 0;
6211 	u32 std_prod_idx, jmb_prod_idx;
6212 	u32 sw_idx = tnapi->rx_rcb_ptr;
6213 	u16 hw_idx;
6214 	int received;
6215 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6216 
6217 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6218 	/*
6219 	 * We need to order the read of hw_idx and the read of
6220 	 * the opaque cookie.
6221 	 */
6222 	rmb();
6223 	work_mask = 0;
6224 	received = 0;
6225 	std_prod_idx = tpr->rx_std_prod_idx;
6226 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6227 	while (sw_idx != hw_idx && budget > 0) {
6228 		struct ring_info *ri;
6229 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6230 		unsigned int len;
6231 		struct sk_buff *skb;
6232 		dma_addr_t dma_addr;
6233 		u32 opaque_key, desc_idx, *post_ptr;
6234 		u8 *data;
6235 		u64 tstamp = 0;
6236 
6237 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6238 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6239 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6240 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6241 			dma_addr = dma_unmap_addr(ri, mapping);
6242 			data = ri->data;
6243 			post_ptr = &std_prod_idx;
6244 			rx_std_posted++;
6245 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6246 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6247 			dma_addr = dma_unmap_addr(ri, mapping);
6248 			data = ri->data;
6249 			post_ptr = &jmb_prod_idx;
6250 		} else
6251 			goto next_pkt_nopost;
6252 
6253 		work_mask |= opaque_key;
6254 
6255 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6256 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6257 		drop_it:
6258 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6259 				       desc_idx, *post_ptr);
6260 		drop_it_no_recycle:
6261 			/* Other statistics kept track of by card. */
6262 			tp->rx_dropped++;
6263 			goto next_pkt;
6264 		}
6265 
6266 		prefetch(data + TG3_RX_OFFSET(tp));
6267 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6268 		      ETH_FCS_LEN;
6269 
6270 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6271 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6272 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6273 		     RXD_FLAG_PTPSTAT_PTPV2) {
6274 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6275 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6276 		}
6277 
6278 		if (len > TG3_RX_COPY_THRESH(tp)) {
6279 			int skb_size;
6280 			unsigned int frag_size;
6281 
6282 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6283 						    *post_ptr, &frag_size);
6284 			if (skb_size < 0)
6285 				goto drop_it;
6286 
6287 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6288 					 PCI_DMA_FROMDEVICE);
6289 
6290 			skb = build_skb(data, frag_size);
6291 			if (!skb) {
6292 				tg3_frag_free(frag_size != 0, data);
6293 				goto drop_it_no_recycle;
6294 			}
6295 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6296 			/* Ensure that the update to the data happens
6297 			 * after the usage of the old DMA mapping.
6298 			 */
6299 			smp_wmb();
6300 
6301 			ri->data = NULL;
6302 
6303 		} else {
6304 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6305 				       desc_idx, *post_ptr);
6306 
6307 			skb = netdev_alloc_skb(tp->dev,
6308 					       len + TG3_RAW_IP_ALIGN);
6309 			if (skb == NULL)
6310 				goto drop_it_no_recycle;
6311 
6312 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6313 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6314 			memcpy(skb->data,
6315 			       data + TG3_RX_OFFSET(tp),
6316 			       len);
6317 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6318 		}
6319 
6320 		skb_put(skb, len);
6321 		if (tstamp)
6322 			tg3_hwclock_to_timestamp(tp, tstamp,
6323 						 skb_hwtstamps(skb));
6324 
6325 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6326 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6327 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6328 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6329 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6330 		else
6331 			skb_checksum_none_assert(skb);
6332 
6333 		skb->protocol = eth_type_trans(skb, tp->dev);
6334 
6335 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6336 		    skb->protocol != htons(ETH_P_8021Q)) {
6337 			dev_kfree_skb(skb);
6338 			goto drop_it_no_recycle;
6339 		}
6340 
6341 		if (desc->type_flags & RXD_FLAG_VLAN &&
6342 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6343 			__vlan_hwaccel_put_tag(skb,
6344 					       desc->err_vlan & RXD_VLAN_MASK);
6345 
6346 		napi_gro_receive(&tnapi->napi, skb);
6347 
6348 		received++;
6349 		budget--;
6350 
6351 next_pkt:
6352 		(*post_ptr)++;
6353 
6354 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6355 			tpr->rx_std_prod_idx = std_prod_idx &
6356 					       tp->rx_std_ring_mask;
6357 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6358 				     tpr->rx_std_prod_idx);
6359 			work_mask &= ~RXD_OPAQUE_RING_STD;
6360 			rx_std_posted = 0;
6361 		}
6362 next_pkt_nopost:
6363 		sw_idx++;
6364 		sw_idx &= tp->rx_ret_ring_mask;
6365 
6366 		/* Refresh hw_idx to see if there is new work */
6367 		if (sw_idx == hw_idx) {
6368 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6369 			rmb();
6370 		}
6371 	}
6372 
6373 	/* ACK the status ring. */
6374 	tnapi->rx_rcb_ptr = sw_idx;
6375 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6376 
6377 	/* Refill RX ring(s). */
6378 	if (!tg3_flag(tp, ENABLE_RSS)) {
6379 		/* Sync BD data before updating mailbox */
6380 		wmb();
6381 
6382 		if (work_mask & RXD_OPAQUE_RING_STD) {
6383 			tpr->rx_std_prod_idx = std_prod_idx &
6384 					       tp->rx_std_ring_mask;
6385 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6386 				     tpr->rx_std_prod_idx);
6387 		}
6388 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6389 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6390 					       tp->rx_jmb_ring_mask;
6391 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6392 				     tpr->rx_jmb_prod_idx);
6393 		}
6394 		mmiowb();
6395 	} else if (work_mask) {
6396 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6397 		 * updated before the producer indices can be updated.
6398 		 */
6399 		smp_wmb();
6400 
6401 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6402 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6403 
6404 		if (tnapi != &tp->napi[1]) {
6405 			tp->rx_refill = true;
6406 			napi_schedule(&tp->napi[1].napi);
6407 		}
6408 	}
6409 
6410 	return received;
6411 }
6412 
6413 static void tg3_poll_link(struct tg3 *tp)
6414 {
6415 	/* handle link change and other phy events */
6416 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6417 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6418 
6419 		if (sblk->status & SD_STATUS_LINK_CHG) {
6420 			sblk->status = SD_STATUS_UPDATED |
6421 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6422 			spin_lock(&tp->lock);
6423 			if (tg3_flag(tp, USE_PHYLIB)) {
6424 				tw32_f(MAC_STATUS,
6425 				     (MAC_STATUS_SYNC_CHANGED |
6426 				      MAC_STATUS_CFG_CHANGED |
6427 				      MAC_STATUS_MI_COMPLETION |
6428 				      MAC_STATUS_LNKSTATE_CHANGED));
6429 				udelay(40);
6430 			} else
6431 				tg3_setup_phy(tp, 0);
6432 			spin_unlock(&tp->lock);
6433 		}
6434 	}
6435 }
6436 
6437 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6438 				struct tg3_rx_prodring_set *dpr,
6439 				struct tg3_rx_prodring_set *spr)
6440 {
6441 	u32 si, di, cpycnt, src_prod_idx;
6442 	int i, err = 0;
6443 
6444 	while (1) {
6445 		src_prod_idx = spr->rx_std_prod_idx;
6446 
6447 		/* Make sure updates to the rx_std_buffers[] entries and the
6448 		 * standard producer index are seen in the correct order.
6449 		 */
6450 		smp_rmb();
6451 
6452 		if (spr->rx_std_cons_idx == src_prod_idx)
6453 			break;
6454 
6455 		if (spr->rx_std_cons_idx < src_prod_idx)
6456 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6457 		else
6458 			cpycnt = tp->rx_std_ring_mask + 1 -
6459 				 spr->rx_std_cons_idx;
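		/* Example (hypothetical indices): on a 512-entry ring,
		 * cons_idx = 510 with prod_idx = 4 copies 2 entries up
		 * to the wrap point now and the remaining 4 on the next
		 * pass through the loop.
		 */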
6460 
6461 		cpycnt = min(cpycnt,
6462 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6463 
6464 		si = spr->rx_std_cons_idx;
6465 		di = dpr->rx_std_prod_idx;
6466 
6467 		for (i = di; i < di + cpycnt; i++) {
6468 			if (dpr->rx_std_buffers[i].data) {
6469 				cpycnt = i - di;
6470 				err = -ENOSPC;
6471 				break;
6472 			}
6473 		}
6474 
6475 		if (!cpycnt)
6476 			break;
6477 
6478 		/* Ensure that updates to the rx_std_buffers ring and the
6479 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6480 		 * ordered correctly WRT the skb check above.
6481 		 */
6482 		smp_rmb();
6483 
6484 		memcpy(&dpr->rx_std_buffers[di],
6485 		       &spr->rx_std_buffers[si],
6486 		       cpycnt * sizeof(struct ring_info));
6487 
6488 		for (i = 0; i < cpycnt; i++, di++, si++) {
6489 			struct tg3_rx_buffer_desc *sbd, *dbd;
6490 			sbd = &spr->rx_std[si];
6491 			dbd = &dpr->rx_std[di];
6492 			dbd->addr_hi = sbd->addr_hi;
6493 			dbd->addr_lo = sbd->addr_lo;
6494 		}
6495 
6496 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6497 				       tp->rx_std_ring_mask;
6498 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6499 				       tp->rx_std_ring_mask;
6500 	}
6501 
6502 	while (1) {
6503 		src_prod_idx = spr->rx_jmb_prod_idx;
6504 
6505 		/* Make sure updates to the rx_jmb_buffers[] entries and
6506 		 * the jumbo producer index are seen in the correct order.
6507 		 */
6508 		smp_rmb();
6509 
6510 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6511 			break;
6512 
6513 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6514 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6515 		else
6516 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6517 				 spr->rx_jmb_cons_idx;
6518 
6519 		cpycnt = min(cpycnt,
6520 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6521 
6522 		si = spr->rx_jmb_cons_idx;
6523 		di = dpr->rx_jmb_prod_idx;
6524 
6525 		for (i = di; i < di + cpycnt; i++) {
6526 			if (dpr->rx_jmb_buffers[i].data) {
6527 				cpycnt = i - di;
6528 				err = -ENOSPC;
6529 				break;
6530 			}
6531 		}
6532 
6533 		if (!cpycnt)
6534 			break;
6535 
6536 		/* Ensure that updates to the rx_jmb_buffers ring and the
6537 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6538 		 * ordered correctly WRT the skb check above.
6539 		 */
6540 		smp_rmb();
6541 
6542 		memcpy(&dpr->rx_jmb_buffers[di],
6543 		       &spr->rx_jmb_buffers[si],
6544 		       cpycnt * sizeof(struct ring_info));
6545 
6546 		for (i = 0; i < cpycnt; i++, di++, si++) {
6547 			struct tg3_rx_buffer_desc *sbd, *dbd;
6548 			sbd = &spr->rx_jmb[si].std;
6549 			dbd = &dpr->rx_jmb[di].std;
6550 			dbd->addr_hi = sbd->addr_hi;
6551 			dbd->addr_lo = sbd->addr_lo;
6552 		}
6553 
6554 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6555 				       tp->rx_jmb_ring_mask;
6556 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6557 				       tp->rx_jmb_ring_mask;
6558 	}
6559 
6560 	return err;
6561 }
6562 
6563 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6564 {
6565 	struct tg3 *tp = tnapi->tp;
6566 
6567 	/* run TX completion thread */
6568 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6569 		tg3_tx(tnapi);
6570 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6571 			return work_done;
6572 	}
6573 
6574 	if (!tnapi->rx_rcb_prod_idx)
6575 		return work_done;
6576 
6577 	/* run RX thread, within the bounds set by NAPI.
6578 	 * All RX "locking" is done by ensuring outside
6579 	 * code synchronizes with tg3->napi.poll()
6580 	 */
6581 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6582 		work_done += tg3_rx(tnapi, budget - work_done);
6583 
6584 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6585 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6586 		int i, err = 0;
6587 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6588 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6589 
6590 		tp->rx_refill = false;
6591 		for (i = 1; i <= tp->rxq_cnt; i++)
6592 			err |= tg3_rx_prodring_xfer(tp, dpr,
6593 						    &tp->napi[i].prodring);
6594 
6595 		wmb();
6596 
6597 		if (std_prod_idx != dpr->rx_std_prod_idx)
6598 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6599 				     dpr->rx_std_prod_idx);
6600 
6601 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6602 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6603 				     dpr->rx_jmb_prod_idx);
6604 
6605 		mmiowb();
6606 
6607 		if (err)
6608 			tw32_f(HOSTCC_MODE, tp->coal_now);
6609 	}
6610 
6611 	return work_done;
6612 }
6613 
6614 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6615 {
6616 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6617 		schedule_work(&tp->reset_task);
6618 }
6619 
6620 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6621 {
6622 	cancel_work_sync(&tp->reset_task);
6623 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6624 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6625 }
6626 
6627 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6628 {
6629 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6630 	struct tg3 *tp = tnapi->tp;
6631 	int work_done = 0;
6632 	struct tg3_hw_status *sblk = tnapi->hw_status;
6633 
6634 	while (1) {
6635 		work_done = tg3_poll_work(tnapi, work_done, budget);
6636 
6637 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6638 			goto tx_recovery;
6639 
6640 		if (unlikely(work_done >= budget))
6641 			break;
6642 
		/* tnapi->last_tag is used when interrupts are reenabled
		 * below to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
6647 		tnapi->last_tag = sblk->status_tag;
6648 		tnapi->last_irq_tag = tnapi->last_tag;
6649 		rmb();
6650 
6651 		/* check for RX/TX work to do */
6652 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6653 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6654 
			/* This check is not race free, but looping
			 * again reduces the number of interrupts taken.
			 */
6658 			if (tnapi == &tp->napi[1] && tp->rx_refill)
6659 				continue;
6660 
6661 			napi_complete(napi);
6662 			/* Reenable interrupts. */
6663 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6664 
6665 			/* This test here is synchronized by napi_schedule()
6666 			 * and napi_complete() to close the race condition.
6667 			 */
6668 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6669 				tw32(HOSTCC_MODE, tp->coalesce_mode |
6670 						  HOSTCC_MODE_ENABLE |
6671 						  tnapi->coal_now);
6672 			}
6673 			mmiowb();
6674 			break;
6675 		}
6676 	}
6677 
6678 	return work_done;
6679 
6680 tx_recovery:
6681 	/* work_done is guaranteed to be less than budget. */
6682 	napi_complete(napi);
6683 	tg3_reset_task_schedule(tp);
6684 	return work_done;
6685 }
6686 
6687 static void tg3_process_error(struct tg3 *tp)
6688 {
6689 	u32 val;
6690 	bool real_error = false;
6691 
6692 	if (tg3_flag(tp, ERROR_PROCESSED))
6693 		return;
6694 
6695 	/* Check Flow Attention register */
6696 	val = tr32(HOSTCC_FLOW_ATTN);
6697 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6698 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6699 		real_error = true;
6700 	}
6701 
6702 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6703 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6704 		real_error = true;
6705 	}
6706 
6707 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6708 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6709 		real_error = true;
6710 	}
6711 
6712 	if (!real_error)
6713 		return;
6714 
6715 	tg3_dump_state(tp);
6716 
6717 	tg3_flag_set(tp, ERROR_PROCESSED);
6718 	tg3_reset_task_schedule(tp);
6719 }
6720 
6721 static int tg3_poll(struct napi_struct *napi, int budget)
6722 {
6723 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6724 	struct tg3 *tp = tnapi->tp;
6725 	int work_done = 0;
6726 	struct tg3_hw_status *sblk = tnapi->hw_status;
6727 
6728 	while (1) {
6729 		if (sblk->status & SD_STATUS_ERROR)
6730 			tg3_process_error(tp);
6731 
6732 		tg3_poll_link(tp);
6733 
6734 		work_done = tg3_poll_work(tnapi, work_done, budget);
6735 
6736 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6737 			goto tx_recovery;
6738 
6739 		if (unlikely(work_done >= budget))
6740 			break;
6741 
6742 		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable()
			 * below to tell the hw how much work has been
			 * processed, so we must read it before checking
			 * for more work.
			 */
6747 			tnapi->last_tag = sblk->status_tag;
6748 			tnapi->last_irq_tag = tnapi->last_tag;
6749 			rmb();
6750 		} else
6751 			sblk->status &= ~SD_STATUS_UPDATED;
6752 
6753 		if (likely(!tg3_has_work(tnapi))) {
6754 			napi_complete(napi);
6755 			tg3_int_reenable(tnapi);
6756 			break;
6757 		}
6758 	}
6759 
6760 	return work_done;
6761 
6762 tx_recovery:
6763 	/* work_done is guaranteed to be less than budget. */
6764 	napi_complete(napi);
6765 	tg3_reset_task_schedule(tp);
6766 	return work_done;
6767 }
6768 
6769 static void tg3_napi_disable(struct tg3 *tp)
6770 {
6771 	int i;
6772 
6773 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6774 		napi_disable(&tp->napi[i].napi);
6775 }
6776 
6777 static void tg3_napi_enable(struct tg3 *tp)
6778 {
6779 	int i;
6780 
6781 	for (i = 0; i < tp->irq_cnt; i++)
6782 		napi_enable(&tp->napi[i].napi);
6783 }
6784 
6785 static void tg3_napi_init(struct tg3 *tp)
6786 {
6787 	int i;
6788 
6789 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6790 	for (i = 1; i < tp->irq_cnt; i++)
6791 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6792 }
6793 
6794 static void tg3_napi_fini(struct tg3 *tp)
6795 {
6796 	int i;
6797 
6798 	for (i = 0; i < tp->irq_cnt; i++)
6799 		netif_napi_del(&tp->napi[i].napi);
6800 }
6801 
6802 static inline void tg3_netif_stop(struct tg3 *tp)
6803 {
6804 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6805 	tg3_napi_disable(tp);
6806 	netif_carrier_off(tp->dev);
6807 	netif_tx_disable(tp->dev);
6808 }
6809 
6810 /* tp->lock must be held */
6811 static inline void tg3_netif_start(struct tg3 *tp)
6812 {
6813 	tg3_ptp_resume(tp);
6814 
6815 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6816 	 * appropriate so long as all callers are assured to
6817 	 * have free tx slots (such as after tg3_init_hw)
6818 	 */
6819 	netif_tx_wake_all_queues(tp->dev);
6820 
6821 	if (tp->link_up)
6822 		netif_carrier_on(tp->dev);
6823 
6824 	tg3_napi_enable(tp);
6825 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6826 	tg3_enable_ints(tp);
6827 }
6828 
6829 static void tg3_irq_quiesce(struct tg3 *tp)
6830 {
6831 	int i;
6832 
6833 	BUG_ON(tp->irq_sync);
6834 
6835 	tp->irq_sync = 1;
6836 	smp_mb();
6837 
6838 	for (i = 0; i < tp->irq_cnt; i++)
6839 		synchronize_irq(tp->napi[i].irq_vec);
6840 }
6841 
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * as well.  Most of the time this is only necessary when shutting
 * down the device.
 */
6846  */
6847 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6848 {
6849 	spin_lock_bh(&tp->lock);
6850 	if (irq_sync)
6851 		tg3_irq_quiesce(tp);
6852 }
6853 
6854 static inline void tg3_full_unlock(struct tg3 *tp)
6855 {
6856 	spin_unlock_bh(&tp->lock);
6857 }
6858 
6859 /* One-shot MSI handler - Chip automatically disables interrupt
6860  * after sending MSI so driver doesn't have to do it.
6861  */
6862 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6863 {
6864 	struct tg3_napi *tnapi = dev_id;
6865 	struct tg3 *tp = tnapi->tp;
6866 
6867 	prefetch(tnapi->hw_status);
6868 	if (tnapi->rx_rcb)
6869 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6870 
6871 	if (likely(!tg3_irq_sync(tp)))
6872 		napi_schedule(&tnapi->napi);
6873 
6874 	return IRQ_HANDLED;
6875 }
6876 
6877 /* MSI ISR - No need to check for interrupt sharing and no need to
6878  * flush status block and interrupt mailbox. PCI ordering rules
6879  * guarantee that MSI will arrive after the status block.
6880  */
6881 static irqreturn_t tg3_msi(int irq, void *dev_id)
6882 {
6883 	struct tg3_napi *tnapi = dev_id;
6884 	struct tg3 *tp = tnapi->tp;
6885 
6886 	prefetch(tnapi->hw_status);
6887 	if (tnapi->rx_rcb)
6888 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6889 	/*
6890 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6891 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6893 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6894 	 * event coalescing.
6895 	 */
6896 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6897 	if (likely(!tg3_irq_sync(tp)))
6898 		napi_schedule(&tnapi->napi);
6899 
6900 	return IRQ_RETVAL(1);
6901 }
6902 
6903 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6904 {
6905 	struct tg3_napi *tnapi = dev_id;
6906 	struct tg3 *tp = tnapi->tp;
6907 	struct tg3_hw_status *sblk = tnapi->hw_status;
6908 	unsigned int handled = 1;
6909 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write that preceded it has
	 * posted.  Reading the PCI State register will confirm whether
	 * the interrupt is ours and will flush the status block.
	 */
6915 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6916 		if (tg3_flag(tp, CHIP_RESETTING) ||
6917 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6918 			handled = 0;
6919 			goto out;
6920 		}
6921 	}
6922 
6923 	/*
6924 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6925 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6927 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6928 	 * event coalescing.
6929 	 *
6930 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6931 	 * spurious interrupts.  The flush impacts performance but
6932 	 * excessive spurious interrupts can be worse in some cases.
6933 	 */
6934 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6935 	if (tg3_irq_sync(tp))
6936 		goto out;
6937 	sblk->status &= ~SD_STATUS_UPDATED;
6938 	if (likely(tg3_has_work(tnapi))) {
6939 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6940 		napi_schedule(&tnapi->napi);
6941 	} else {
		/* No work, shared interrupt perhaps?  Re-enable
		 * interrupts, and flush that PCI write.
		 */
6945 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6946 			       0x00000000);
6947 	}
6948 out:
6949 	return IRQ_RETVAL(handled);
6950 }
6951 
6952 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6953 {
6954 	struct tg3_napi *tnapi = dev_id;
6955 	struct tg3 *tp = tnapi->tp;
6956 	struct tg3_hw_status *sblk = tnapi->hw_status;
6957 	unsigned int handled = 1;
6958 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write that preceded it has
	 * posted.  Reading the PCI State register will confirm whether
	 * the interrupt is ours and will flush the status block.
	 */
6964 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6965 		if (tg3_flag(tp, CHIP_RESETTING) ||
6966 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6967 			handled = 0;
6968 			goto out;
6969 		}
6970 	}
6971 
6972 	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6976 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6977 	 * event coalescing.
6978 	 *
6979 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6980 	 * spurious interrupts.  The flush impacts performance but
6981 	 * excessive spurious interrupts can be worse in some cases.
6982 	 */
6983 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6984 
6985 	/*
6986 	 * In a shared interrupt configuration, sometimes other devices'
6987 	 * interrupts will scream.  We record the current status tag here
6988 	 * so that the above check can report that the screaming interrupts
6989 	 * are unhandled.  Eventually they will be silenced.
6990 	 */
6991 	tnapi->last_irq_tag = sblk->status_tag;
6992 
6993 	if (tg3_irq_sync(tp))
6994 		goto out;
6995 
6996 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6997 
6998 	napi_schedule(&tnapi->napi);
6999 
7000 out:
7001 	return IRQ_RETVAL(handled);
7002 }
7003 
7004 /* ISR for interrupt test */
7005 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7006 {
7007 	struct tg3_napi *tnapi = dev_id;
7008 	struct tg3 *tp = tnapi->tp;
7009 	struct tg3_hw_status *sblk = tnapi->hw_status;
7010 
7011 	if ((sblk->status & SD_STATUS_UPDATED) ||
7012 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7013 		tg3_disable_ints(tp);
7014 		return IRQ_RETVAL(1);
7015 	}
7016 	return IRQ_RETVAL(0);
7017 }
7018 
7019 #ifdef CONFIG_NET_POLL_CONTROLLER
7020 static void tg3_poll_controller(struct net_device *dev)
7021 {
7022 	int i;
7023 	struct tg3 *tp = netdev_priv(dev);
7024 
7025 	if (tg3_irq_sync(tp))
7026 		return;
7027 
7028 	for (i = 0; i < tp->irq_cnt; i++)
7029 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7030 }
7031 #endif
7032 
7033 static void tg3_tx_timeout(struct net_device *dev)
7034 {
7035 	struct tg3 *tp = netdev_priv(dev);
7036 
7037 	if (netif_msg_tx_err(tp)) {
7038 		netdev_err(dev, "transmit timed out, resetting\n");
7039 		tg3_dump_state(tp);
7040 	}
7041 
7042 	tg3_reset_task_schedule(tp);
7043 }
7044 
7045 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
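/* Example (hypothetical values): mapping = 0xfffffff0 and len = 0x20
 * give base + len + 8 = 0x100000018, which truncates to 0x18 < base,
 * so the buffer straddles a 4GB boundary and the test fires.
 */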
7046 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7047 {
7048 	u32 base = (u32) mapping & 0xffffffff;
7049 
7050 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7051 }
7052 
7053 /* Test for DMA addresses > 40-bit */
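/* Example (hypothetical values): mapping = 0xfffffffff0 with len = 0x20
 * gives mapping + len = 0x10000000010, which exceeds DMA_BIT_MASK(40),
 * so affected chips must take the workaround path.
 */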
7054 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7055 					  int len)
7056 {
7057 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7058 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7059 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7060 	return 0;
7061 #else
7062 	return 0;
7063 #endif
7064 }
7065 
7066 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7067 				 dma_addr_t mapping, u32 len, u32 flags,
7068 				 u32 mss, u32 vlan)
7069 {
7070 	txbd->addr_hi = ((u64) mapping >> 32);
7071 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7072 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7073 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7074 }
7075 
7076 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7077 			    dma_addr_t map, u32 len, u32 flags,
7078 			    u32 mss, u32 vlan)
7079 {
7080 	struct tg3 *tp = tnapi->tp;
7081 	bool hwbug = false;
7082 
7083 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7084 		hwbug = true;
7085 
7086 	if (tg3_4g_overflow_test(map, len))
7087 		hwbug = true;
7088 
7089 	if (tg3_40bit_overflow_test(tp, map, len))
7090 		hwbug = true;
7091 
7092 	if (tp->dma_limit) {
7093 		u32 prvidx = *entry;
7094 		u32 tmp_flag = flags & ~TXD_FLAG_END;
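		/* Example (hypothetical values): dma_limit = 4096 and
		 * len = 4100 would leave a 4-byte tail, so the first BD
		 * is shrunk to 2048 bytes and the remaining 2052 go out
		 * in the next BD, sidestepping the sub-8-byte DMA
		 * erratum handled below.
		 */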
7095 		while (len > tp->dma_limit && *budget) {
7096 			u32 frag_len = tp->dma_limit;
7097 			len -= tp->dma_limit;
7098 
			/* Avoid the 8-byte DMA problem */
7100 			if (len <= 8) {
7101 				len += tp->dma_limit / 2;
7102 				frag_len = tp->dma_limit / 2;
7103 			}
7104 
7105 			tnapi->tx_buffers[*entry].fragmented = true;
7106 
7107 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7108 				      frag_len, tmp_flag, mss, vlan);
7109 			*budget -= 1;
7110 			prvidx = *entry;
7111 			*entry = NEXT_TX(*entry);
7112 
7113 			map += frag_len;
7114 		}
7115 
7116 		if (len) {
7117 			if (*budget) {
7118 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7119 					      len, flags, mss, vlan);
7120 				*budget -= 1;
7121 				*entry = NEXT_TX(*entry);
7122 			} else {
7123 				hwbug = true;
7124 				tnapi->tx_buffers[prvidx].fragmented = false;
7125 			}
7126 		}
7127 	} else {
7128 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7129 			      len, flags, mss, vlan);
7130 		*entry = NEXT_TX(*entry);
7131 	}
7132 
7133 	return hwbug;
7134 }
7135 
7136 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7137 {
7138 	int i;
7139 	struct sk_buff *skb;
7140 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7141 
7142 	skb = txb->skb;
7143 	txb->skb = NULL;
7144 
7145 	pci_unmap_single(tnapi->tp->pdev,
7146 			 dma_unmap_addr(txb, mapping),
7147 			 skb_headlen(skb),
7148 			 PCI_DMA_TODEVICE);
7149 
7150 	while (txb->fragmented) {
7151 		txb->fragmented = false;
7152 		entry = NEXT_TX(entry);
7153 		txb = &tnapi->tx_buffers[entry];
7154 	}
7155 
7156 	for (i = 0; i <= last; i++) {
7157 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7158 
7159 		entry = NEXT_TX(entry);
7160 		txb = &tnapi->tx_buffers[entry];
7161 
7162 		pci_unmap_page(tnapi->tp->pdev,
7163 			       dma_unmap_addr(txb, mapping),
7164 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7165 
7166 		while (txb->fragmented) {
7167 			txb->fragmented = false;
7168 			entry = NEXT_TX(entry);
7169 			txb = &tnapi->tx_buffers[entry];
7170 		}
7171 	}
7172 }
7173 
7174 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7175 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7176 				       struct sk_buff **pskb,
7177 				       u32 *entry, u32 *budget,
7178 				       u32 base_flags, u32 mss, u32 vlan)
7179 {
7180 	struct tg3 *tp = tnapi->tp;
7181 	struct sk_buff *new_skb, *skb = *pskb;
7182 	dma_addr_t new_addr = 0;
7183 	int ret = 0;
7184 
7185 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7186 		new_skb = skb_copy(skb, GFP_ATOMIC);
7187 	else {
7188 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7189 
7190 		new_skb = skb_copy_expand(skb,
7191 					  skb_headroom(skb) + more_headroom,
7192 					  skb_tailroom(skb), GFP_ATOMIC);
7193 	}
7194 
7195 	if (!new_skb) {
7196 		ret = -1;
7197 	} else {
7198 		/* New SKB is guaranteed to be linear. */
7199 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7200 					  PCI_DMA_TODEVICE);
7201 		/* Make sure the mapping succeeded */
7202 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7203 			dev_kfree_skb(new_skb);
7204 			ret = -1;
7205 		} else {
7206 			u32 save_entry = *entry;
7207 
7208 			base_flags |= TXD_FLAG_END;
7209 
7210 			tnapi->tx_buffers[*entry].skb = new_skb;
7211 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7212 					   mapping, new_addr);
7213 
7214 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7215 					    new_skb->len, base_flags,
7216 					    mss, vlan)) {
7217 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7218 				dev_kfree_skb(new_skb);
7219 				ret = -1;
7220 			}
7221 		}
7222 	}
7223 
7224 	dev_kfree_skb(skb);
7225 	*pskb = new_skb;
7226 	return ret;
7227 }
7228 
7229 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7230 
7231 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7232  * TSO header is greater than 80 bytes.
7233  */
7234 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7235 {
7236 	struct sk_buff *segs, *nskb;
7237 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
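	/* The factor of 3 is a worst-case assumption: each resulting
	 * segment may consume up to three descriptors once headers and
	 * fragment boundaries are accounted for.
	 */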
7238 
7239 	/* Estimate the number of fragments in the worst case */
7240 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7241 		netif_stop_queue(tp->dev);
7242 
7243 		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
7245 		 * tg3_tx(), we update tx index before checking for
7246 		 * netif_tx_queue_stopped().
7247 		 */
7248 		smp_mb();
7249 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7250 			return NETDEV_TX_BUSY;
7251 
7252 		netif_wake_queue(tp->dev);
7253 	}
7254 
7255 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7256 	if (IS_ERR(segs))
7257 		goto tg3_tso_bug_end;
7258 
7259 	do {
7260 		nskb = segs;
7261 		segs = segs->next;
7262 		nskb->next = NULL;
7263 		tg3_start_xmit(nskb, tp->dev);
7264 	} while (segs);
7265 
7266 tg3_tso_bug_end:
7267 	dev_kfree_skb(skb);
7268 
7269 	return NETDEV_TX_OK;
7270 }
7271 
7272 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7273  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7274  */
7275 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7276 {
7277 	struct tg3 *tp = netdev_priv(dev);
7278 	u32 len, entry, base_flags, mss, vlan = 0;
7279 	u32 budget;
7280 	int i = -1, would_hit_hwbug;
7281 	dma_addr_t mapping;
7282 	struct tg3_napi *tnapi;
7283 	struct netdev_queue *txq;
7284 	unsigned int last;
7285 
7286 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7287 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7288 	if (tg3_flag(tp, ENABLE_TSS))
7289 		tnapi++;
7290 
7291 	budget = tg3_tx_avail(tnapi);
7292 
7293 	/* We are running in BH disabled context with netif_tx_lock
7294 	 * and TX reclaim runs via tp->napi.poll inside of a software
7295 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7296 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7297 	 */
7298 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7299 		if (!netif_tx_queue_stopped(txq)) {
7300 			netif_tx_stop_queue(txq);
7301 
7302 			/* This is a hard error, log it. */
7303 			netdev_err(dev,
7304 				   "BUG! Tx Ring full when queue awake!\n");
7305 		}
7306 		return NETDEV_TX_BUSY;
7307 	}
7308 
7309 	entry = tnapi->tx_prod;
7310 	base_flags = 0;
7311 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7312 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7313 
7314 	mss = skb_shinfo(skb)->gso_size;
7315 	if (mss) {
7316 		struct iphdr *iph;
7317 		u32 tcp_opt_len, hdr_len;
7318 
7319 		if (skb_header_cloned(skb) &&
7320 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7321 			goto drop;
7322 
7323 		iph = ip_hdr(skb);
7324 		tcp_opt_len = tcp_optlen(skb);
7325 
7326 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7327 
7328 		if (!skb_is_gso_v6(skb)) {
7329 			iph->check = 0;
7330 			iph->tot_len = htons(mss + hdr_len);
7331 		}
7332 
7333 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7334 		    tg3_flag(tp, TSO_BUG))
7335 			return tg3_tso_bug(tp, skb);
7336 
7337 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7338 			       TXD_FLAG_CPU_POST_DMA);
7339 
7340 		if (tg3_flag(tp, HW_TSO_1) ||
7341 		    tg3_flag(tp, HW_TSO_2) ||
7342 		    tg3_flag(tp, HW_TSO_3)) {
7343 			tcp_hdr(skb)->check = 0;
7344 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7345 		} else
7346 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7347 								 iph->daddr, 0,
7348 								 IPPROTO_TCP,
7349 								 0);
7350 
		/* Fold the TSO header length into the descriptor: bits
		 * 3:2 of hdr_len go into the mss word, bit 4 and bits
		 * 9:5 into base_flags.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
7352 			mss |= (hdr_len & 0xc) << 12;
7353 			if (hdr_len & 0x10)
7354 				base_flags |= 0x00000010;
7355 			base_flags |= (hdr_len & 0x3e0) << 5;
7356 		} else if (tg3_flag(tp, HW_TSO_2))
7357 			mss |= hdr_len << 9;
7358 		else if (tg3_flag(tp, HW_TSO_1) ||
7359 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7360 			if (tcp_opt_len || iph->ihl > 5) {
7361 				int tsflags;
7362 
7363 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7364 				mss |= (tsflags << 11);
7365 			}
7366 		} else {
7367 			if (tcp_opt_len || iph->ihl > 5) {
7368 				int tsflags;
7369 
7370 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7371 				base_flags |= tsflags << 12;
7372 			}
7373 		}
7374 	}
7375 
7376 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7377 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7378 		base_flags |= TXD_FLAG_JMB_PKT;
7379 
7380 	if (vlan_tx_tag_present(skb)) {
7381 		base_flags |= TXD_FLAG_VLAN;
7382 		vlan = vlan_tx_tag_get(skb);
7383 	}
7384 
7385 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7386 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7387 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7388 		base_flags |= TXD_FLAG_HWTSTAMP;
7389 	}
7390 
7391 	len = skb_headlen(skb);
7392 
7393 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7394 	if (pci_dma_mapping_error(tp->pdev, mapping))
7395 		goto drop;
7396 
7397 
7398 	tnapi->tx_buffers[entry].skb = skb;
7399 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7400 
7401 	would_hit_hwbug = 0;
7402 
7403 	if (tg3_flag(tp, 5701_DMA_BUG))
7404 		would_hit_hwbug = 1;
7405 
7406 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7407 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7408 			    mss, vlan)) {
7409 		would_hit_hwbug = 1;
7410 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7411 		u32 tmp_mss = mss;
7412 
7413 		if (!tg3_flag(tp, HW_TSO_1) &&
7414 		    !tg3_flag(tp, HW_TSO_2) &&
7415 		    !tg3_flag(tp, HW_TSO_3))
7416 			tmp_mss = 0;
7417 
7418 		/* Now loop through additional data
7419 		 * fragments, and queue them.
7420 		 */
7421 		last = skb_shinfo(skb)->nr_frags - 1;
7422 		for (i = 0; i <= last; i++) {
7423 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7424 
7425 			len = skb_frag_size(frag);
7426 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7427 						   len, DMA_TO_DEVICE);
7428 
7429 			tnapi->tx_buffers[entry].skb = NULL;
7430 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7431 					   mapping);
7432 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7433 				goto dma_error;
7434 
7435 			if (!budget ||
7436 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7437 					    len, base_flags |
7438 					    ((i == last) ? TXD_FLAG_END : 0),
7439 					    tmp_mss, vlan)) {
7440 				would_hit_hwbug = 1;
7441 				break;
7442 			}
7443 		}
7444 	}
7445 
7446 	if (would_hit_hwbug) {
7447 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7448 
7449 		/* If the workaround fails due to memory/mapping
7450 		 * failure, silently drop this packet.
7451 		 */
7452 		entry = tnapi->tx_prod;
7453 		budget = tg3_tx_avail(tnapi);
7454 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7455 						base_flags, mss, vlan))
7456 			goto drop_nofree;
7457 	}
7458 
7459 	skb_tx_timestamp(skb);
7460 	netdev_tx_sent_queue(txq, skb->len);
7461 
7462 	/* Sync BD data before updating mailbox */
7463 	wmb();
7464 
7465 	/* Packets are ready, update Tx producer idx local and on card. */
7466 	tw32_tx_mbox(tnapi->prodmbox, entry);
7467 
7468 	tnapi->tx_prod = entry;
7469 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7470 		netif_tx_stop_queue(txq);
7471 
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
7477 		smp_mb();
7478 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7479 			netif_tx_wake_queue(txq);
7480 	}
7481 
7482 	mmiowb();
7483 	return NETDEV_TX_OK;
7484 
7485 dma_error:
7486 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7487 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7488 drop:
7489 	dev_kfree_skb(skb);
7490 drop_nofree:
7491 	tp->tx_dropped++;
7492 	return NETDEV_TX_OK;
7493 }
7494 
7495 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7496 {
7497 	if (enable) {
7498 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7499 				  MAC_MODE_PORT_MODE_MASK);
7500 
7501 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7502 
7503 		if (!tg3_flag(tp, 5705_PLUS))
7504 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7505 
7506 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7507 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7508 		else
7509 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7510 	} else {
7511 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7512 
7513 		if (tg3_flag(tp, 5705_PLUS) ||
7514 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7515 		    tg3_asic_rev(tp) == ASIC_REV_5700)
7516 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7517 	}
7518 
7519 	tw32(MAC_MODE, tp->mac_mode);
7520 	udelay(40);
7521 }
7522 
7523 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7524 {
7525 	u32 val, bmcr, mac_mode, ptest = 0;
7526 
7527 	tg3_phy_toggle_apd(tp, false);
7528 	tg3_phy_toggle_automdix(tp, 0);
7529 
7530 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7531 		return -EIO;
7532 
7533 	bmcr = BMCR_FULLDPLX;
7534 	switch (speed) {
7535 	case SPEED_10:
7536 		break;
7537 	case SPEED_100:
7538 		bmcr |= BMCR_SPEED100;
7539 		break;
7540 	case SPEED_1000:
7541 	default:
7542 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7543 			speed = SPEED_100;
7544 			bmcr |= BMCR_SPEED100;
7545 		} else {
7546 			speed = SPEED_1000;
7547 			bmcr |= BMCR_SPEED1000;
7548 		}
7549 	}
7550 
7551 	if (extlpbk) {
7552 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7553 			tg3_readphy(tp, MII_CTRL1000, &val);
7554 			val |= CTL1000_AS_MASTER |
7555 			       CTL1000_ENABLE_MASTER;
7556 			tg3_writephy(tp, MII_CTRL1000, val);
7557 		} else {
7558 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7559 				MII_TG3_FET_PTEST_TRIM_2;
7560 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7561 		}
7562 	} else
7563 		bmcr |= BMCR_LOOPBACK;
7564 
7565 	tg3_writephy(tp, MII_BMCR, bmcr);
7566 
7567 	/* The write needs to be flushed for the FETs */
7568 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7569 		tg3_readphy(tp, MII_BMCR, &bmcr);
7570 
7571 	udelay(40);
7572 
7573 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7574 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
7575 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7576 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7577 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7578 
7579 		/* The write needs to be flushed for the AC131 */
7580 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7581 	}
7582 
7583 	/* Reset to prevent losing 1st rx packet intermittently */
7584 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7585 	    tg3_flag(tp, 5780_CLASS)) {
7586 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7587 		udelay(10);
7588 		tw32_f(MAC_RX_MODE, tp->rx_mode);
7589 	}
7590 
7591 	mac_mode = tp->mac_mode &
7592 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7593 	if (speed == SPEED_1000)
7594 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7595 	else
7596 		mac_mode |= MAC_MODE_PORT_MODE_MII;
7597 
7598 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7599 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7600 
7601 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7602 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7603 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7604 			mac_mode |= MAC_MODE_LINK_POLARITY;
7605 
7606 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7607 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7608 	}
7609 
7610 	tw32(MAC_MODE, mac_mode);
7611 	udelay(40);
7612 
7613 	return 0;
7614 }
7615 
7616 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7617 {
7618 	struct tg3 *tp = netdev_priv(dev);
7619 
7620 	if (features & NETIF_F_LOOPBACK) {
7621 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7622 			return;
7623 
7624 		spin_lock_bh(&tp->lock);
7625 		tg3_mac_loopback(tp, true);
7626 		netif_carrier_on(tp->dev);
7627 		spin_unlock_bh(&tp->lock);
7628 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7629 	} else {
7630 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7631 			return;
7632 
7633 		spin_lock_bh(&tp->lock);
7634 		tg3_mac_loopback(tp, false);
7635 		/* Force link status check */
7636 		tg3_setup_phy(tp, 1);
7637 		spin_unlock_bh(&tp->lock);
7638 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7639 	}
7640 }
7641 
7642 static netdev_features_t tg3_fix_features(struct net_device *dev,
7643 	netdev_features_t features)
7644 {
7645 	struct tg3 *tp = netdev_priv(dev);
7646 
7647 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7648 		features &= ~NETIF_F_ALL_TSO;
7649 
7650 	return features;
7651 }
7652 
7653 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7654 {
7655 	netdev_features_t changed = dev->features ^ features;
7656 
7657 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7658 		tg3_set_loopback(dev, features);
7659 
7660 	return 0;
7661 }
7662 
7663 static void tg3_rx_prodring_free(struct tg3 *tp,
7664 				 struct tg3_rx_prodring_set *tpr)
7665 {
7666 	int i;
7667 
7668 	if (tpr != &tp->napi[0].prodring) {
7669 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7670 		     i = (i + 1) & tp->rx_std_ring_mask)
7671 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7672 					tp->rx_pkt_map_sz);
7673 
7674 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7675 			for (i = tpr->rx_jmb_cons_idx;
7676 			     i != tpr->rx_jmb_prod_idx;
7677 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7678 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7679 						TG3_RX_JMB_MAP_SZ);
7680 			}
7681 		}
7682 
7683 		return;
7684 	}
7685 
7686 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7687 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7688 				tp->rx_pkt_map_sz);
7689 
7690 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7691 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7692 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7693 					TG3_RX_JMB_MAP_SZ);
7694 	}
7695 }
7696 
7697 /* Initialize rx rings for packet processing.
7698  *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
7701  * end up in the driver.  tp->{tx,}lock are held and thus
7702  * we may not sleep.
7703  */
7704 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7705 				 struct tg3_rx_prodring_set *tpr)
7706 {
7707 	u32 i, rx_pkt_dma_sz;
7708 
7709 	tpr->rx_std_cons_idx = 0;
7710 	tpr->rx_std_prod_idx = 0;
7711 	tpr->rx_jmb_cons_idx = 0;
7712 	tpr->rx_jmb_prod_idx = 0;
7713 
7714 	if (tpr != &tp->napi[0].prodring) {
7715 		memset(&tpr->rx_std_buffers[0], 0,
7716 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7717 		if (tpr->rx_jmb_buffers)
7718 			memset(&tpr->rx_jmb_buffers[0], 0,
7719 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7720 		goto done;
7721 	}
7722 
7723 	/* Zero out all descriptors. */
7724 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7725 
7726 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7727 	if (tg3_flag(tp, 5780_CLASS) &&
7728 	    tp->dev->mtu > ETH_DATA_LEN)
7729 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7730 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7731 
7732 	/* Initialize invariants of the rings, we only set this
7733 	 * stuff once.  This works because the card does not
7734 	 * write into the rx buffer posting rings.
7735 	 */
7736 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7737 		struct tg3_rx_buffer_desc *rxd;
7738 
7739 		rxd = &tpr->rx_std[i];
7740 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7741 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7742 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7743 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7744 	}
7745 
7746 	/* Now allocate fresh SKBs for each rx ring. */
7747 	for (i = 0; i < tp->rx_pending; i++) {
7748 		unsigned int frag_size;
7749 
7750 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7751 				      &frag_size) < 0) {
7752 			netdev_warn(tp->dev,
7753 				    "Using a smaller RX standard ring. Only "
7754 				    "%d out of %d buffers were allocated "
7755 				    "successfully\n", i, tp->rx_pending);
7756 			if (i == 0)
7757 				goto initfail;
7758 			tp->rx_pending = i;
7759 			break;
7760 		}
7761 	}
7762 
7763 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7764 		goto done;
7765 
7766 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7767 
7768 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7769 		goto done;
7770 
7771 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7772 		struct tg3_rx_buffer_desc *rxd;
7773 
7774 		rxd = &tpr->rx_jmb[i].std;
7775 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7776 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7777 				  RXD_FLAG_JUMBO;
7778 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7779 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7780 	}
7781 
7782 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7783 		unsigned int frag_size;
7784 
7785 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7786 				      &frag_size) < 0) {
7787 			netdev_warn(tp->dev,
7788 				    "Using a smaller RX jumbo ring. Only %d "
7789 				    "out of %d buffers were allocated "
7790 				    "successfully\n", i, tp->rx_jumbo_pending);
7791 			if (i == 0)
7792 				goto initfail;
7793 			tp->rx_jumbo_pending = i;
7794 			break;
7795 		}
7796 	}
7797 
7798 done:
7799 	return 0;
7800 
7801 initfail:
7802 	tg3_rx_prodring_free(tp, tpr);
7803 	return -ENOMEM;
7804 }
7805 
7806 static void tg3_rx_prodring_fini(struct tg3 *tp,
7807 				 struct tg3_rx_prodring_set *tpr)
7808 {
7809 	kfree(tpr->rx_std_buffers);
7810 	tpr->rx_std_buffers = NULL;
7811 	kfree(tpr->rx_jmb_buffers);
7812 	tpr->rx_jmb_buffers = NULL;
7813 	if (tpr->rx_std) {
7814 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7815 				  tpr->rx_std, tpr->rx_std_mapping);
7816 		tpr->rx_std = NULL;
7817 	}
7818 	if (tpr->rx_jmb) {
7819 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7820 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7821 		tpr->rx_jmb = NULL;
7822 	}
7823 }
7824 
7825 static int tg3_rx_prodring_init(struct tg3 *tp,
7826 				struct tg3_rx_prodring_set *tpr)
7827 {
7828 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7829 				      GFP_KERNEL);
7830 	if (!tpr->rx_std_buffers)
7831 		return -ENOMEM;
7832 
7833 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7834 					 TG3_RX_STD_RING_BYTES(tp),
7835 					 &tpr->rx_std_mapping,
7836 					 GFP_KERNEL);
7837 	if (!tpr->rx_std)
7838 		goto err_out;
7839 
7840 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7841 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7842 					      GFP_KERNEL);
7843 		if (!tpr->rx_jmb_buffers)
7844 			goto err_out;
7845 
7846 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7847 						 TG3_RX_JMB_RING_BYTES(tp),
7848 						 &tpr->rx_jmb_mapping,
7849 						 GFP_KERNEL);
7850 		if (!tpr->rx_jmb)
7851 			goto err_out;
7852 	}
7853 
7854 	return 0;
7855 
7856 err_out:
7857 	tg3_rx_prodring_fini(tp, tpr);
7858 	return -ENOMEM;
7859 }
7860 
7861 /* Free up pending packets in all rx/tx rings.
7862  *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
7865  * end up in the driver.  tp->{tx,}lock is not held and we are not
7866  * in an interrupt context and thus may sleep.
7867  */
7868 static void tg3_free_rings(struct tg3 *tp)
7869 {
7870 	int i, j;
7871 
7872 	for (j = 0; j < tp->irq_cnt; j++) {
7873 		struct tg3_napi *tnapi = &tp->napi[j];
7874 
7875 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7876 
7877 		if (!tnapi->tx_buffers)
7878 			continue;
7879 
7880 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7881 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7882 
7883 			if (!skb)
7884 				continue;
7885 
7886 			tg3_tx_skb_unmap(tnapi, i,
7887 					 skb_shinfo(skb)->nr_frags - 1);
7888 
7889 			dev_kfree_skb_any(skb);
7890 		}
7891 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7892 	}
7893 }
7894 
7895 /* Initialize tx/rx rings for packet processing.
7896  *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
7899  * end up in the driver.  tp->{tx,}lock are held and thus
7900  * we may not sleep.
7901  */
7902 static int tg3_init_rings(struct tg3 *tp)
7903 {
7904 	int i;
7905 
7906 	/* Free up all the SKBs. */
7907 	tg3_free_rings(tp);
7908 
7909 	for (i = 0; i < tp->irq_cnt; i++) {
7910 		struct tg3_napi *tnapi = &tp->napi[i];
7911 
7912 		tnapi->last_tag = 0;
7913 		tnapi->last_irq_tag = 0;
7914 		tnapi->hw_status->status = 0;
7915 		tnapi->hw_status->status_tag = 0;
7916 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7917 
7918 		tnapi->tx_prod = 0;
7919 		tnapi->tx_cons = 0;
7920 		if (tnapi->tx_ring)
7921 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7922 
7923 		tnapi->rx_rcb_ptr = 0;
7924 		if (tnapi->rx_rcb)
7925 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7926 
7927 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7928 			tg3_free_rings(tp);
7929 			return -ENOMEM;
7930 		}
7931 	}
7932 
7933 	return 0;
7934 }
7935 
7936 static void tg3_mem_tx_release(struct tg3 *tp)
7937 {
7938 	int i;
7939 
7940 	for (i = 0; i < tp->irq_max; i++) {
7941 		struct tg3_napi *tnapi = &tp->napi[i];
7942 
7943 		if (tnapi->tx_ring) {
7944 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7945 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7946 			tnapi->tx_ring = NULL;
7947 		}
7948 
7949 		kfree(tnapi->tx_buffers);
7950 		tnapi->tx_buffers = NULL;
7951 	}
7952 }
7953 
7954 static int tg3_mem_tx_acquire(struct tg3 *tp)
7955 {
7956 	int i;
7957 	struct tg3_napi *tnapi = &tp->napi[0];
7958 
7959 	/* If multivector TSS is enabled, vector 0 does not handle
7960 	 * tx interrupts.  Don't allocate any resources for it.
7961 	 */
7962 	if (tg3_flag(tp, ENABLE_TSS))
7963 		tnapi++;
7964 
7965 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7966 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7967 					    TG3_TX_RING_SIZE, GFP_KERNEL);
7968 		if (!tnapi->tx_buffers)
7969 			goto err_out;
7970 
7971 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7972 						    TG3_TX_RING_BYTES,
7973 						    &tnapi->tx_desc_mapping,
7974 						    GFP_KERNEL);
7975 		if (!tnapi->tx_ring)
7976 			goto err_out;
7977 	}
7978 
7979 	return 0;
7980 
7981 err_out:
7982 	tg3_mem_tx_release(tp);
7983 	return -ENOMEM;
7984 }
7985 
7986 static void tg3_mem_rx_release(struct tg3 *tp)
7987 {
7988 	int i;
7989 
7990 	for (i = 0; i < tp->irq_max; i++) {
7991 		struct tg3_napi *tnapi = &tp->napi[i];
7992 
7993 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7994 
7995 		if (!tnapi->rx_rcb)
7996 			continue;
7997 
7998 		dma_free_coherent(&tp->pdev->dev,
7999 				  TG3_RX_RCB_RING_BYTES(tp),
8000 				  tnapi->rx_rcb,
8001 				  tnapi->rx_rcb_mapping);
8002 		tnapi->rx_rcb = NULL;
8003 	}
8004 }
8005 
8006 static int tg3_mem_rx_acquire(struct tg3 *tp)
8007 {
8008 	unsigned int i, limit;
8009 
8010 	limit = tp->rxq_cnt;
8011 
8012 	/* If RSS is enabled, we need a (dummy) producer ring
8013 	 * set on vector zero.  This is the true hw prodring.
8014 	 */
8015 	if (tg3_flag(tp, ENABLE_RSS))
8016 		limit++;
8017 
8018 	for (i = 0; i < limit; i++) {
8019 		struct tg3_napi *tnapi = &tp->napi[i];
8020 
8021 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8022 			goto err_out;
8023 
8024 		/* If multivector RSS is enabled, vector 0
8025 		 * does not handle rx or tx interrupts.
8026 		 * Don't allocate any resources for it.
8027 		 */
8028 		if (!i && tg3_flag(tp, ENABLE_RSS))
8029 			continue;
8030 
8031 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8032 						   TG3_RX_RCB_RING_BYTES(tp),
8033 						   &tnapi->rx_rcb_mapping,
8034 						   GFP_KERNEL);
8035 		if (!tnapi->rx_rcb)
8036 			goto err_out;
8037 
8038 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8039 	}
8040 
8041 	return 0;
8042 
8043 err_out:
8044 	tg3_mem_rx_release(tp);
8045 	return -ENOMEM;
8046 }
8047 
8048 /*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
8051  */
8052 static void tg3_free_consistent(struct tg3 *tp)
8053 {
8054 	int i;
8055 
8056 	for (i = 0; i < tp->irq_cnt; i++) {
8057 		struct tg3_napi *tnapi = &tp->napi[i];
8058 
8059 		if (tnapi->hw_status) {
8060 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8061 					  tnapi->hw_status,
8062 					  tnapi->status_mapping);
8063 			tnapi->hw_status = NULL;
8064 		}
8065 	}
8066 
8067 	tg3_mem_rx_release(tp);
8068 	tg3_mem_tx_release(tp);
8069 
8070 	if (tp->hw_stats) {
8071 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8072 				  tp->hw_stats, tp->stats_mapping);
8073 		tp->hw_stats = NULL;
8074 	}
8075 }
8076 
8077 /*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
8080  */
8081 static int tg3_alloc_consistent(struct tg3 *tp)
8082 {
8083 	int i;
8084 
8085 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8086 					  sizeof(struct tg3_hw_stats),
8087 					  &tp->stats_mapping,
8088 					  GFP_KERNEL);
8089 	if (!tp->hw_stats)
8090 		goto err_out;
8091 
8092 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8093 
8094 	for (i = 0; i < tp->irq_cnt; i++) {
8095 		struct tg3_napi *tnapi = &tp->napi[i];
8096 		struct tg3_hw_status *sblk;
8097 
8098 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8099 						      TG3_HW_STATUS_SIZE,
8100 						      &tnapi->status_mapping,
8101 						      GFP_KERNEL);
8102 		if (!tnapi->hw_status)
8103 			goto err_out;
8104 
8105 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8106 		sblk = tnapi->hw_status;
8107 
8108 		if (tg3_flag(tp, ENABLE_RSS)) {
8109 			u16 *prodptr = NULL;
8110 
8111 			/*
8112 			 * When RSS is enabled, the status block format changes
8113 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8114 			 * and "rx_mini_consumer" members get mapped to the
8115 			 * other three rx return ring producer indexes.
8116 			 */
8117 			switch (i) {
8118 			case 1:
8119 				prodptr = &sblk->idx[0].rx_producer;
8120 				break;
8121 			case 2:
8122 				prodptr = &sblk->rx_jumbo_consumer;
8123 				break;
8124 			case 3:
8125 				prodptr = &sblk->reserved;
8126 				break;
8127 			case 4:
8128 				prodptr = &sblk->rx_mini_consumer;
8129 				break;
8130 			}
8131 			tnapi->rx_rcb_prod_idx = prodptr;
8132 		} else {
8133 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8134 		}
8135 	}
8136 
8137 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8138 		goto err_out;
8139 
8140 	return 0;
8141 
8142 err_out:
8143 	tg3_free_consistent(tp);
8144 	return -ENOMEM;
8145 }
8146 
8147 #define MAX_WAIT_CNT 1000
8148 
/* To stop a block, clear the enable bit and poll until it
 * clears.  tp->lock is held.
8151  */
8152 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8153 {
8154 	unsigned int i;
8155 	u32 val;
8156 
8157 	if (tg3_flag(tp, 5705_PLUS)) {
8158 		switch (ofs) {
8159 		case RCVLSC_MODE:
8160 		case DMAC_MODE:
8161 		case MBFREE_MODE:
8162 		case BUFMGR_MODE:
8163 		case MEMARB_MODE:
8164 			/* We can't enable/disable these bits of the
8165 			 * 5705/5750, just say success.
8166 			 */
8167 			return 0;
8168 
8169 		default:
8170 			break;
8171 		}
8172 	}
8173 
8174 	val = tr32(ofs);
8175 	val &= ~enable_bit;
8176 	tw32_f(ofs, val);
8177 
8178 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8179 		udelay(100);
8180 		val = tr32(ofs);
8181 		if ((val & enable_bit) == 0)
8182 			break;
8183 	}
8184 
8185 	if (i == MAX_WAIT_CNT && !silent) {
8186 		dev_err(&tp->pdev->dev,
8187 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8188 			ofs, enable_bit);
8189 		return -ENODEV;
8190 	}
8191 
8192 	return 0;
8193 }
8194 
8195 /* tp->lock is held. */
8196 static int tg3_abort_hw(struct tg3 *tp, int silent)
8197 {
8198 	int i, err;
8199 
8200 	tg3_disable_ints(tp);
8201 
8202 	tp->rx_mode &= ~RX_MODE_ENABLE;
8203 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8204 	udelay(10);
8205 
8206 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8207 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8208 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8209 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8210 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8211 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8212 
8213 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8214 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8215 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8216 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8217 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8218 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8219 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8220 
8221 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8222 	tw32_f(MAC_MODE, tp->mac_mode);
8223 	udelay(40);
8224 
8225 	tp->tx_mode &= ~TX_MODE_ENABLE;
8226 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8227 
8228 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8229 		udelay(100);
8230 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8231 			break;
8232 	}
8233 	if (i >= MAX_WAIT_CNT) {
8234 		dev_err(&tp->pdev->dev,
8235 			"%s timed out, TX_MODE_ENABLE will not clear "
8236 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8237 		err |= -ENODEV;
8238 	}
8239 
8240 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8241 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8242 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8243 
8244 	tw32(FTQ_RESET, 0xffffffff);
8245 	tw32(FTQ_RESET, 0x00000000);
8246 
8247 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8248 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8249 
8250 	for (i = 0; i < tp->irq_cnt; i++) {
8251 		struct tg3_napi *tnapi = &tp->napi[i];
8252 		if (tnapi->hw_status)
8253 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8254 	}
8255 
8256 	return err;
8257 }
8258 
8259 /* Save PCI command register before chip reset */
8260 static void tg3_save_pci_state(struct tg3 *tp)
8261 {
8262 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8263 }
8264 
8265 /* Restore PCI state after chip reset */
8266 static void tg3_restore_pci_state(struct tg3 *tp)
8267 {
8268 	u32 val;
8269 
8270 	/* Re-enable indirect register accesses. */
8271 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8272 			       tp->misc_host_ctrl);
8273 
8274 	/* Set MAX PCI retry to zero. */
8275 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8276 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8277 	    tg3_flag(tp, PCIX_MODE))
8278 		val |= PCISTATE_RETRY_SAME_DMA;
8279 	/* Allow reads and writes to the APE register and memory space. */
8280 	if (tg3_flag(tp, ENABLE_APE))
8281 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8282 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8283 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8284 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8285 
8286 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8287 
8288 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8289 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8290 				      tp->pci_cacheline_sz);
8291 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8292 				      tp->pci_lat_timer);
8293 	}
8294 
8295 	/* Make sure PCI-X relaxed ordering bit is clear. */
8296 	if (tg3_flag(tp, PCIX_MODE)) {
8297 		u16 pcix_cmd;
8298 
8299 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8300 				     &pcix_cmd);
8301 		pcix_cmd &= ~PCI_X_CMD_ERO;
8302 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8303 				      pcix_cmd);
8304 	}
8305 
8306 	if (tg3_flag(tp, 5780_CLASS)) {
8307 
		/* Chip reset on 5780 will reset the MSI enable bit,
		 * so we need to restore it.
		 */
8311 		if (tg3_flag(tp, USING_MSI)) {
8312 			u16 ctrl;
8313 
8314 			pci_read_config_word(tp->pdev,
8315 					     tp->msi_cap + PCI_MSI_FLAGS,
8316 					     &ctrl);
8317 			pci_write_config_word(tp->pdev,
8318 					      tp->msi_cap + PCI_MSI_FLAGS,
8319 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8320 			val = tr32(MSGINT_MODE);
8321 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8322 		}
8323 	}
8324 }
8325 
8326 /* tp->lock is held. */
8327 static int tg3_chip_reset(struct tg3 *tp)
8328 {
8329 	u32 val;
8330 	void (*write_op)(struct tg3 *, u32, u32);
8331 	int i, err;
8332 
8333 	tg3_nvram_lock(tp);
8334 
8335 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8336 
8337 	/* No matching tg3_nvram_unlock() after this because
8338 	 * chip reset below will undo the nvram lock.
8339 	 */
8340 	tp->nvram_lock_cnt = 0;
8341 
8342 	/* GRC_MISC_CFG core clock reset will clear the memory
8343 	 * enable bit in PCI register 4 and the MSI enable bit
8344 	 * on some chips, so we save relevant registers here.
8345 	 */
8346 	tg3_save_pci_state(tp);
8347 
8348 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8349 	    tg3_flag(tp, 5755_PLUS))
8350 		tw32(GRC_FASTBOOT_PC, 0);
8351 
8352 	/*
8353 	 * We must avoid the readl() that normally takes place.
8354 	 * It locks machines, causes machine checks, and other
8355 	 * fun things.  So, temporarily disable the 5701
8356 	 * hardware workaround, while we do the reset.
8357 	 */
8358 	write_op = tp->write32;
8359 	if (write_op == tg3_write_flush_reg32)
8360 		tp->write32 = tg3_write32;
8361 
8362 	/* Prevent the irq handler from reading or writing PCI registers
8363 	 * during chip reset when the memory enable bit in the PCI command
8364 	 * register may be cleared.  The chip does not generate interrupt
8365 	 * at this time, but the irq handler may still be called due to irq
8366 	 * sharing or irqpoll.
8367 	 */
8368 	tg3_flag_set(tp, CHIP_RESETTING);
8369 	for (i = 0; i < tp->irq_cnt; i++) {
8370 		struct tg3_napi *tnapi = &tp->napi[i];
8371 		if (tnapi->hw_status) {
8372 			tnapi->hw_status->status = 0;
8373 			tnapi->hw_status->status_tag = 0;
8374 		}
8375 		tnapi->last_tag = 0;
8376 		tnapi->last_irq_tag = 0;
8377 	}
8378 	smp_mb();
8379 
8380 	for (i = 0; i < tp->irq_cnt; i++)
8381 		synchronize_irq(tp->napi[i].irq_vec);
8382 
8383 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8384 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8385 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8386 	}
8387 
8388 	/* do the reset */
8389 	val = GRC_MISC_CFG_CORECLK_RESET;
8390 
8391 	if (tg3_flag(tp, PCI_EXPRESS)) {
8392 		/* Force PCIe 1.0a mode */
8393 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8394 		    !tg3_flag(tp, 57765_PLUS) &&
8395 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8396 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8397 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8398 
8399 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8400 			tw32(GRC_MISC_CFG, (1 << 29));
8401 			val |= (1 << 29);
8402 		}
8403 	}
8404 
8405 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8406 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8407 		tw32(GRC_VCPU_EXT_CTRL,
8408 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8409 	}
8410 
8411 	/* Manage gphy power for all CPMU absent PCIe devices. */
8412 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8413 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8414 
8415 	tw32(GRC_MISC_CFG, val);
8416 
8417 	/* restore 5701 hardware bug workaround write method */
8418 	tp->write32 = write_op;
8419 
8420 	/* Unfortunately, we have to delay before the PCI read back.
8421 	 * Some 575X chips even will not respond to a PCI cfg access
8422 	 * when the reset command is given to the chip.
8423 	 *
8424 	 * How do these hardware designers expect things to work
8425 	 * properly if the PCI write is posted for a long period
8426 	 * of time?  It is always necessary to have some method by
8427 	 * which a register read back can occur to push the write
8428 	 * out which does the reset.
8429 	 *
8430 	 * For most tg3 variants the trick below was working.
8431 	 * Ho hum...
8432 	 */
8433 	udelay(120);
8434 
8435 	/* Flush PCI posted writes.  The normal MMIO registers
8436 	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
8438 	 * the case, see above).  I tried to use indirect
8439 	 * register read/write but this upset some 5701 variants.
8440 	 */
8441 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8442 
8443 	udelay(120);
8444 
8445 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8446 		u16 val16;
8447 
8448 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8449 			int j;
8450 			u32 cfg_val;
8451 
8452 			/* Wait for link training to complete.  */
8453 			for (j = 0; j < 5000; j++)
8454 				udelay(100);
8455 
8456 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8457 			pci_write_config_dword(tp->pdev, 0xc4,
8458 					       cfg_val | (1 << 15));
8459 		}
8460 
8461 		/* Clear the "no snoop" and "relaxed ordering" bits. */
8462 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8463 		/*
8464 		 * Older PCIe devices only support the 128 byte
8465 		 * MPS setting.  Enforce the restriction.
8466 		 */
8467 		if (!tg3_flag(tp, CPMU_PRESENT))
8468 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8469 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8470 
8471 		/* Clear error status */
8472 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8473 				      PCI_EXP_DEVSTA_CED |
8474 				      PCI_EXP_DEVSTA_NFED |
8475 				      PCI_EXP_DEVSTA_FED |
8476 				      PCI_EXP_DEVSTA_URD);
8477 	}
8478 
8479 	tg3_restore_pci_state(tp);
8480 
8481 	tg3_flag_clear(tp, CHIP_RESETTING);
8482 	tg3_flag_clear(tp, ERROR_PROCESSED);
8483 
8484 	val = 0;
8485 	if (tg3_flag(tp, 5780_CLASS))
8486 		val = tr32(MEMARB_MODE);
8487 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8488 
8489 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8490 		tg3_stop_fw(tp);
8491 		tw32(0x5000, 0x400);
8492 	}
8493 
8494 	if (tg3_flag(tp, IS_SSB_CORE)) {
8495 		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC
		 * CPU; it is not required here.
8499 		 */
8500 		tg3_stop_fw(tp);
8501 		tg3_halt_cpu(tp, RX_CPU_BASE);
8502 	}
8503 
8504 	tw32(GRC_MODE, tp->grc_mode);
8505 
8506 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8507 		val = tr32(0xc4);
8508 
8509 		tw32(0xc4, val | (1 << 15));
8510 	}
8511 
8512 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8513 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
8514 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8515 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8516 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8517 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8518 	}
8519 
8520 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8521 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8522 		val = tp->mac_mode;
8523 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8524 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8525 		val = tp->mac_mode;
8526 	} else
8527 		val = 0;
8528 
8529 	tw32_f(MAC_MODE, val);
8530 	udelay(40);
8531 
8532 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8533 
8534 	err = tg3_poll_fw(tp);
8535 	if (err)
8536 		return err;
8537 
8538 	tg3_mdio_start(tp);
8539 
8540 	if (tg3_flag(tp, PCI_EXPRESS) &&
8541 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8542 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
8543 	    !tg3_flag(tp, 57765_PLUS)) {
8544 		val = tr32(0x7c00);
8545 
8546 		tw32(0x7c00, val | (1 << 25));
8547 	}
8548 
8549 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8550 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8551 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8552 	}
8553 
8554 	/* Reprobe ASF enable state.  */
8555 	tg3_flag_clear(tp, ENABLE_ASF);
8556 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8557 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8558 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8559 		u32 nic_cfg;
8560 
8561 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8562 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8563 			tg3_flag_set(tp, ENABLE_ASF);
8564 			tp->last_event_jiffies = jiffies;
8565 			if (tg3_flag(tp, 5750_PLUS))
8566 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8567 		}
8568 	}
8569 
8570 	return 0;
8571 }
8572 
8573 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8574 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8575 
8576 /* tp->lock is held. */
8577 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8578 {
8579 	int err;
8580 
8581 	tg3_stop_fw(tp);
8582 
8583 	tg3_write_sig_pre_reset(tp, kind);
8584 
8585 	tg3_abort_hw(tp, silent);
8586 	err = tg3_chip_reset(tp);
8587 
8588 	__tg3_set_mac_addr(tp, 0);
8589 
8590 	tg3_write_sig_legacy(tp, kind);
8591 	tg3_write_sig_post_reset(tp, kind);
8592 
8593 	if (tp->hw_stats) {
8594 		/* Save the stats across chip resets... */
8595 		tg3_get_nstats(tp, &tp->net_stats_prev);
8596 		tg3_get_estats(tp, &tp->estats_prev);
8597 
8598 		/* And make sure the next sample is new data */
8599 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8600 	}
8601 
8602 	if (err)
8603 		return err;
8604 
8605 	return 0;
8606 }
8607 
8608 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8609 {
8610 	struct tg3 *tp = netdev_priv(dev);
8611 	struct sockaddr *addr = p;
8612 	int err = 0, skip_mac_1 = 0;
8613 
8614 	if (!is_valid_ether_addr(addr->sa_data))
8615 		return -EADDRNOTAVAIL;
8616 
8617 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8618 
8619 	if (!netif_running(dev))
8620 		return 0;
8621 
8622 	if (tg3_flag(tp, ENABLE_ASF)) {
8623 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8624 
8625 		addr0_high = tr32(MAC_ADDR_0_HIGH);
8626 		addr0_low = tr32(MAC_ADDR_0_LOW);
8627 		addr1_high = tr32(MAC_ADDR_1_HIGH);
8628 		addr1_low = tr32(MAC_ADDR_1_LOW);
8629 
8630 		/* Skip MAC addr 1 if ASF is using it. */
8631 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8632 		    !(addr1_high == 0 && addr1_low == 0))
8633 			skip_mac_1 = 1;
8634 	}
8635 	spin_lock_bh(&tp->lock);
8636 	__tg3_set_mac_addr(tp, skip_mac_1);
8637 	spin_unlock_bh(&tp->lock);
8638 
8639 	return err;
8640 }
8641 
8642 /* tp->lock is held. */
8643 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8644 			   dma_addr_t mapping, u32 maxlen_flags,
8645 			   u32 nic_addr)
8646 {
8647 	tg3_write_mem(tp,
8648 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8649 		      ((u64) mapping >> 32));
8650 	tg3_write_mem(tp,
8651 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8652 		      ((u64) mapping & 0xffffffff));
8653 	tg3_write_mem(tp,
8654 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8655 		       maxlen_flags);
8656 
8657 	if (!tg3_flag(tp, 5705_PLUS))
8658 		tg3_write_mem(tp,
8659 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8660 			      nic_addr);
8661 }
8662 
8663 
8664 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8665 {
8666 	int i = 0;
8667 
8668 	if (!tg3_flag(tp, ENABLE_TSS)) {
8669 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8670 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8671 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8672 	} else {
8673 		tw32(HOSTCC_TXCOL_TICKS, 0);
8674 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8675 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8676 
8677 		for (; i < tp->txq_cnt; i++) {
8678 			u32 reg;
8679 
8680 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8681 			tw32(reg, ec->tx_coalesce_usecs);
8682 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8683 			tw32(reg, ec->tx_max_coalesced_frames);
8684 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8685 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8686 		}
8687 	}
8688 
8689 	for (; i < tp->irq_max - 1; i++) {
8690 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8691 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8692 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8693 	}
8694 }
8695 
8696 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8697 {
8698 	int i = 0;
8699 	u32 limit = tp->rxq_cnt;
8700 
8701 	if (!tg3_flag(tp, ENABLE_RSS)) {
8702 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8703 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8704 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8705 		limit--;
8706 	} else {
8707 		tw32(HOSTCC_RXCOL_TICKS, 0);
8708 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8709 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8710 	}
8711 
8712 	for (; i < limit; i++) {
8713 		u32 reg;
8714 
8715 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8716 		tw32(reg, ec->rx_coalesce_usecs);
8717 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8718 		tw32(reg, ec->rx_max_coalesced_frames);
8719 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8720 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8721 	}
8722 
8723 	for (; i < tp->irq_max - 1; i++) {
8724 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8725 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8726 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8727 	}
8728 }
8729 
8730 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8731 {
8732 	tg3_coal_tx_init(tp, ec);
8733 	tg3_coal_rx_init(tp, ec);
8734 
8735 	if (!tg3_flag(tp, 5705_PLUS)) {
8736 		u32 val = ec->stats_block_coalesce_usecs;
8737 
8738 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8739 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8740 
8741 		if (!tp->link_up)
8742 			val = 0;
8743 
8744 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8745 	}
8746 }
8747 
8748 /* tp->lock is held. */
8749 static void tg3_rings_reset(struct tg3 *tp)
8750 {
8751 	int i;
8752 	u32 stblk, txrcb, rxrcb, limit;
8753 	struct tg3_napi *tnapi = &tp->napi[0];
8754 
8755 	/* Disable all transmit rings but the first. */
8756 	if (!tg3_flag(tp, 5705_PLUS))
8757 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8758 	else if (tg3_flag(tp, 5717_PLUS))
8759 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8760 	else if (tg3_flag(tp, 57765_CLASS) ||
8761 		 tg3_asic_rev(tp) == ASIC_REV_5762)
8762 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8763 	else
8764 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8765 
8766 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8767 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8768 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

8772 	/* Disable all receive return rings but the first. */
8773 	if (tg3_flag(tp, 5717_PLUS))
8774 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8775 	else if (!tg3_flag(tp, 5705_PLUS))
8776 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8777 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8778 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8779 		 tg3_flag(tp, 57765_CLASS))
8780 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8781 	else
8782 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8783 
8784 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8785 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8786 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8787 			      BDINFO_FLAGS_DISABLED);
8788 
8789 	/* Disable interrupts */
8790 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8791 	tp->napi[0].chk_msi_cnt = 0;
8792 	tp->napi[0].last_rx_cons = 0;
8793 	tp->napi[0].last_tx_cons = 0;
8794 
8795 	/* Zero mailbox registers. */
8796 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8797 		for (i = 1; i < tp->irq_max; i++) {
8798 			tp->napi[i].tx_prod = 0;
8799 			tp->napi[i].tx_cons = 0;
8800 			if (tg3_flag(tp, ENABLE_TSS))
8801 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8802 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8803 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8804 			tp->napi[i].chk_msi_cnt = 0;
8805 			tp->napi[i].last_rx_cons = 0;
8806 			tp->napi[i].last_tx_cons = 0;
8807 		}
8808 		if (!tg3_flag(tp, ENABLE_TSS))
8809 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8810 	} else {
8811 		tp->napi[0].tx_prod = 0;
8812 		tp->napi[0].tx_cons = 0;
8813 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8814 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8815 	}
8816 
8817 	/* Make sure the NIC-based send BD rings are disabled. */
8818 	if (!tg3_flag(tp, 5705_PLUS)) {
8819 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8820 		for (i = 0; i < 16; i++)
8821 			tw32_tx_mbox(mbox + i * 8, 0);
8822 	}
8823 
8824 	txrcb = NIC_SRAM_SEND_RCB;
8825 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8826 
8827 	/* Clear status block in ram. */
8828 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8829 
8830 	/* Set status block DMA address */
8831 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8832 	     ((u64) tnapi->status_mapping >> 32));
8833 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8834 	     ((u64) tnapi->status_mapping & 0xffffffff));
8835 
8836 	if (tnapi->tx_ring) {
8837 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8838 			       (TG3_TX_RING_SIZE <<
8839 				BDINFO_FLAGS_MAXLEN_SHIFT),
8840 			       NIC_SRAM_TX_BUFFER_DESC);
8841 		txrcb += TG3_BDINFO_SIZE;
8842 	}
8843 
8844 	if (tnapi->rx_rcb) {
8845 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8846 			       (tp->rx_ret_ring_mask + 1) <<
8847 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8848 		rxrcb += TG3_BDINFO_SIZE;
8849 	}
8850 
8851 	stblk = HOSTCC_STATBLCK_RING1;
8852 
8853 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8854 		u64 mapping = (u64)tnapi->status_mapping;
8855 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8856 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8857 
8858 		/* Clear status block in ram. */
8859 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8860 
8861 		if (tnapi->tx_ring) {
8862 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8863 				       (TG3_TX_RING_SIZE <<
8864 					BDINFO_FLAGS_MAXLEN_SHIFT),
8865 				       NIC_SRAM_TX_BUFFER_DESC);
8866 			txrcb += TG3_BDINFO_SIZE;
8867 		}
8868 
8869 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8870 			       ((tp->rx_ret_ring_mask + 1) <<
8871 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8872 
8873 		stblk += 8;
8874 		rxrcb += TG3_BDINFO_SIZE;
8875 	}
8876 }
8877 
8878 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8879 {
8880 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8881 
8882 	if (!tg3_flag(tp, 5750_PLUS) ||
8883 	    tg3_flag(tp, 5780_CLASS) ||
8884 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
8885 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
8886 	    tg3_flag(tp, 57765_PLUS))
8887 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8888 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8889 		 tg3_asic_rev(tp) == ASIC_REV_5787)
8890 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8891 	else
8892 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8893 
8894 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8895 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8896 
8897 	val = min(nic_rep_thresh, host_rep_thresh);
8898 	tw32(RCVBDI_STD_THRESH, val);
8899 
8900 	if (tg3_flag(tp, 57765_PLUS))
8901 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8902 
8903 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8904 		return;
8905 
8906 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8907 
8908 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8909 
8910 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8911 	tw32(RCVBDI_JUMBO_THRESH, val);
8912 
8913 	if (tg3_flag(tp, 57765_PLUS))
8914 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8915 }
8916 
8917 static inline u32 calc_crc(unsigned char *buf, int len)
8918 {
8919 	u32 reg;
8920 	u32 tmp;
8921 	int j, k;
8922 
8923 	reg = 0xffffffff;
8924 
8925 	for (j = 0; j < len; j++) {
8926 		reg ^= buf[j];
8927 
8928 		for (k = 0; k < 8; k++) {
8929 			tmp = reg & 0x01;
8930 
8931 			reg >>= 1;
8932 
8933 			if (tmp)
8934 				reg ^= 0xedb88320;
8935 		}
8936 	}
8937 
8938 	return ~reg;
8939 }
8940 
8941 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8942 {
8943 	/* accept or reject all multicast frames */
8944 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8945 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8946 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8947 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8948 }
8949 
8950 static void __tg3_set_rx_mode(struct net_device *dev)
8951 {
8952 	struct tg3 *tp = netdev_priv(dev);
8953 	u32 rx_mode;
8954 
8955 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8956 				  RX_MODE_KEEP_VLAN_TAG);
8957 
8958 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8959 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8960 	 * flag clear.
8961 	 */
8962 	if (!tg3_flag(tp, ENABLE_ASF))
8963 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8964 #endif
8965 
8966 	if (dev->flags & IFF_PROMISC) {
8967 		/* Promiscuous mode. */
8968 		rx_mode |= RX_MODE_PROMISC;
8969 	} else if (dev->flags & IFF_ALLMULTI) {
8970 		/* Accept all multicast. */
8971 		tg3_set_multi(tp, 1);
8972 	} else if (netdev_mc_empty(dev)) {
8973 		/* Reject all multicast. */
8974 		tg3_set_multi(tp, 0);
8975 	} else {
8976 		/* Accept one or more multicast(s). */
8977 		struct netdev_hw_addr *ha;
8978 		u32 mc_filter[4] = { 0, };
8979 		u32 regidx;
8980 		u32 bit;
8981 		u32 crc;
8982 
8983 		netdev_for_each_mc_addr(ha, dev) {
8984 			crc = calc_crc(ha->addr, ETH_ALEN);
8985 			bit = ~crc & 0x7f;
8986 			regidx = (bit & 0x60) >> 5;
8987 			bit &= 0x1f;
8988 			mc_filter[regidx] |= (1 << bit);
8989 		}
8990 
8991 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8992 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8993 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8994 		tw32(MAC_HASH_REG_3, mc_filter[3]);
8995 	}
8996 
8997 	if (rx_mode != tp->rx_mode) {
8998 		tp->rx_mode = rx_mode;
8999 		tw32_f(MAC_RX_MODE, rx_mode);
9000 		udelay(10);
9001 	}
9002 }
9003 
9004 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9005 {
9006 	int i;
9007 
9008 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9009 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9010 }
9011 
9012 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9013 {
9014 	int i;
9015 
9016 	if (!tg3_flag(tp, SUPPORT_MSIX))
9017 		return;
9018 
9019 	if (tp->rxq_cnt == 1) {
9020 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9021 		return;
9022 	}
9023 
	/* Validate the table against the current rx queue count */
9025 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9026 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9027 			break;
9028 	}
9029 
9030 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9031 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9032 }
9033 
9034 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9035 {
9036 	int i = 0;
9037 	u32 reg = MAC_RSS_INDIR_TBL_0;
9038 
9039 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9040 		u32 val = tp->rss_ind_tbl[i];
9041 		i++;
9042 		for (; i % 8; i++) {
9043 			val <<= 4;
9044 			val |= tp->rss_ind_tbl[i];
9045 		}
9046 		tw32(reg, val);
9047 		reg += 4;
9048 	}
9049 }
9050 
9051 /* tp->lock is held. */
9052 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9053 {
9054 	u32 val, rdmac_mode;
9055 	int i, err, limit;
9056 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9057 
9058 	tg3_disable_ints(tp);
9059 
9060 	tg3_stop_fw(tp);
9061 
9062 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9063 
9064 	if (tg3_flag(tp, INIT_COMPLETE))
9065 		tg3_abort_hw(tp, 1);
9066 
9067 	/* Enable MAC control of LPI */
9068 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9069 		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9070 		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
9071 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9072 			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9073 
9074 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9075 
9076 		tw32_f(TG3_CPMU_EEE_CTRL,
9077 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9078 
9079 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9080 		      TG3_CPMU_EEEMD_LPI_IN_TX |
9081 		      TG3_CPMU_EEEMD_LPI_IN_RX |
9082 		      TG3_CPMU_EEEMD_EEE_ENABLE;
9083 
9084 		if (tg3_asic_rev(tp) != ASIC_REV_5717)
9085 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9086 
9087 		if (tg3_flag(tp, ENABLE_APE))
9088 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9089 
9090 		tw32_f(TG3_CPMU_EEE_MODE, val);
9091 
9092 		tw32_f(TG3_CPMU_EEE_DBTMR1,
9093 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9094 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9095 
9096 		tw32_f(TG3_CPMU_EEE_DBTMR2,
9097 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
9098 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9099 	}
9100 
9101 	if (reset_phy)
9102 		tg3_phy_reset(tp);
9103 
9104 	err = tg3_chip_reset(tp);
9105 	if (err)
9106 		return err;
9107 
9108 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9109 
9110 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9111 		val = tr32(TG3_CPMU_CTRL);
9112 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9113 		tw32(TG3_CPMU_CTRL, val);
9114 
9115 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9116 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9117 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9118 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9119 
9120 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9121 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9122 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9123 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9124 
9125 		val = tr32(TG3_CPMU_HST_ACC);
9126 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9127 		val |= CPMU_HST_ACC_MACCLK_6_25;
9128 		tw32(TG3_CPMU_HST_ACC, val);
9129 	}
9130 
9131 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9132 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9133 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9134 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9135 		tw32(PCIE_PWR_MGMT_THRESH, val);
9136 
9137 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9138 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9139 
9140 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9141 
9142 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9143 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9144 	}
9145 
9146 	if (tg3_flag(tp, L1PLLPD_EN)) {
9147 		u32 grc_mode = tr32(GRC_MODE);
9148 
9149 		/* Access the lower 1K of PL PCIE block registers. */
9150 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9151 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9152 
9153 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9154 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9155 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9156 
9157 		tw32(GRC_MODE, grc_mode);
9158 	}
9159 
9160 	if (tg3_flag(tp, 57765_CLASS)) {
9161 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9162 			u32 grc_mode = tr32(GRC_MODE);
9163 
9164 			/* Access the lower 1K of PL PCIE block registers. */
9165 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9166 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9167 
9168 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9169 				   TG3_PCIE_PL_LO_PHYCTL5);
9170 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9171 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9172 
9173 			tw32(GRC_MODE, grc_mode);
9174 		}
9175 
9176 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9177 			u32 grc_mode;
9178 
9179 			/* Fix transmit hangs */
9180 			val = tr32(TG3_CPMU_PADRNG_CTL);
9181 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9182 			tw32(TG3_CPMU_PADRNG_CTL, val);
9183 
9184 			grc_mode = tr32(GRC_MODE);
9185 
9186 			/* Access the lower 1K of DL PCIE block registers. */
9187 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9188 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9189 
9190 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9191 				   TG3_PCIE_DL_LO_FTSMAX);
9192 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9193 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9194 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9195 
9196 			tw32(GRC_MODE, grc_mode);
9197 		}
9198 
9199 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9200 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9201 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9202 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9203 	}
9204 
9205 	/* This works around an issue with Athlon chipsets on
9206 	 * B3 tigon3 silicon.  This bit has no effect on any
9207 	 * other revision.  But do not set this on PCI Express
9208 	 * chips and don't even touch the clocks if the CPMU is present.
9209 	 */
9210 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9211 		if (!tg3_flag(tp, PCI_EXPRESS))
9212 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9213 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9214 	}
9215 
9216 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9217 	    tg3_flag(tp, PCIX_MODE)) {
9218 		val = tr32(TG3PCI_PCISTATE);
9219 		val |= PCISTATE_RETRY_SAME_DMA;
9220 		tw32(TG3PCI_PCISTATE, val);
9221 	}
9222 
9223 	if (tg3_flag(tp, ENABLE_APE)) {
9224 		/* Allow reads and writes to the
9225 		 * APE register and memory space.
9226 		 */
9227 		val = tr32(TG3PCI_PCISTATE);
9228 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9229 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9230 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9231 		tw32(TG3PCI_PCISTATE, val);
9232 	}
9233 
9234 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9235 		/* Enable some hw fixes.  */
9236 		val = tr32(TG3PCI_MSI_DATA);
9237 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9238 		tw32(TG3PCI_MSI_DATA, val);
9239 	}
9240 
9241 	/* Descriptor ring init may make accesses to the
9242 	 * NIC SRAM area to setup the TX descriptors, so we
9243 	 * can only do this after the hardware has been
9244 	 * successfully reset.
9245 	 */
9246 	err = tg3_init_rings(tp);
9247 	if (err)
9248 		return err;
9249 
9250 	if (tg3_flag(tp, 57765_PLUS)) {
9251 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9252 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9253 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9254 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9255 		if (!tg3_flag(tp, 57765_CLASS) &&
9256 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9257 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9258 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9259 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9260 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9261 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9262 		/* This value is determined during the probe time DMA
9263 		 * engine test, tg3_test_dma.
9264 		 */
9265 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9266 	}
9267 
9268 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9269 			  GRC_MODE_4X_NIC_SEND_RINGS |
9270 			  GRC_MODE_NO_TX_PHDR_CSUM |
9271 			  GRC_MODE_NO_RX_PHDR_CSUM);
9272 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9273 
9274 	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
9276 	 * header checksums on receive.  For transmit it is more
9277 	 * convenient to do the pseudo-header checksum in software
9278 	 * as Linux does that on transmit for us in all cases.
9279 	 */
9280 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9281 
9282 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9283 	if (tp->rxptpctl)
9284 		tw32(TG3_RX_PTP_CTL,
9285 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9286 
9287 	if (tg3_flag(tp, PTP_CAPABLE))
9288 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9289 
9290 	tw32(GRC_MODE, tp->grc_mode | val);
9291 
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
9293 	val = tr32(GRC_MISC_CFG);
9294 	val &= ~0xff;
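	/* The prescaler field is programmed as the clock rate in MHz
	 * minus one, so 65 divides the 66 MHz clock down to roughly a
	 * 1 MHz (1 usec) timer tick.
	 */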
9295 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9296 	tw32(GRC_MISC_CFG, val);
9297 
9298 	/* Initialize MBUF/DESC pool. */
9299 	if (tg3_flag(tp, 5750_PLUS)) {
9300 		/* Do nothing.  */
9301 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9302 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9303 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9304 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9305 		else
9306 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9307 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9308 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9309 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9310 		int fw_len;
9311 
9312 		fw_len = tp->fw_len;
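		/* Round the firmware length up to a 128-byte boundary;
		 * the mbuf pool is carved out of the SRAM immediately
		 * after the firmware image.
		 */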
9313 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9314 		tw32(BUFMGR_MB_POOL_ADDR,
9315 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9316 		tw32(BUFMGR_MB_POOL_SIZE,
9317 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9318 	}
9319 
9320 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9321 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9322 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9323 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9324 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9325 		tw32(BUFMGR_MB_HIGH_WATER,
9326 		     tp->bufmgr_config.mbuf_high_water);
9327 	} else {
9328 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9329 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9330 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9331 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9332 		tw32(BUFMGR_MB_HIGH_WATER,
9333 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9334 	}
9335 	tw32(BUFMGR_DMA_LOW_WATER,
9336 	     tp->bufmgr_config.dma_low_water);
9337 	tw32(BUFMGR_DMA_HIGH_WATER,
9338 	     tp->bufmgr_config.dma_high_water);
9339 
9340 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9341 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9342 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9343 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9344 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9345 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9346 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9347 	tw32(BUFMGR_MODE, val);
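	/* Poll up to ~20 ms (2000 * 10 us) for the buffer manager to
	 * report itself enabled.
	 */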
9348 	for (i = 0; i < 2000; i++) {
9349 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9350 			break;
9351 		udelay(10);
9352 	}
9353 	if (i >= 2000) {
9354 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9355 		return -ENODEV;
9356 	}
9357 
9358 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9359 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9360 
9361 	tg3_setup_rxbd_thresholds(tp);
9362 
9363 	/* Initialize TG3_BDINFO's at:
9364 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9365 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9366 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9367 	 *
9368 	 * like so:
9369 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9370 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9371 	 *                              ring attribute flags
9372 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9373 	 *
9374 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9375 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9376 	 *
9377 	 * The size of each ring is fixed in the firmware, but the location is
9378 	 * configurable.
9379 	 */
9380 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9381 	     ((u64) tpr->rx_std_mapping >> 32));
9382 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9383 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
9384 	if (!tg3_flag(tp, 5717_PLUS))
9385 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9386 		     NIC_SRAM_RX_BUFFER_DESC);
9387 
9388 	/* Disable the mini ring */
9389 	if (!tg3_flag(tp, 5705_PLUS))
9390 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9391 		     BDINFO_FLAGS_DISABLED);
9392 
9393 	/* Program the jumbo buffer descriptor ring control
9394 	 * blocks on those devices that have them.
9395 	 */
9396 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9397 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9398 
9399 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9400 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9401 			     ((u64) tpr->rx_jmb_mapping >> 32));
9402 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9403 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9404 			val = TG3_RX_JMB_RING_SIZE(tp) <<
9405 			      BDINFO_FLAGS_MAXLEN_SHIFT;
9406 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9407 			     val | BDINFO_FLAGS_USE_EXT_RECV);
9408 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9409 			    tg3_flag(tp, 57765_CLASS) ||
9410 			    tg3_asic_rev(tp) == ASIC_REV_5762)
9411 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9412 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9413 		} else {
9414 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9415 			     BDINFO_FLAGS_DISABLED);
9416 		}
9417 
9418 		if (tg3_flag(tp, 57765_PLUS)) {
9419 			val = TG3_RX_STD_RING_SIZE(tp);
9420 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9421 			val |= (TG3_RX_STD_DMA_SZ << 2);
9422 		} else
9423 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9424 	} else
9425 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9426 
9427 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9428 
9429 	tpr->rx_std_prod_idx = tp->rx_pending;
9430 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9431 
9432 	tpr->rx_jmb_prod_idx =
9433 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9434 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9435 
9436 	tg3_rings_reset(tp);
9437 
9438 	/* Initialize MAC address and backoff seed. */
9439 	__tg3_set_mac_addr(tp, 0);
9440 
9441 	/* MTU + ethernet header + FCS + optional VLAN tag */
9442 	tw32(MAC_RX_MTU_SIZE,
9443 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9444 
9445 	/* The slot time is changed by tg3_setup_phy if we
9446 	 * run at gigabit with half duplex.
9447 	 */
9448 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9449 	      (6 << TX_LENGTHS_IPG_SHIFT) |
9450 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9451 
9452 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9453 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9454 		val |= tr32(MAC_TX_LENGTHS) &
9455 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
9456 			TX_LENGTHS_CNT_DWN_VAL_MSK);
9457 
9458 	tw32(MAC_TX_LENGTHS, val);
9459 
9460 	/* Receive rules. */
9461 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9462 	tw32(RCVLPC_CONFIG, 0x0181);
9463 
	/* Calculate the RDMAC_MODE setting early; we need it to determine
9465 	 * the RCVLPC_STATE_ENABLE mask.
9466 	 */
9467 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9468 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9469 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9470 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9471 		      RDMAC_MODE_LNGREAD_ENAB);
9472 
9473 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
9474 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9475 
9476 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9477 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9478 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9479 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9480 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9481 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9482 
9483 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9484 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9485 		if (tg3_flag(tp, TSO_CAPABLE) &&
9486 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
9487 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9488 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9489 			   !tg3_flag(tp, IS_5788)) {
9490 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9491 		}
9492 	}
9493 
9494 	if (tg3_flag(tp, PCI_EXPRESS))
9495 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9496 
9497 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9498 		tp->dma_limit = 0;
9499 		if (tp->dev->mtu <= ETH_DATA_LEN) {
9500 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9501 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9502 		}
9503 	}
9504 
9505 	if (tg3_flag(tp, HW_TSO_1) ||
9506 	    tg3_flag(tp, HW_TSO_2) ||
9507 	    tg3_flag(tp, HW_TSO_3))
9508 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9509 
9510 	if (tg3_flag(tp, 57765_PLUS) ||
9511 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9512 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9513 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9514 
9515 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9516 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9517 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9518 
9519 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9520 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
9521 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9522 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
9523 	    tg3_flag(tp, 57765_PLUS)) {
9524 		u32 tgtreg;
9525 
9526 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9527 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9528 		else
9529 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
9530 
9531 		val = tr32(tgtreg);
9532 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9533 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
9534 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9535 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9536 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9537 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9538 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9539 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9540 		}
9541 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9542 	}
9543 
9544 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9545 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
9546 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
9547 		u32 tgtreg;
9548 
9549 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9550 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9551 		else
9552 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9553 
9554 		val = tr32(tgtreg);
9555 		tw32(tgtreg, val |
9556 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9557 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9558 	}
9559 
9560 	/* Receive/send statistics. */
9561 	if (tg3_flag(tp, 5750_PLUS)) {
9562 		val = tr32(RCVLPC_STATS_ENABLE);
9563 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
9564 		tw32(RCVLPC_STATS_ENABLE, val);
9565 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9566 		   tg3_flag(tp, TSO_CAPABLE)) {
9567 		val = tr32(RCVLPC_STATS_ENABLE);
9568 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9569 		tw32(RCVLPC_STATS_ENABLE, val);
9570 	} else {
9571 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9572 	}
9573 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9574 	tw32(SNDDATAI_STATSENAB, 0xffffff);
9575 	tw32(SNDDATAI_STATSCTRL,
9576 	     (SNDDATAI_SCTRL_ENABLE |
9577 	      SNDDATAI_SCTRL_FASTUPD));
9578 
	/* Set up the host coalescing engine. */
9580 	tw32(HOSTCC_MODE, 0);
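	/* Poll up to ~20 ms (2000 * 10 us) for the enable bit to clear
	 * before reprogramming the coalescing parameters.
	 */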
9581 	for (i = 0; i < 2000; i++) {
9582 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9583 			break;
9584 		udelay(10);
9585 	}
9586 
9587 	__tg3_set_coalesce(tp, &tp->coal);
9588 
9589 	if (!tg3_flag(tp, 5705_PLUS)) {
9590 		/* Status/statistics block address.  See tg3_timer,
9591 		 * the tg3_periodic_fetch_stats call there, and
9592 		 * tg3_get_stats to see how this works for 5705/5750 chips.
9593 		 */
9594 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9595 		     ((u64) tp->stats_mapping >> 32));
9596 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9597 		     ((u64) tp->stats_mapping & 0xffffffff));
9598 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9599 
9600 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9601 
9602 		/* Clear statistics and status block memory areas */
9603 		for (i = NIC_SRAM_STATS_BLK;
9604 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9605 		     i += sizeof(u32)) {
9606 			tg3_write_mem(tp, i, 0);
9607 			udelay(40);
9608 		}
9609 	}
9610 
9611 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9612 
9613 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9614 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9615 	if (!tg3_flag(tp, 5705_PLUS))
9616 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9617 
9618 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9619 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* Reset to prevent intermittently losing the first RX packet. */
9621 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9622 		udelay(10);
9623 	}
9624 
9625 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9626 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9627 			MAC_MODE_FHDE_ENABLE;
9628 	if (tg3_flag(tp, ENABLE_APE))
9629 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9630 	if (!tg3_flag(tp, 5705_PLUS) &&
9631 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9632 	    tg3_asic_rev(tp) != ASIC_REV_5700)
9633 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9634 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9635 	udelay(40);
9636 
9637 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9638 	 * If TG3_FLAG_IS_NIC is zero, we should read the
9639 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9640 	 * whether used as inputs or outputs, are set by boot code after
9641 	 * reset.
9642 	 */
9643 	if (!tg3_flag(tp, IS_NIC)) {
9644 		u32 gpio_mask;
9645 
9646 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9647 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9648 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9649 
9650 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
9651 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9652 				     GRC_LCLCTRL_GPIO_OUTPUT3;
9653 
9654 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
9655 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9656 
9657 		tp->grc_local_ctrl &= ~gpio_mask;
9658 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9659 
		/* GPIO1 must be driven high for EEPROM write protect. */
9661 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9662 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9663 					       GRC_LCLCTRL_GPIO_OUTPUT1);
9664 	}
9665 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9666 	udelay(100);
9667 
9668 	if (tg3_flag(tp, USING_MSIX)) {
9669 		val = tr32(MSGINT_MODE);
9670 		val |= MSGINT_MODE_ENABLE;
9671 		if (tp->irq_cnt > 1)
9672 			val |= MSGINT_MODE_MULTIVEC_EN;
9673 		if (!tg3_flag(tp, 1SHOT_MSI))
9674 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9675 		tw32(MSGINT_MODE, val);
9676 	}
9677 
9678 	if (!tg3_flag(tp, 5705_PLUS)) {
9679 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9680 		udelay(40);
9681 	}
9682 
9683 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9684 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9685 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9686 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9687 	       WDMAC_MODE_LNGREAD_ENAB);
9688 
9689 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9690 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9691 		if (tg3_flag(tp, TSO_CAPABLE) &&
9692 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9693 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9694 			/* nothing */
9695 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9696 			   !tg3_flag(tp, IS_5788)) {
9697 			val |= WDMAC_MODE_RX_ACCEL;
9698 		}
9699 	}
9700 
9701 	/* Enable host coalescing bug fix */
9702 	if (tg3_flag(tp, 5755_PLUS))
9703 		val |= WDMAC_MODE_STATUS_TAG_FIX;
9704 
9705 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
9706 		val |= WDMAC_MODE_BURST_ALL_DATA;
9707 
9708 	tw32_f(WDMAC_MODE, val);
9709 	udelay(40);
9710 
9711 	if (tg3_flag(tp, PCIX_MODE)) {
9712 		u16 pcix_cmd;
9713 
9714 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9715 				     &pcix_cmd);
9716 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9717 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9718 			pcix_cmd |= PCI_X_CMD_READ_2K;
9719 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9720 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9721 			pcix_cmd |= PCI_X_CMD_READ_2K;
9722 		}
9723 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9724 				      pcix_cmd);
9725 	}
9726 
9727 	tw32_f(RDMAC_MODE, rdmac_mode);
9728 	udelay(40);
9729 
9730 	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9731 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9732 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9733 				break;
9734 		}
9735 		if (i < TG3_NUM_RDMA_CHANNELS) {
9736 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9737 			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9738 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9739 			tg3_flag_set(tp, 5719_RDMA_BUG);
9740 		}
9741 	}
9742 
9743 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9744 	if (!tg3_flag(tp, 5705_PLUS))
9745 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9746 
9747 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
9748 		tw32(SNDDATAC_MODE,
9749 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9750 	else
9751 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9752 
9753 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9754 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9755 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9756 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9757 		val |= RCVDBDI_MODE_LRG_RING_SZ;
9758 	tw32(RCVDBDI_MODE, val);
9759 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9760 	if (tg3_flag(tp, HW_TSO_1) ||
9761 	    tg3_flag(tp, HW_TSO_2) ||
9762 	    tg3_flag(tp, HW_TSO_3))
9763 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9764 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9765 	if (tg3_flag(tp, ENABLE_TSS))
9766 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9767 	tw32(SNDBDI_MODE, val);
9768 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9769 
9770 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9771 		err = tg3_load_5701_a0_firmware_fix(tp);
9772 		if (err)
9773 			return err;
9774 	}
9775 
9776 	if (tg3_flag(tp, TSO_CAPABLE)) {
9777 		err = tg3_load_tso_firmware(tp);
9778 		if (err)
9779 			return err;
9780 	}
9781 
9782 	tp->tx_mode = TX_MODE_ENABLE;
9783 
9784 	if (tg3_flag(tp, 5755_PLUS) ||
9785 	    tg3_asic_rev(tp) == ASIC_REV_5906)
9786 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9787 
9788 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9789 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
9790 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9791 		tp->tx_mode &= ~val;
9792 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9793 	}
9794 
9795 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9796 	udelay(100);
9797 
9798 	if (tg3_flag(tp, ENABLE_RSS)) {
9799 		tg3_rss_write_indir_tbl(tp);
9800 
		/* Set up the "secret" hash key. */
9802 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9803 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9804 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9805 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9806 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9807 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9808 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9809 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9810 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9811 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9812 	}
9813 
9814 	tp->rx_mode = RX_MODE_ENABLE;
9815 	if (tg3_flag(tp, 5755_PLUS))
9816 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9817 
9818 	if (tg3_flag(tp, ENABLE_RSS))
9819 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9820 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9821 			       RX_MODE_RSS_IPV6_HASH_EN |
9822 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9823 			       RX_MODE_RSS_IPV4_HASH_EN |
9824 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9825 
9826 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9827 	udelay(10);
9828 
9829 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9830 
9831 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9832 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9833 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9834 		udelay(10);
9835 	}
9836 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9837 	udelay(10);
9838 
9839 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9840 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9841 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set the drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
9844 			val = tr32(MAC_SERDES_CFG);
9845 			val &= 0xfffff000;
9846 			val |= 0x880;
9847 			tw32(MAC_SERDES_CFG, val);
9848 		}
9849 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9850 			tw32(MAC_SERDES_CFG, 0x616000);
9851 	}
9852 
9853 	/* Prevent chip from dropping frames when flow control
9854 	 * is enabled.
9855 	 */
9856 	if (tg3_flag(tp, 57765_CLASS))
9857 		val = 1;
9858 	else
9859 		val = 2;
9860 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9861 
9862 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9863 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9864 		/* Use hardware link auto-negotiation */
9865 		tg3_flag_set(tp, HW_AUTONEG);
9866 	}
9867 
9868 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9869 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
9870 		u32 tmp;
9871 
9872 		tmp = tr32(SERDES_RX_CTRL);
9873 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9874 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9875 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9876 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9877 	}
9878 
9879 	if (!tg3_flag(tp, USE_PHYLIB)) {
9880 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9881 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9882 
9883 		err = tg3_setup_phy(tp, 0);
9884 		if (err)
9885 			return err;
9886 
9887 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9888 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9889 			u32 tmp;
9890 
9891 			/* Clear CRC stats. */
9892 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9893 				tg3_writephy(tp, MII_TG3_TEST1,
9894 					     tmp | MII_TG3_TEST1_CRC_EN);
9895 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9896 			}
9897 		}
9898 	}
9899 
9900 	__tg3_set_rx_mode(tp->dev);
9901 
9902 	/* Initialize receive rules. */
9903 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9904 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9905 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9906 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9907 
9908 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9909 		limit = 8;
9910 	else
9911 		limit = 16;
9912 	if (tg3_flag(tp, ENABLE_ASF))
9913 		limit -= 4;
9914 	switch (limit) {
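	/* Each case intentionally falls through, clearing all rules
	 * from 'limit - 1' down to 4.
	 */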
9915 	case 16:
9916 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9917 	case 15:
9918 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9919 	case 14:
9920 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9921 	case 13:
9922 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9923 	case 12:
9924 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9925 	case 11:
9926 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9927 	case 10:
9928 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9929 	case 9:
9930 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9931 	case 8:
9932 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9933 	case 7:
9934 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9935 	case 6:
9936 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9937 	case 5:
9938 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9939 	case 4:
9940 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9941 	case 3:
9942 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9943 	case 2:
9944 	case 1:
9945 
9946 	default:
9947 		break;
9948 	}
9949 
9950 	if (tg3_flag(tp, ENABLE_APE))
9951 		/* Write our heartbeat update interval to APE. */
9952 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9953 				APE_HOST_HEARTBEAT_INT_DISABLE);
9954 
9955 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9956 
9957 	return 0;
9958 }
9959 
9960 /* Called at device open time to get the chip ready for
9961  * packet processing.  Invoked with tp->lock held.
9962  */
9963 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9964 {
9965 	tg3_switch_clocks(tp);
9966 
9967 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9968 
9969 	return tg3_reset_hw(tp, reset_phy);
9970 }
9971 
9972 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9973 {
9974 	int i;
9975 
9976 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9977 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9978 
9979 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9980 		off += len;
9981 
9982 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9983 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9984 			memset(ocir, 0, TG3_OCIR_LEN);
9985 	}
9986 }
9987 
9988 /* sysfs attributes for hwmon */
9989 static ssize_t tg3_show_temp(struct device *dev,
9990 			     struct device_attribute *devattr, char *buf)
9991 {
9992 	struct pci_dev *pdev = to_pci_dev(dev);
9993 	struct net_device *netdev = pci_get_drvdata(pdev);
9994 	struct tg3 *tp = netdev_priv(netdev);
9995 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9996 	u32 temperature;
9997 
9998 	spin_lock_bh(&tp->lock);
9999 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10000 				sizeof(temperature));
10001 	spin_unlock_bh(&tp->lock);
10002 	return sprintf(buf, "%u\n", temperature);
10003 }
10004 
10005 
10006 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10007 			  TG3_TEMP_SENSOR_OFFSET);
10008 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10009 			  TG3_TEMP_CAUTION_OFFSET);
10010 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10011 			  TG3_TEMP_MAX_OFFSET);
10012 
10013 static struct attribute *tg3_attributes[] = {
10014 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10015 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10016 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10017 	NULL
10018 };
10019 
10020 static const struct attribute_group tg3_group = {
10021 	.attrs = tg3_attributes,
10022 };
10023 
10024 static void tg3_hwmon_close(struct tg3 *tp)
10025 {
10026 	if (tp->hwmon_dev) {
10027 		hwmon_device_unregister(tp->hwmon_dev);
10028 		tp->hwmon_dev = NULL;
10029 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10030 	}
10031 }
10032 
10033 static void tg3_hwmon_open(struct tg3 *tp)
10034 {
10035 	int i, err;
10036 	u32 size = 0;
10037 	struct pci_dev *pdev = tp->pdev;
10038 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10039 
10040 	tg3_sd_scan_scratchpad(tp, ocirs);
10041 
10042 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10043 		if (!ocirs[i].src_data_length)
10044 			continue;
10045 
10046 		size += ocirs[i].src_hdr_length;
10047 		size += ocirs[i].src_data_length;
10048 	}
10049 
10050 	if (!size)
10051 		return;
10052 
10053 	/* Register hwmon sysfs hooks */
10054 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10055 	if (err) {
10056 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10057 		return;
10058 	}
10059 
10060 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10061 	if (IS_ERR(tp->hwmon_dev)) {
10062 		tp->hwmon_dev = NULL;
10063 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10064 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10065 	}
10066 }
10067 
10068 
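/* Fold a 32-bit hardware counter into a 64-bit driver statistic,
 * carrying into the high word when the low word wraps around.
 */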
10069 #define TG3_STAT_ADD32(PSTAT, REG) \
10070 do {	u32 __val = tr32(REG); \
10071 	(PSTAT)->low += __val; \
10072 	if ((PSTAT)->low < __val) \
10073 		(PSTAT)->high += 1; \
10074 } while (0)
10075 
10076 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10077 {
10078 	struct tg3_hw_stats *sp = tp->hw_stats;
10079 
10080 	if (!tp->link_up)
10081 		return;
10082 
10083 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10084 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10085 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10086 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10087 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10088 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10089 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10090 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10091 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10092 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10093 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10094 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10095 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10096 	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10097 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10098 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10099 		u32 val;
10100 
10101 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10102 		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10103 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10104 		tg3_flag_clear(tp, 5719_RDMA_BUG);
10105 	}
10106 
10107 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10108 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10109 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10110 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10111 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10112 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10113 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10114 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10115 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10116 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10117 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10118 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10119 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10120 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10121 
10122 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10123 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10124 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10125 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10126 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10127 	} else {
10128 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10129 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10130 		if (val) {
10131 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10132 			sp->rx_discards.low += val;
10133 			if (sp->rx_discards.low < val)
10134 				sp->rx_discards.high += 1;
10135 		}
10136 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10137 	}
10138 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10139 }
10140 
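/* Some chips can occasionally drop an MSI.  If a vector has work
 * pending but its consumer indices have not advanced across two
 * successive timer ticks, assume the interrupt was lost and invoke
 * the MSI handler directly.
 */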
10141 static void tg3_chk_missed_msi(struct tg3 *tp)
10142 {
10143 	u32 i;
10144 
10145 	for (i = 0; i < tp->irq_cnt; i++) {
10146 		struct tg3_napi *tnapi = &tp->napi[i];
10147 
10148 		if (tg3_has_work(tnapi)) {
10149 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10150 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10151 				if (tnapi->chk_msi_cnt < 1) {
10152 					tnapi->chk_msi_cnt++;
10153 					return;
10154 				}
10155 				tg3_msi(0, tnapi);
10156 			}
10157 		}
10158 		tnapi->chk_msi_cnt = 0;
10159 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10160 		tnapi->last_tx_cons = tnapi->tx_cons;
10161 	}
10162 }
10163 
10164 static void tg3_timer(unsigned long __opaque)
10165 {
10166 	struct tg3 *tp = (struct tg3 *) __opaque;
10167 
10168 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10169 		goto restart_timer;
10170 
10171 	spin_lock(&tp->lock);
10172 
10173 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10174 	    tg3_flag(tp, 57765_CLASS))
10175 		tg3_chk_missed_msi(tp);
10176 
10177 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10178 		/* BCM4785: Flush posted writes from GbE to host memory. */
10179 		tr32(HOSTCC_MODE);
10180 	}
10181 
10182 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race-prone.
		 */
10187 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10188 			tw32(GRC_LOCAL_CTRL,
10189 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10190 		} else {
10191 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10192 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10193 		}
10194 
10195 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10196 			spin_unlock(&tp->lock);
10197 			tg3_reset_task_schedule(tp);
10198 			goto restart_timer;
10199 		}
10200 	}
10201 
10202 	/* This part only runs once per second. */
10203 	if (!--tp->timer_counter) {
10204 		if (tg3_flag(tp, 5705_PLUS))
10205 			tg3_periodic_fetch_stats(tp);
10206 
10207 		if (tp->setlpicnt && !--tp->setlpicnt)
10208 			tg3_phy_eee_enable(tp);
10209 
10210 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10211 			u32 mac_stat;
10212 			int phy_event;
10213 
10214 			mac_stat = tr32(MAC_STATUS);
10215 
10216 			phy_event = 0;
10217 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10218 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10219 					phy_event = 1;
10220 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10221 				phy_event = 1;
10222 
10223 			if (phy_event)
10224 				tg3_setup_phy(tp, 0);
10225 		} else if (tg3_flag(tp, POLL_SERDES)) {
10226 			u32 mac_stat = tr32(MAC_STATUS);
10227 			int need_setup = 0;
10228 
10229 			if (tp->link_up &&
10230 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10231 				need_setup = 1;
10232 			}
10233 			if (!tp->link_up &&
10234 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10235 					 MAC_STATUS_SIGNAL_DET))) {
10236 				need_setup = 1;
10237 			}
10238 			if (need_setup) {
10239 				if (!tp->serdes_counter) {
10240 					tw32_f(MAC_MODE,
10241 					     (tp->mac_mode &
10242 					      ~MAC_MODE_PORT_MODE_MASK));
10243 					udelay(40);
10244 					tw32_f(MAC_MODE, tp->mac_mode);
10245 					udelay(40);
10246 				}
10247 				tg3_setup_phy(tp, 0);
10248 			}
10249 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10250 			   tg3_flag(tp, 5780_CLASS)) {
10251 			tg3_serdes_parallel_detect(tp);
10252 		}
10253 
10254 		tp->timer_counter = tp->timer_multiplier;
10255 	}
10256 
10257 	/* Heartbeat is only sent once every 2 seconds.
10258 	 *
10259 	 * The heartbeat is to tell the ASF firmware that the host
10260 	 * driver is still alive.  In the event that the OS crashes,
10261 	 * ASF needs to reset the hardware to free up the FIFO space
10262 	 * that may be filled with rx packets destined for the host.
10263 	 * If the FIFO is full, ASF will no longer function properly.
10264 	 *
	 * Unintended resets have been reported on real-time kernels,
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
10268 	 *
10269 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10270 	 * to check the ring condition when the heartbeat is expiring
10271 	 * before doing the reset.  This will prevent most unintended
10272 	 * resets.
10273 	 */
10274 	if (!--tp->asf_counter) {
10275 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10276 			tg3_wait_for_event_ack(tp);
10277 
10278 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10279 				      FWCMD_NICDRV_ALIVE3);
10280 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10281 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10282 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10283 
10284 			tg3_generate_fw_event(tp);
10285 		}
10286 		tp->asf_counter = tp->asf_multiplier;
10287 	}
10288 
10289 	spin_unlock(&tp->lock);
10290 
10291 restart_timer:
10292 	tp->timer.expires = jiffies + tp->timer_offset;
10293 	add_timer(&tp->timer);
10294 }
10295 
10296 static void tg3_timer_init(struct tg3 *tp)
10297 {
10298 	if (tg3_flag(tp, TAGGED_STATUS) &&
10299 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10300 	    !tg3_flag(tp, 57765_CLASS))
10301 		tp->timer_offset = HZ;
10302 	else
10303 		tp->timer_offset = HZ / 10;
10304 
10305 	BUG_ON(tp->timer_offset > HZ);
10306 
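	/* The timer fires every timer_offset jiffies.  timer_multiplier
	 * converts that rate into a once-per-second cadence, and
	 * asf_multiplier into one heartbeat per TG3_FW_UPDATE_FREQ_SEC.
	 */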
10307 	tp->timer_multiplier = (HZ / tp->timer_offset);
10308 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10309 			     TG3_FW_UPDATE_FREQ_SEC;
10310 
10311 	init_timer(&tp->timer);
10312 	tp->timer.data = (unsigned long) tp;
10313 	tp->timer.function = tg3_timer;
10314 }
10315 
10316 static void tg3_timer_start(struct tg3 *tp)
10317 {
10318 	tp->asf_counter   = tp->asf_multiplier;
10319 	tp->timer_counter = tp->timer_multiplier;
10320 
10321 	tp->timer.expires = jiffies + tp->timer_offset;
10322 	add_timer(&tp->timer);
10323 }
10324 
10325 static void tg3_timer_stop(struct tg3 *tp)
10326 {
10327 	del_timer_sync(&tp->timer);
10328 }
10329 
10330 /* Restart hardware after configuration changes, self-test, etc.
10331  * Invoked with tp->lock held.
10332  */
10333 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10334 	__releases(tp->lock)
10335 	__acquires(tp->lock)
10336 {
10337 	int err;
10338 
10339 	err = tg3_init_hw(tp, reset_phy);
10340 	if (err) {
10341 		netdev_err(tp->dev,
10342 			   "Failed to re-initialize device, aborting\n");
10343 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10344 		tg3_full_unlock(tp);
10345 		tg3_timer_stop(tp);
10346 		tp->irq_sync = 0;
10347 		tg3_napi_enable(tp);
10348 		dev_close(tp->dev);
10349 		tg3_full_lock(tp, 0);
10350 	}
10351 	return err;
10352 }
10353 
10354 static void tg3_reset_task(struct work_struct *work)
10355 {
10356 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10357 	int err;
10358 
10359 	tg3_full_lock(tp, 0);
10360 
10361 	if (!netif_running(tp->dev)) {
10362 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10363 		tg3_full_unlock(tp);
10364 		return;
10365 	}
10366 
10367 	tg3_full_unlock(tp);
10368 
10369 	tg3_phy_stop(tp);
10370 
10371 	tg3_netif_stop(tp);
10372 
10373 	tg3_full_lock(tp, 1);
10374 
10375 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10376 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
10377 		tp->write32_rx_mbox = tg3_write_flush_reg32;
10378 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
10379 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10380 	}
10381 
10382 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10383 	err = tg3_init_hw(tp, 1);
10384 	if (err)
10385 		goto out;
10386 
10387 	tg3_netif_start(tp);
10388 
10389 out:
10390 	tg3_full_unlock(tp);
10391 
10392 	if (!err)
10393 		tg3_phy_start(tp);
10394 
10395 	tg3_flag_clear(tp, RESET_TASK_PENDING);
10396 }
10397 
10398 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10399 {
10400 	irq_handler_t fn;
10401 	unsigned long flags;
10402 	char *name;
10403 	struct tg3_napi *tnapi = &tp->napi[irq_num];
10404 
10405 	if (tp->irq_cnt == 1)
10406 		name = tp->dev->name;
10407 	else {
10408 		name = &tnapi->irq_lbl[0];
10409 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10410 		name[IFNAMSIZ-1] = 0;
10411 	}
10412 
10413 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10414 		fn = tg3_msi;
10415 		if (tg3_flag(tp, 1SHOT_MSI))
10416 			fn = tg3_msi_1shot;
10417 		flags = 0;
10418 	} else {
10419 		fn = tg3_interrupt;
10420 		if (tg3_flag(tp, TAGGED_STATUS))
10421 			fn = tg3_interrupt_tagged;
10422 		flags = IRQF_SHARED;
10423 	}
10424 
10425 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10426 }
10427 
10428 static int tg3_test_interrupt(struct tg3 *tp)
10429 {
10430 	struct tg3_napi *tnapi = &tp->napi[0];
10431 	struct net_device *dev = tp->dev;
10432 	int err, i, intr_ok = 0;
10433 	u32 val;
10434 
10435 	if (!netif_running(dev))
10436 		return -ENODEV;
10437 
10438 	tg3_disable_ints(tp);
10439 
10440 	free_irq(tnapi->irq_vec, tnapi);
10441 
10442 	/*
	 * Turn off MSI one-shot mode.  Otherwise this test has no
10444 	 * observable way to know whether the interrupt was delivered.
10445 	 */
10446 	if (tg3_flag(tp, 57765_PLUS)) {
10447 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10448 		tw32(MSGINT_MODE, val);
10449 	}
10450 
10451 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
10452 			  IRQF_SHARED, dev->name, tnapi);
10453 	if (err)
10454 		return err;
10455 
10456 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10457 	tg3_enable_ints(tp);
10458 
10459 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10460 	       tnapi->coal_now);
10461 
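	/* Poll for up to ~50 ms for evidence that the test interrupt
	 * fired: either the interrupt mailbox changed, or the ISR ran
	 * and masked the PCI interrupt.
	 */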
10462 	for (i = 0; i < 5; i++) {
10463 		u32 int_mbox, misc_host_ctrl;
10464 
10465 		int_mbox = tr32_mailbox(tnapi->int_mbox);
10466 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10467 
10468 		if ((int_mbox != 0) ||
10469 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10470 			intr_ok = 1;
10471 			break;
10472 		}
10473 
10474 		if (tg3_flag(tp, 57765_PLUS) &&
10475 		    tnapi->hw_status->status_tag != tnapi->last_tag)
10476 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10477 
10478 		msleep(10);
10479 	}
10480 
10481 	tg3_disable_ints(tp);
10482 
10483 	free_irq(tnapi->irq_vec, tnapi);
10484 
10485 	err = tg3_request_irq(tp, 0);
10486 
10487 	if (err)
10488 		return err;
10489 
10490 	if (intr_ok) {
		/* Re-enable MSI one-shot mode. */
10492 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10493 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10494 			tw32(MSGINT_MODE, val);
10495 		}
10496 		return 0;
10497 	}
10498 
10499 	return -EIO;
10500 }
10501 
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 * INTx mode is successfully restored.
10504  */
10505 static int tg3_test_msi(struct tg3 *tp)
10506 {
10507 	int err;
10508 	u16 pci_cmd;
10509 
10510 	if (!tg3_flag(tp, USING_MSI))
10511 		return 0;
10512 
10513 	/* Turn off SERR reporting in case MSI terminates with Master
10514 	 * Abort.
10515 	 */
10516 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10517 	pci_write_config_word(tp->pdev, PCI_COMMAND,
10518 			      pci_cmd & ~PCI_COMMAND_SERR);
10519 
10520 	err = tg3_test_interrupt(tp);
10521 
10522 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10523 
10524 	if (!err)
10525 		return 0;
10526 
10527 	/* other failures */
10528 	if (err != -EIO)
10529 		return err;
10530 
10531 	/* MSI test failed, go back to INTx mode */
10532 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10533 		    "to INTx mode. Please report this failure to the PCI "
10534 		    "maintainer and include system chipset information\n");
10535 
10536 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10537 
10538 	pci_disable_msi(tp->pdev);
10539 
10540 	tg3_flag_clear(tp, USING_MSI);
10541 	tp->napi[0].irq_vec = tp->pdev->irq;
10542 
10543 	err = tg3_request_irq(tp, 0);
10544 	if (err)
10545 		return err;
10546 
10547 	/* Need to reset the chip because the MSI cycle may have terminated
10548 	 * with Master Abort.
10549 	 */
10550 	tg3_full_lock(tp, 1);
10551 
10552 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10553 	err = tg3_init_hw(tp, 1);
10554 
10555 	tg3_full_unlock(tp);
10556 
10557 	if (err)
10558 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10559 
10560 	return err;
10561 }
10562 
10563 static int tg3_request_firmware(struct tg3 *tp)
10564 {
10565 	const __be32 *fw_data;
10566 
10567 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10568 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10569 			   tp->fw_needed);
10570 		return -ENOENT;
10571 	}
10572 
10573 	fw_data = (void *)tp->fw->data;
10574 
	/* The firmware blob starts with version numbers, followed by
	 * the start address and the _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
10579 
10580 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
10581 	if (tp->fw_len < (tp->fw->size - 12)) {
10582 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10583 			   tp->fw_len, tp->fw_needed);
10584 		release_firmware(tp->fw);
10585 		tp->fw = NULL;
10586 		return -EINVAL;
10587 	}
10588 
10589 	/* We no longer need firmware; we have it. */
10590 	tp->fw_needed = NULL;
10591 	return 0;
10592 }
10593 
10594 static u32 tg3_irq_count(struct tg3 *tp)
10595 {
10596 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10597 
10598 	if (irq_cnt > 1) {
		/* We want as many RX rings enabled as there are CPUs.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc., so we add
		 * one to the number of vectors we are requesting.
10603 		 */
10604 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10605 	}
10606 
10607 	return irq_cnt;
10608 }
10609 
10610 static bool tg3_enable_msix(struct tg3 *tp)
10611 {
10612 	int i, rc;
10613 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10614 
10615 	tp->txq_cnt = tp->txq_req;
10616 	tp->rxq_cnt = tp->rxq_req;
10617 	if (!tp->rxq_cnt)
10618 		tp->rxq_cnt = netif_get_num_default_rss_queues();
10619 	if (tp->rxq_cnt > tp->rxq_max)
10620 		tp->rxq_cnt = tp->rxq_max;
10621 
10622 	/* Disable multiple TX rings by default.  Simple round-robin hardware
10623 	 * scheduling of the TX rings can cause starvation of rings with
10624 	 * small packets when other rings have TSO or jumbo packets.
10625 	 */
10626 	if (!tp->txq_req)
10627 		tp->txq_cnt = 1;
10628 
10629 	tp->irq_cnt = tg3_irq_count(tp);
10630 
10631 	for (i = 0; i < tp->irq_max; i++) {
10632 		msix_ent[i].entry  = i;
10633 		msix_ent[i].vector = 0;
10634 	}
10635 
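	/* pci_enable_msix() returns 0 on success, a positive count of
	 * vectors actually available (in which case we retry with that
	 * count), or a negative errno on failure.
	 */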
10636 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10637 	if (rc < 0) {
10638 		return false;
10639 	} else if (rc != 0) {
10640 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
10641 			return false;
10642 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10643 			      tp->irq_cnt, rc);
10644 		tp->irq_cnt = rc;
10645 		tp->rxq_cnt = max(rc - 1, 1);
10646 		if (tp->txq_cnt)
10647 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10648 	}
10649 
10650 	for (i = 0; i < tp->irq_max; i++)
10651 		tp->napi[i].irq_vec = msix_ent[i].vector;
10652 
10653 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10654 		pci_disable_msix(tp->pdev);
10655 		return false;
10656 	}
10657 
10658 	if (tp->irq_cnt == 1)
10659 		return true;
10660 
10661 	tg3_flag_set(tp, ENABLE_RSS);
10662 
10663 	if (tp->txq_cnt > 1)
10664 		tg3_flag_set(tp, ENABLE_TSS);
10665 
10666 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10667 
10668 	return true;
10669 }
10670 
10671 static void tg3_ints_init(struct tg3 *tp)
10672 {
10673 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10674 	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI-supporting chips should support tagged
		 * status.  Warn and fall back to INTx if this is not the case.
10677 		 */
10678 		netdev_warn(tp->dev,
10679 			    "MSI without TAGGED_STATUS? Not using MSI\n");
10680 		goto defcfg;
10681 	}
10682 
10683 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10684 		tg3_flag_set(tp, USING_MSIX);
10685 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10686 		tg3_flag_set(tp, USING_MSI);
10687 
10688 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10689 		u32 msi_mode = tr32(MSGINT_MODE);
10690 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10691 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10692 		if (!tg3_flag(tp, 1SHOT_MSI))
10693 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10694 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10695 	}
10696 defcfg:
10697 	if (!tg3_flag(tp, USING_MSIX)) {
10698 		tp->irq_cnt = 1;
10699 		tp->napi[0].irq_vec = tp->pdev->irq;
10700 	}
10701 
10702 	if (tp->irq_cnt == 1) {
10703 		tp->txq_cnt = 1;
10704 		tp->rxq_cnt = 1;
10705 		netif_set_real_num_tx_queues(tp->dev, 1);
10706 		netif_set_real_num_rx_queues(tp->dev, 1);
10707 	}
10708 }
10709 
10710 static void tg3_ints_fini(struct tg3 *tp)
10711 {
10712 	if (tg3_flag(tp, USING_MSIX))
10713 		pci_disable_msix(tp->pdev);
10714 	else if (tg3_flag(tp, USING_MSI))
10715 		pci_disable_msi(tp->pdev);
10716 	tg3_flag_clear(tp, USING_MSI);
10717 	tg3_flag_clear(tp, USING_MSIX);
10718 	tg3_flag_clear(tp, ENABLE_RSS);
10719 	tg3_flag_clear(tp, ENABLE_TSS);
10720 }
10721 
10722 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10723 		     bool init)
10724 {
10725 	struct net_device *dev = tp->dev;
10726 	int i, err;
10727 
10728 	/*
	 * Set up interrupts first so we know how
10730 	 * many NAPI resources to allocate
10731 	 */
10732 	tg3_ints_init(tp);
10733 
10734 	tg3_rss_check_indir_tbl(tp);
10735 
10736 	/* The placement of this call is tied
10737 	 * to the setup and use of Host TX descriptors.
10738 	 */
10739 	err = tg3_alloc_consistent(tp);
10740 	if (err)
10741 		goto err_out1;
10742 
10743 	tg3_napi_init(tp);
10744 
10745 	tg3_napi_enable(tp);
10746 
10747 	for (i = 0; i < tp->irq_cnt; i++) {
10748 		struct tg3_napi *tnapi = &tp->napi[i];
10749 		err = tg3_request_irq(tp, i);
10750 		if (err) {
10751 			for (i--; i >= 0; i--) {
10752 				tnapi = &tp->napi[i];
10753 				free_irq(tnapi->irq_vec, tnapi);
10754 			}
10755 			goto err_out2;
10756 		}
10757 	}
10758 
10759 	tg3_full_lock(tp, 0);
10760 
10761 	err = tg3_init_hw(tp, reset_phy);
10762 	if (err) {
10763 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10764 		tg3_free_rings(tp);
10765 	}
10766 
10767 	tg3_full_unlock(tp);
10768 
10769 	if (err)
10770 		goto err_out3;
10771 
10772 	if (test_irq && tg3_flag(tp, USING_MSI)) {
10773 		err = tg3_test_msi(tp);
10774 
10775 		if (err) {
10776 			tg3_full_lock(tp, 0);
10777 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10778 			tg3_free_rings(tp);
10779 			tg3_full_unlock(tp);
10780 
10781 			goto err_out2;
10782 		}
10783 
10784 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10785 			u32 val = tr32(PCIE_TRANSACTION_CFG);
10786 
10787 			tw32(PCIE_TRANSACTION_CFG,
10788 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10789 		}
10790 	}
10791 
10792 	tg3_phy_start(tp);
10793 
10794 	tg3_hwmon_open(tp);
10795 
10796 	tg3_full_lock(tp, 0);
10797 
10798 	tg3_timer_start(tp);
10799 	tg3_flag_set(tp, INIT_COMPLETE);
10800 	tg3_enable_ints(tp);
10801 
10802 	if (init)
10803 		tg3_ptp_init(tp);
10804 	else
		tg3_ptp_resume(tp);

10808 	tg3_full_unlock(tp);
10809 
10810 	netif_tx_start_all_queues(dev);
10811 
10812 	/*
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it's restored properly now.
10815 	 */
10816 	if (dev->features & NETIF_F_LOOPBACK)
10817 		tg3_set_loopback(dev, dev->features);
10818 
10819 	return 0;
10820 
10821 err_out3:
10822 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10823 		struct tg3_napi *tnapi = &tp->napi[i];
10824 		free_irq(tnapi->irq_vec, tnapi);
10825 	}
10826 
10827 err_out2:
10828 	tg3_napi_disable(tp);
10829 	tg3_napi_fini(tp);
10830 	tg3_free_consistent(tp);
10831 
10832 err_out1:
10833 	tg3_ints_fini(tp);
10834 
10835 	return err;
10836 }
10837 
10838 static void tg3_stop(struct tg3 *tp)
10839 {
10840 	int i;
10841 
10842 	tg3_reset_task_cancel(tp);
10843 	tg3_netif_stop(tp);
10844 
10845 	tg3_timer_stop(tp);
10846 
10847 	tg3_hwmon_close(tp);
10848 
10849 	tg3_phy_stop(tp);
10850 
10851 	tg3_full_lock(tp, 1);
10852 
10853 	tg3_disable_ints(tp);
10854 
10855 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10856 	tg3_free_rings(tp);
10857 	tg3_flag_clear(tp, INIT_COMPLETE);
10858 
10859 	tg3_full_unlock(tp);
10860 
10861 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10862 		struct tg3_napi *tnapi = &tp->napi[i];
10863 		free_irq(tnapi->irq_vec, tnapi);
10864 	}
10865 
10866 	tg3_ints_fini(tp);
10867 
10868 	tg3_napi_fini(tp);
10869 
10870 	tg3_free_consistent(tp);
10871 }
10872 
10873 static int tg3_open(struct net_device *dev)
10874 {
10875 	struct tg3 *tp = netdev_priv(dev);
10876 	int err;
10877 
10878 	if (tp->fw_needed) {
10879 		err = tg3_request_firmware(tp);
10880 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10881 			if (err)
10882 				return err;
10883 		} else if (err) {
10884 			netdev_warn(tp->dev, "TSO capability disabled\n");
10885 			tg3_flag_clear(tp, TSO_CAPABLE);
10886 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
10887 			netdev_notice(tp->dev, "TSO capability restored\n");
10888 			tg3_flag_set(tp, TSO_CAPABLE);
10889 		}
10890 	}
10891 
10892 	tg3_carrier_off(tp);
10893 
10894 	err = tg3_power_up(tp);
10895 	if (err)
10896 		return err;
10897 
10898 	tg3_full_lock(tp, 0);
10899 
10900 	tg3_disable_ints(tp);
10901 	tg3_flag_clear(tp, INIT_COMPLETE);
10902 
10903 	tg3_full_unlock(tp);
10904 
10905 	err = tg3_start(tp, true, true, true);
10906 	if (err) {
10907 		tg3_frob_aux_power(tp, false);
10908 		pci_set_power_state(tp->pdev, PCI_D3hot);
10909 	}
10910 
10911 	if (tg3_flag(tp, PTP_CAPABLE)) {
10912 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10913 						   &tp->pdev->dev);
10914 		if (IS_ERR(tp->ptp_clock))
10915 			tp->ptp_clock = NULL;
10916 	}
10917 
10918 	return err;
10919 }
10920 
10921 static int tg3_close(struct net_device *dev)
10922 {
10923 	struct tg3 *tp = netdev_priv(dev);
10924 
10925 	tg3_ptp_fini(tp);
10926 
10927 	tg3_stop(tp);
10928 
10929 	/* Clear stats across close / open calls */
10930 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10931 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10932 
10933 	tg3_power_down(tp);
10934 
10935 	tg3_carrier_off(tp);
10936 
10937 	return 0;
10938 }
10939 
10940 static inline u64 get_stat64(tg3_stat64_t *val)
10941 {
	return ((u64)val->high << 32) | ((u64)val->low);
10943 }
10944 
10945 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10946 {
10947 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10948 
10949 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10950 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10951 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
10952 		u32 val;
10953 
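		/* On these chips the CRC error count lives in a PHY test
		 * register: enable CRC counting, then read the counter
		 * (which appears to also clear it, hence the running
		 * accumulation into phy_crc_errors).
		 */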
10954 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10955 			tg3_writephy(tp, MII_TG3_TEST1,
10956 				     val | MII_TG3_TEST1_CRC_EN);
10957 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10958 		} else
10959 			val = 0;
10960 
10961 		tp->phy_crc_errors += val;
10962 
10963 		return tp->phy_crc_errors;
10964 	}
10965 
10966 	return get_stat64(&hw_stats->rx_fcs_errors);
10967 }
10968 
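/* Ethtool stats must survive chip resets, so each counter reported is
 * the value saved before the last reset plus the current hardware
 * counter value.
 */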
10969 #define ESTAT_ADD(member) \
10970 	estats->member =	old_estats->member + \
10971 				get_stat64(&hw_stats->member)
10972 
10973 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10974 {
10975 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10976 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10977 
10978 	ESTAT_ADD(rx_octets);
10979 	ESTAT_ADD(rx_fragments);
10980 	ESTAT_ADD(rx_ucast_packets);
10981 	ESTAT_ADD(rx_mcast_packets);
10982 	ESTAT_ADD(rx_bcast_packets);
10983 	ESTAT_ADD(rx_fcs_errors);
10984 	ESTAT_ADD(rx_align_errors);
10985 	ESTAT_ADD(rx_xon_pause_rcvd);
10986 	ESTAT_ADD(rx_xoff_pause_rcvd);
10987 	ESTAT_ADD(rx_mac_ctrl_rcvd);
10988 	ESTAT_ADD(rx_xoff_entered);
10989 	ESTAT_ADD(rx_frame_too_long_errors);
10990 	ESTAT_ADD(rx_jabbers);
10991 	ESTAT_ADD(rx_undersize_packets);
10992 	ESTAT_ADD(rx_in_length_errors);
10993 	ESTAT_ADD(rx_out_length_errors);
10994 	ESTAT_ADD(rx_64_or_less_octet_packets);
10995 	ESTAT_ADD(rx_65_to_127_octet_packets);
10996 	ESTAT_ADD(rx_128_to_255_octet_packets);
10997 	ESTAT_ADD(rx_256_to_511_octet_packets);
10998 	ESTAT_ADD(rx_512_to_1023_octet_packets);
10999 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11000 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11001 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11002 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11003 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11004 
11005 	ESTAT_ADD(tx_octets);
11006 	ESTAT_ADD(tx_collisions);
11007 	ESTAT_ADD(tx_xon_sent);
11008 	ESTAT_ADD(tx_xoff_sent);
11009 	ESTAT_ADD(tx_flow_control);
11010 	ESTAT_ADD(tx_mac_errors);
11011 	ESTAT_ADD(tx_single_collisions);
11012 	ESTAT_ADD(tx_mult_collisions);
11013 	ESTAT_ADD(tx_deferred);
11014 	ESTAT_ADD(tx_excessive_collisions);
11015 	ESTAT_ADD(tx_late_collisions);
11016 	ESTAT_ADD(tx_collide_2times);
11017 	ESTAT_ADD(tx_collide_3times);
11018 	ESTAT_ADD(tx_collide_4times);
11019 	ESTAT_ADD(tx_collide_5times);
11020 	ESTAT_ADD(tx_collide_6times);
11021 	ESTAT_ADD(tx_collide_7times);
11022 	ESTAT_ADD(tx_collide_8times);
11023 	ESTAT_ADD(tx_collide_9times);
11024 	ESTAT_ADD(tx_collide_10times);
11025 	ESTAT_ADD(tx_collide_11times);
11026 	ESTAT_ADD(tx_collide_12times);
11027 	ESTAT_ADD(tx_collide_13times);
11028 	ESTAT_ADD(tx_collide_14times);
11029 	ESTAT_ADD(tx_collide_15times);
11030 	ESTAT_ADD(tx_ucast_packets);
11031 	ESTAT_ADD(tx_mcast_packets);
11032 	ESTAT_ADD(tx_bcast_packets);
11033 	ESTAT_ADD(tx_carrier_sense_errors);
11034 	ESTAT_ADD(tx_discards);
11035 	ESTAT_ADD(tx_errors);
11036 
11037 	ESTAT_ADD(dma_writeq_full);
11038 	ESTAT_ADD(dma_write_prioq_full);
11039 	ESTAT_ADD(rxbds_empty);
11040 	ESTAT_ADD(rx_discards);
11041 	ESTAT_ADD(rx_errors);
11042 	ESTAT_ADD(rx_threshold_hit);
11043 
11044 	ESTAT_ADD(dma_readq_full);
11045 	ESTAT_ADD(dma_read_prioq_full);
11046 	ESTAT_ADD(tx_comp_queue_full);
11047 
11048 	ESTAT_ADD(ring_set_send_prod_index);
11049 	ESTAT_ADD(ring_status_update);
11050 	ESTAT_ADD(nic_irqs);
11051 	ESTAT_ADD(nic_avoided_irqs);
11052 	ESTAT_ADD(nic_tx_threshold_hit);
11053 
11054 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11055 }
11056 
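/* Derive the standard rtnl_link_stats64 counters from the hardware
 * stats block, again adding in the snapshot saved across the last
 * chip reset.
 */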
11057 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11058 {
11059 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11060 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11061 
11062 	stats->rx_packets = old_stats->rx_packets +
11063 		get_stat64(&hw_stats->rx_ucast_packets) +
11064 		get_stat64(&hw_stats->rx_mcast_packets) +
11065 		get_stat64(&hw_stats->rx_bcast_packets);
11066 
11067 	stats->tx_packets = old_stats->tx_packets +
11068 		get_stat64(&hw_stats->tx_ucast_packets) +
11069 		get_stat64(&hw_stats->tx_mcast_packets) +
11070 		get_stat64(&hw_stats->tx_bcast_packets);
11071 
11072 	stats->rx_bytes = old_stats->rx_bytes +
11073 		get_stat64(&hw_stats->rx_octets);
11074 	stats->tx_bytes = old_stats->tx_bytes +
11075 		get_stat64(&hw_stats->tx_octets);
11076 
11077 	stats->rx_errors = old_stats->rx_errors +
11078 		get_stat64(&hw_stats->rx_errors);
11079 	stats->tx_errors = old_stats->tx_errors +
11080 		get_stat64(&hw_stats->tx_errors) +
11081 		get_stat64(&hw_stats->tx_mac_errors) +
11082 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11083 		get_stat64(&hw_stats->tx_discards);
11084 
11085 	stats->multicast = old_stats->multicast +
11086 		get_stat64(&hw_stats->rx_mcast_packets);
11087 	stats->collisions = old_stats->collisions +
11088 		get_stat64(&hw_stats->tx_collisions);
11089 
11090 	stats->rx_length_errors = old_stats->rx_length_errors +
11091 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11092 		get_stat64(&hw_stats->rx_undersize_packets);
11093 
11094 	stats->rx_over_errors = old_stats->rx_over_errors +
11095 		get_stat64(&hw_stats->rxbds_empty);
11096 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11097 		get_stat64(&hw_stats->rx_align_errors);
11098 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11099 		get_stat64(&hw_stats->tx_discards);
11100 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11101 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11102 
11103 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11104 		tg3_calc_crc_errors(tp);
11105 
11106 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11107 		get_stat64(&hw_stats->rx_discards);
11108 
11109 	stats->rx_dropped = tp->rx_dropped;
11110 	stats->tx_dropped = tp->tx_dropped;
11111 }
11112 
11113 static int tg3_get_regs_len(struct net_device *dev)
11114 {
11115 	return TG3_REG_BLK_SIZE;
11116 }
11117 
11118 static void tg3_get_regs(struct net_device *dev,
11119 		struct ethtool_regs *regs, void *_p)
11120 {
11121 	struct tg3 *tp = netdev_priv(dev);
11122 
11123 	regs->version = 0;
11124 
11125 	memset(_p, 0, TG3_REG_BLK_SIZE);
11126 
11127 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11128 		return;
11129 
11130 	tg3_full_lock(tp, 0);
11131 
11132 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11133 
11134 	tg3_full_unlock(tp);
11135 }
11136 
11137 static int tg3_get_eeprom_len(struct net_device *dev)
11138 {
11139 	struct tg3 *tp = netdev_priv(dev);
11140 
11141 	return tp->nvram_size;
11142 }
11143 
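/* NVRAM is read in 32-bit words.  Requests that start or end off a
 * 4-byte boundary are satisfied by reading the enclosing word and
 * copying out only the bytes the caller asked for.
 */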
11144 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11145 {
11146 	struct tg3 *tp = netdev_priv(dev);
11147 	int ret;
11148 	u8  *pd;
11149 	u32 i, offset, len, b_offset, b_count;
11150 	__be32 val;
11151 
11152 	if (tg3_flag(tp, NO_NVRAM))
11153 		return -EINVAL;
11154 
11155 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11156 		return -EAGAIN;
11157 
11158 	offset = eeprom->offset;
11159 	len = eeprom->len;
11160 	eeprom->len = 0;
11161 
11162 	eeprom->magic = TG3_EEPROM_MAGIC;
11163 
11164 	if (offset & 3) {
		/* Adjust the start down to the required 4-byte boundary. */
11166 		b_offset = offset & 3;
11167 		b_count = 4 - b_offset;
11168 		if (b_count > len) {
11169 			/* i.e. offset=1 len=2 */
11170 			b_count = len;
11171 		}
11172 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11173 		if (ret)
11174 			return ret;
11175 		memcpy(data, ((char *)&val) + b_offset, b_count);
11176 		len -= b_count;
11177 		offset += b_count;
11178 		eeprom->len += b_count;
11179 	}
11180 
	/* read whole 32-bit words up to the last 4-byte boundary */
11182 	pd = &data[eeprom->len];
11183 	for (i = 0; i < (len - (len & 3)); i += 4) {
11184 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11185 		if (ret) {
11186 			eeprom->len += i;
11187 			return ret;
11188 		}
11189 		memcpy(pd + i, &val, 4);
11190 	}
11191 	eeprom->len += i;
11192 
11193 	if (len & 3) {
		/* read the trailing bytes that do not end on a 4-byte boundary */
11195 		pd = &data[eeprom->len];
11196 		b_count = len & 3;
11197 		b_offset = offset + len - b_count;
11198 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11199 		if (ret)
11200 			return ret;
11201 		memcpy(pd, &val, b_count);
11202 		eeprom->len += b_count;
11203 	}
11204 	return 0;
11205 }
11206 
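/* NVRAM writes must also be whole 32-bit words.  An unaligned start
 * or end is widened by reading back the neighbouring words and merging
 * them with the caller's data in a bounce buffer before programming.
 */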
11207 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11208 {
11209 	struct tg3 *tp = netdev_priv(dev);
11210 	int ret;
11211 	u32 offset, len, b_offset, odd_len;
11212 	u8 *buf;
11213 	__be32 start, end;
11214 
11215 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11216 		return -EAGAIN;
11217 
11218 	if (tg3_flag(tp, NO_NVRAM) ||
11219 	    eeprom->magic != TG3_EEPROM_MAGIC)
11220 		return -EINVAL;
11221 
11222 	offset = eeprom->offset;
11223 	len = eeprom->len;
11224 
	b_offset = offset & 3;
	if (b_offset) {
		/* Adjust the start down to the required 4-byte boundary. */
11227 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11228 		if (ret)
11229 			return ret;
11230 		len += b_offset;
11231 		offset &= ~3;
11232 		if (len < 4)
11233 			len = 4;
11234 	}
11235 
11236 	odd_len = 0;
11237 	if (len & 3) {
		/* Round the length up to end on the required 4-byte boundary. */
11239 		odd_len = 1;
11240 		len = (len + 3) & ~3;
11241 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11242 		if (ret)
11243 			return ret;
11244 	}
11245 
11246 	buf = data;
11247 	if (b_offset || odd_len) {
11248 		buf = kmalloc(len, GFP_KERNEL);
11249 		if (!buf)
11250 			return -ENOMEM;
11251 		if (b_offset)
11252 			memcpy(buf, &start, 4);
11253 		if (odd_len)
11254 			memcpy(buf+len-4, &end, 4);
11255 		memcpy(buf + b_offset, data, eeprom->len);
11256 	}
11257 
11258 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11259 
11260 	if (buf != data)
11261 		kfree(buf);
11262 
11263 	return ret;
11264 }
11265 
11266 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11267 {
11268 	struct tg3 *tp = netdev_priv(dev);
11269 
11270 	if (tg3_flag(tp, USE_PHYLIB)) {
11271 		struct phy_device *phydev;
11272 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11273 			return -EAGAIN;
11274 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11275 		return phy_ethtool_gset(phydev, cmd);
11276 	}
11277 
11278 	cmd->supported = (SUPPORTED_Autoneg);
11279 
11280 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11281 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11282 				   SUPPORTED_1000baseT_Full);
11283 
11284 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11285 		cmd->supported |= (SUPPORTED_100baseT_Half |
11286 				  SUPPORTED_100baseT_Full |
11287 				  SUPPORTED_10baseT_Half |
11288 				  SUPPORTED_10baseT_Full |
11289 				  SUPPORTED_TP);
11290 		cmd->port = PORT_TP;
11291 	} else {
11292 		cmd->supported |= SUPPORTED_FIBRE;
11293 		cmd->port = PORT_FIBRE;
11294 	}
11295 
11296 	cmd->advertising = tp->link_config.advertising;
11297 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11298 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11299 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11300 				cmd->advertising |= ADVERTISED_Pause;
11301 			} else {
11302 				cmd->advertising |= ADVERTISED_Pause |
11303 						    ADVERTISED_Asym_Pause;
11304 			}
11305 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11306 			cmd->advertising |= ADVERTISED_Asym_Pause;
11307 		}
11308 	}
11309 	if (netif_running(dev) && tp->link_up) {
11310 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11311 		cmd->duplex = tp->link_config.active_duplex;
11312 		cmd->lp_advertising = tp->link_config.rmt_adv;
11313 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11314 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11315 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11316 			else
11317 				cmd->eth_tp_mdix = ETH_TP_MDI;
11318 		}
11319 	} else {
11320 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11321 		cmd->duplex = DUPLEX_UNKNOWN;
11322 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11323 	}
11324 	cmd->phy_address = tp->phy_addr;
11325 	cmd->transceiver = XCVR_INTERNAL;
11326 	cmd->autoneg = tp->link_config.autoneg;
11327 	cmd->maxtxpkt = 0;
11328 	cmd->maxrxpkt = 0;
11329 	return 0;
11330 }
11331 
11332 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11333 {
11334 	struct tg3 *tp = netdev_priv(dev);
11335 	u32 speed = ethtool_cmd_speed(cmd);
11336 
11337 	if (tg3_flag(tp, USE_PHYLIB)) {
11338 		struct phy_device *phydev;
11339 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11340 			return -EAGAIN;
11341 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11342 		return phy_ethtool_sset(phydev, cmd);
11343 	}
11344 
11345 	if (cmd->autoneg != AUTONEG_ENABLE &&
11346 	    cmd->autoneg != AUTONEG_DISABLE)
11347 		return -EINVAL;
11348 
11349 	if (cmd->autoneg == AUTONEG_DISABLE &&
11350 	    cmd->duplex != DUPLEX_FULL &&
11351 	    cmd->duplex != DUPLEX_HALF)
11352 		return -EINVAL;
11353 
11354 	if (cmd->autoneg == AUTONEG_ENABLE) {
11355 		u32 mask = ADVERTISED_Autoneg |
11356 			   ADVERTISED_Pause |
11357 			   ADVERTISED_Asym_Pause;
11358 
11359 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11360 			mask |= ADVERTISED_1000baseT_Half |
11361 				ADVERTISED_1000baseT_Full;
11362 
11363 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11364 			mask |= ADVERTISED_100baseT_Half |
11365 				ADVERTISED_100baseT_Full |
11366 				ADVERTISED_10baseT_Half |
11367 				ADVERTISED_10baseT_Full |
11368 				ADVERTISED_TP;
11369 		else
11370 			mask |= ADVERTISED_FIBRE;
11371 
11372 		if (cmd->advertising & ~mask)
11373 			return -EINVAL;
11374 
11375 		mask &= (ADVERTISED_1000baseT_Half |
11376 			 ADVERTISED_1000baseT_Full |
11377 			 ADVERTISED_100baseT_Half |
11378 			 ADVERTISED_100baseT_Full |
11379 			 ADVERTISED_10baseT_Half |
11380 			 ADVERTISED_10baseT_Full);
11381 
11382 		cmd->advertising &= mask;
11383 	} else {
11384 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11385 			if (speed != SPEED_1000)
11386 				return -EINVAL;
11387 
11388 			if (cmd->duplex != DUPLEX_FULL)
11389 				return -EINVAL;
11390 		} else {
11391 			if (speed != SPEED_100 &&
11392 			    speed != SPEED_10)
11393 				return -EINVAL;
11394 		}
11395 	}
11396 
11397 	tg3_full_lock(tp, 0);
11398 
11399 	tp->link_config.autoneg = cmd->autoneg;
11400 	if (cmd->autoneg == AUTONEG_ENABLE) {
11401 		tp->link_config.advertising = (cmd->advertising |
11402 					      ADVERTISED_Autoneg);
11403 		tp->link_config.speed = SPEED_UNKNOWN;
11404 		tp->link_config.duplex = DUPLEX_UNKNOWN;
11405 	} else {
11406 		tp->link_config.advertising = 0;
11407 		tp->link_config.speed = speed;
11408 		tp->link_config.duplex = cmd->duplex;
11409 	}
11410 
11411 	if (netif_running(dev))
11412 		tg3_setup_phy(tp, 1);
11413 
11414 	tg3_full_unlock(tp);
11415 
11416 	return 0;
11417 }
11418 
11419 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11420 {
11421 	struct tg3 *tp = netdev_priv(dev);
11422 
11423 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11424 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11425 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11426 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11427 }
11428 
11429 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11430 {
11431 	struct tg3 *tp = netdev_priv(dev);
11432 
11433 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11434 		wol->supported = WAKE_MAGIC;
11435 	else
11436 		wol->supported = 0;
11437 	wol->wolopts = 0;
11438 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11439 		wol->wolopts = WAKE_MAGIC;
11440 	memset(&wol->sopass, 0, sizeof(wol->sopass));
11441 }
11442 
11443 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11444 {
11445 	struct tg3 *tp = netdev_priv(dev);
11446 	struct device *dp = &tp->pdev->dev;
11447 
11448 	if (wol->wolopts & ~WAKE_MAGIC)
11449 		return -EINVAL;
11450 	if ((wol->wolopts & WAKE_MAGIC) &&
11451 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11452 		return -EINVAL;
11453 
11454 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11455 
11456 	spin_lock_bh(&tp->lock);
11457 	if (device_may_wakeup(dp))
11458 		tg3_flag_set(tp, WOL_ENABLE);
11459 	else
11460 		tg3_flag_clear(tp, WOL_ENABLE);
11461 	spin_unlock_bh(&tp->lock);
11462 
11463 	return 0;
11464 }
11465 
11466 static u32 tg3_get_msglevel(struct net_device *dev)
11467 {
11468 	struct tg3 *tp = netdev_priv(dev);
11469 	return tp->msg_enable;
11470 }
11471 
11472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11473 {
11474 	struct tg3 *tp = netdev_priv(dev);
11475 	tp->msg_enable = value;
11476 }
11477 
11478 static int tg3_nway_reset(struct net_device *dev)
11479 {
11480 	struct tg3 *tp = netdev_priv(dev);
11481 	int r;
11482 
11483 	if (!netif_running(dev))
11484 		return -EAGAIN;
11485 
11486 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11487 		return -EINVAL;
11488 
11489 	if (tg3_flag(tp, USE_PHYLIB)) {
11490 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11491 			return -EAGAIN;
11492 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11493 	} else {
11494 		u32 bmcr;
11495 
11496 		spin_lock_bh(&tp->lock);
11497 		r = -EINVAL;
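		/* BMCR is read twice here; the first result is thrown
		 * away and only the second one is tested below.
		 */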
11498 		tg3_readphy(tp, MII_BMCR, &bmcr);
11499 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11500 		    ((bmcr & BMCR_ANENABLE) ||
11501 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11502 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11503 						   BMCR_ANENABLE);
11504 			r = 0;
11505 		}
11506 		spin_unlock_bh(&tp->lock);
11507 	}
11508 
11509 	return r;
11510 }
11511 
11512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11513 {
11514 	struct tg3 *tp = netdev_priv(dev);
11515 
11516 	ering->rx_max_pending = tp->rx_std_ring_mask;
11517 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11518 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11519 	else
11520 		ering->rx_jumbo_max_pending = 0;
11521 
11522 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11523 
11524 	ering->rx_pending = tp->rx_pending;
11525 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11526 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11527 	else
11528 		ering->rx_jumbo_pending = 0;
11529 
11530 	ering->tx_pending = tp->napi[0].tx_pending;
11531 }
11532 
11533 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11534 {
11535 	struct tg3 *tp = netdev_priv(dev);
11536 	int i, irq_sync = 0, err = 0;
11537 
11538 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11539 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11540 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11541 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
11542 	    (tg3_flag(tp, TSO_BUG) &&
11543 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11544 		return -EINVAL;
11545 
11546 	if (netif_running(dev)) {
11547 		tg3_phy_stop(tp);
11548 		tg3_netif_stop(tp);
11549 		irq_sync = 1;
11550 	}
11551 
11552 	tg3_full_lock(tp, irq_sync);
11553 
11554 	tp->rx_pending = ering->rx_pending;
11555 
11556 	if (tg3_flag(tp, MAX_RXPEND_64) &&
11557 	    tp->rx_pending > 63)
11558 		tp->rx_pending = 63;
11559 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11560 
11561 	for (i = 0; i < tp->irq_max; i++)
11562 		tp->napi[i].tx_pending = ering->tx_pending;
11563 
11564 	if (netif_running(dev)) {
11565 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11566 		err = tg3_restart_hw(tp, 1);
11567 		if (!err)
11568 			tg3_netif_start(tp);
11569 	}
11570 
11571 	tg3_full_unlock(tp);
11572 
11573 	if (irq_sync && !err)
11574 		tg3_phy_start(tp);
11575 
11576 	return err;
11577 }
11578 
11579 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11580 {
11581 	struct tg3 *tp = netdev_priv(dev);
11582 
11583 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11584 
11585 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11586 		epause->rx_pause = 1;
11587 	else
11588 		epause->rx_pause = 0;
11589 
11590 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11591 		epause->tx_pause = 1;
11592 	else
11593 		epause->tx_pause = 0;
11594 }
11595 
11596 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11597 {
11598 	struct tg3 *tp = netdev_priv(dev);
11599 	int err = 0;
11600 
11601 	if (tg3_flag(tp, USE_PHYLIB)) {
11602 		u32 newadv;
11603 		struct phy_device *phydev;
11604 
11605 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11606 
11607 		if (!(phydev->supported & SUPPORTED_Pause) ||
11608 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11609 		     (epause->rx_pause != epause->tx_pause)))
11610 			return -EINVAL;
11611 
11612 		tp->link_config.flowctrl = 0;
11613 		if (epause->rx_pause) {
11614 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11615 
11616 			if (epause->tx_pause) {
11617 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
11618 				newadv = ADVERTISED_Pause;
11619 			} else
11620 				newadv = ADVERTISED_Pause |
11621 					 ADVERTISED_Asym_Pause;
11622 		} else if (epause->tx_pause) {
11623 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
11624 			newadv = ADVERTISED_Asym_Pause;
11625 		} else
11626 			newadv = 0;
11627 
11628 		if (epause->autoneg)
11629 			tg3_flag_set(tp, PAUSE_AUTONEG);
11630 		else
11631 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11632 
11633 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11634 			u32 oldadv = phydev->advertising &
11635 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11636 			if (oldadv != newadv) {
11637 				phydev->advertising &=
11638 					~(ADVERTISED_Pause |
11639 					  ADVERTISED_Asym_Pause);
11640 				phydev->advertising |= newadv;
11641 				if (phydev->autoneg) {
11642 					/*
11643 					 * Always renegotiate the link to
11644 					 * inform our link partner of our
11645 					 * flow control settings, even if the
11646 					 * flow control is forced.  Let
11647 					 * tg3_adjust_link() do the final
11648 					 * flow control setup.
11649 					 */
11650 					return phy_start_aneg(phydev);
11651 				}
11652 			}
11653 
11654 			if (!epause->autoneg)
11655 				tg3_setup_flow_control(tp, 0, 0);
11656 		} else {
11657 			tp->link_config.advertising &=
11658 					~(ADVERTISED_Pause |
11659 					  ADVERTISED_Asym_Pause);
11660 			tp->link_config.advertising |= newadv;
11661 		}
11662 	} else {
11663 		int irq_sync = 0;
11664 
11665 		if (netif_running(dev)) {
11666 			tg3_netif_stop(tp);
11667 			irq_sync = 1;
11668 		}
11669 
11670 		tg3_full_lock(tp, irq_sync);
11671 
11672 		if (epause->autoneg)
11673 			tg3_flag_set(tp, PAUSE_AUTONEG);
11674 		else
11675 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11676 		if (epause->rx_pause)
11677 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11678 		else
11679 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11680 		if (epause->tx_pause)
11681 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
11682 		else
11683 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11684 
11685 		if (netif_running(dev)) {
11686 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11687 			err = tg3_restart_hw(tp, 1);
11688 			if (!err)
11689 				tg3_netif_start(tp);
11690 		}
11691 
11692 		tg3_full_unlock(tp);
11693 	}
11694 
11695 	return err;
11696 }
11697 
11698 static int tg3_get_sset_count(struct net_device *dev, int sset)
11699 {
11700 	switch (sset) {
11701 	case ETH_SS_TEST:
11702 		return TG3_NUM_TEST;
11703 	case ETH_SS_STATS:
11704 		return TG3_NUM_STATS;
11705 	default:
11706 		return -EOPNOTSUPP;
11707 	}
11708 }
11709 
11710 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11711 			 u32 *rules __always_unused)
11712 {
11713 	struct tg3 *tp = netdev_priv(dev);
11714 
11715 	if (!tg3_flag(tp, SUPPORT_MSIX))
11716 		return -EOPNOTSUPP;
11717 
11718 	switch (info->cmd) {
11719 	case ETHTOOL_GRXRINGS:
11720 		if (netif_running(tp->dev))
11721 			info->data = tp->rxq_cnt;
11722 		else {
11723 			info->data = num_online_cpus();
11724 			if (info->data > TG3_RSS_MAX_NUM_QS)
11725 				info->data = TG3_RSS_MAX_NUM_QS;
11726 		}
11727 
11728 		/* The first interrupt vector only
11729 		 * handles link interrupts.
11730 		 */
11731 		info->data -= 1;
11732 		return 0;
11733 
11734 	default:
11735 		return -EOPNOTSUPP;
11736 	}
11737 }
11738 
11739 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11740 {
11741 	u32 size = 0;
11742 	struct tg3 *tp = netdev_priv(dev);
11743 
11744 	if (tg3_flag(tp, SUPPORT_MSIX))
11745 		size = TG3_RSS_INDIR_TBL_SIZE;
11746 
11747 	return size;
11748 }
11749 
11750 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11751 {
11752 	struct tg3 *tp = netdev_priv(dev);
11753 	int i;
11754 
11755 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11756 		indir[i] = tp->rss_ind_tbl[i];
11757 
11758 	return 0;
11759 }
11760 
11761 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11762 {
11763 	struct tg3 *tp = netdev_priv(dev);
11764 	size_t i;
11765 
11766 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11767 		tp->rss_ind_tbl[i] = indir[i];
11768 
11769 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11770 		return 0;
11771 
11772 	/* It is legal to write the indirection
11773 	 * table while the device is running.
11774 	 */
11775 	tg3_full_lock(tp, 0);
11776 	tg3_rss_write_indir_tbl(tp);
11777 	tg3_full_unlock(tp);
11778 
11779 	return 0;
11780 }
11781 
11782 static void tg3_get_channels(struct net_device *dev,
11783 			     struct ethtool_channels *channel)
11784 {
11785 	struct tg3 *tp = netdev_priv(dev);
11786 	u32 deflt_qs = netif_get_num_default_rss_queues();
11787 
11788 	channel->max_rx = tp->rxq_max;
11789 	channel->max_tx = tp->txq_max;
11790 
11791 	if (netif_running(dev)) {
11792 		channel->rx_count = tp->rxq_cnt;
11793 		channel->tx_count = tp->txq_cnt;
11794 	} else {
11795 		if (tp->rxq_req)
11796 			channel->rx_count = tp->rxq_req;
11797 		else
11798 			channel->rx_count = min(deflt_qs, tp->rxq_max);
11799 
11800 		if (tp->txq_req)
11801 			channel->tx_count = tp->txq_req;
11802 		else
11803 			channel->tx_count = min(deflt_qs, tp->txq_max);
11804 	}
11805 }
11806 
11807 static int tg3_set_channels(struct net_device *dev,
11808 			    struct ethtool_channels *channel)
11809 {
11810 	struct tg3 *tp = netdev_priv(dev);
11811 
11812 	if (!tg3_flag(tp, SUPPORT_MSIX))
11813 		return -EOPNOTSUPP;
11814 
11815 	if (channel->rx_count > tp->rxq_max ||
11816 	    channel->tx_count > tp->txq_max)
11817 		return -EINVAL;
11818 
11819 	tp->rxq_req = channel->rx_count;
11820 	tp->txq_req = channel->tx_count;
11821 
11822 	if (!netif_running(dev))
11823 		return 0;
11824 
11825 	tg3_stop(tp);
11826 
11827 	tg3_carrier_off(tp);
11828 
11829 	tg3_start(tp, true, false, false);
11830 
11831 	return 0;
11832 }
11833 
11834 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11835 {
11836 	switch (stringset) {
11837 	case ETH_SS_STATS:
11838 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11839 		break;
11840 	case ETH_SS_TEST:
11841 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11842 		break;
11843 	default:
		WARN_ON(1);	/* unknown stringset; should never happen */
11845 		break;
11846 	}
11847 }
11848 
11849 static int tg3_set_phys_id(struct net_device *dev,
11850 			    enum ethtool_phys_id_state state)
11851 {
11852 	struct tg3 *tp = netdev_priv(dev);
11853 
11854 	if (!netif_running(tp->dev))
11855 		return -EAGAIN;
11856 
11857 	switch (state) {
11858 	case ETHTOOL_ID_ACTIVE:
11859 		return 1;	/* cycle on/off once per second */
11860 
11861 	case ETHTOOL_ID_ON:
11862 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11863 		     LED_CTRL_1000MBPS_ON |
11864 		     LED_CTRL_100MBPS_ON |
11865 		     LED_CTRL_10MBPS_ON |
11866 		     LED_CTRL_TRAFFIC_OVERRIDE |
11867 		     LED_CTRL_TRAFFIC_BLINK |
11868 		     LED_CTRL_TRAFFIC_LED);
11869 		break;
11870 
11871 	case ETHTOOL_ID_OFF:
11872 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11873 		     LED_CTRL_TRAFFIC_OVERRIDE);
11874 		break;
11875 
11876 	case ETHTOOL_ID_INACTIVE:
11877 		tw32(MAC_LED_CTRL, tp->led_ctrl);
11878 		break;
11879 	}
11880 
11881 	return 0;
11882 }
11883 
11884 static void tg3_get_ethtool_stats(struct net_device *dev,
11885 				   struct ethtool_stats *estats, u64 *tmp_stats)
11886 {
11887 	struct tg3 *tp = netdev_priv(dev);
11888 
11889 	if (tp->hw_stats)
11890 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11891 	else
11892 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11893 }
11894 
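/* Read the VPD block.  Images carrying the standard NVRAM magic may
 * point at an extended VPD area through the NVRAM directory, else the
 * fixed VPD offset is used; all other devices are read through the
 * PCI VPD capability.
 */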
11895 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11896 {
11897 	int i;
11898 	__be32 *buf;
11899 	u32 offset = 0, len = 0;
11900 	u32 magic, val;
11901 
11902 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11903 		return NULL;
11904 
11905 	if (magic == TG3_EEPROM_MAGIC) {
11906 		for (offset = TG3_NVM_DIR_START;
11907 		     offset < TG3_NVM_DIR_END;
11908 		     offset += TG3_NVM_DIRENT_SIZE) {
11909 			if (tg3_nvram_read(tp, offset, &val))
11910 				return NULL;
11911 
11912 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11913 			    TG3_NVM_DIRTYPE_EXTVPD)
11914 				break;
11915 		}
11916 
11917 		if (offset != TG3_NVM_DIR_END) {
11918 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11919 			if (tg3_nvram_read(tp, offset + 4, &offset))
11920 				return NULL;
11921 
11922 			offset = tg3_nvram_logical_addr(tp, offset);
11923 		}
11924 	}
11925 
11926 	if (!offset || !len) {
11927 		offset = TG3_NVM_VPD_OFF;
11928 		len = TG3_NVM_VPD_LEN;
11929 	}
11930 
11931 	buf = kmalloc(len, GFP_KERNEL);
11932 	if (buf == NULL)
11933 		return NULL;
11934 
11935 	if (magic == TG3_EEPROM_MAGIC) {
11936 		for (i = 0; i < len; i += 4) {
11937 			/* The data is in little-endian format in NVRAM.
11938 			 * Use the big-endian read routines to preserve
11939 			 * the byte order as it exists in NVRAM.
11940 			 */
11941 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11942 				goto error;
11943 		}
11944 	} else {
11945 		u8 *ptr;
11946 		ssize_t cnt;
11947 		unsigned int pos = 0;
11948 
11949 		ptr = (u8 *)&buf[0];
11950 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11951 			cnt = pci_read_vpd(tp->pdev, pos,
11952 					   len - pos, ptr);
11953 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11954 				cnt = 0;
11955 			else if (cnt < 0)
11956 				goto error;
11957 		}
11958 		if (pos != len)
11959 			goto error;
11960 	}
11961 
11962 	*vpdlen = len;
11963 
11964 	return buf;
11965 
11966 error:
11967 	kfree(buf);
11968 	return NULL;
11969 }
11970 
11971 #define NVRAM_TEST_SIZE 0x100
11972 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11973 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11974 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11975 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11976 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11977 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11978 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11979 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11980 
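/* Validate the NVRAM contents.  Legacy images are checked against the
 * two CRCs embedded in the image, selfboot images against an 8-bit
 * checksum or a per-byte parity scheme, and the VPD block against its
 * checksum keyword when one is present.
 */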
11981 static int tg3_test_nvram(struct tg3 *tp)
11982 {
11983 	u32 csum, magic, len;
11984 	__be32 *buf;
11985 	int i, j, k, err = 0, size;
11986 
11987 	if (tg3_flag(tp, NO_NVRAM))
11988 		return 0;
11989 
11990 	if (tg3_nvram_read(tp, 0, &magic) != 0)
11991 		return -EIO;
11992 
11993 	if (magic == TG3_EEPROM_MAGIC)
11994 		size = NVRAM_TEST_SIZE;
11995 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11996 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11997 		    TG3_EEPROM_SB_FORMAT_1) {
11998 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11999 			case TG3_EEPROM_SB_REVISION_0:
12000 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12001 				break;
12002 			case TG3_EEPROM_SB_REVISION_2:
12003 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12004 				break;
12005 			case TG3_EEPROM_SB_REVISION_3:
12006 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12007 				break;
12008 			case TG3_EEPROM_SB_REVISION_4:
12009 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12010 				break;
12011 			case TG3_EEPROM_SB_REVISION_5:
12012 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12013 				break;
12014 			case TG3_EEPROM_SB_REVISION_6:
12015 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12016 				break;
12017 			default:
12018 				return -EIO;
12019 			}
12020 		} else
12021 			return 0;
12022 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12023 		size = NVRAM_SELFBOOT_HW_SIZE;
12024 	else
12025 		return -EIO;
12026 
12027 	buf = kmalloc(size, GFP_KERNEL);
12028 	if (buf == NULL)
12029 		return -ENOMEM;
12030 
12031 	err = -EIO;
12032 	for (i = 0, j = 0; i < size; i += 4, j++) {
12033 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12034 		if (err)
12035 			break;
12036 	}
12037 	if (i < size)
12038 		goto out;
12039 
12040 	/* Selfboot format */
12041 	magic = be32_to_cpu(buf[0]);
12042 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12043 	    TG3_EEPROM_MAGIC_FW) {
12044 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12045 
12046 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12047 		    TG3_EEPROM_SB_REVISION_2) {
12048 			/* For rev 2, the csum doesn't include the MBA. */
12049 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12050 				csum8 += buf8[i];
12051 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12052 				csum8 += buf8[i];
12053 		} else {
12054 			for (i = 0; i < size; i++)
12055 				csum8 += buf8[i];
12056 		}
12057 
		err = csum8 ? -EIO : 0;
		goto out;
12065 	}
12066 
12067 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12068 	    TG3_EEPROM_MAGIC_HW) {
12069 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12070 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12071 		u8 *buf8 = (u8 *) buf;
12072 
12073 		/* Separate the parity bits and the data bytes.  */
12074 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12075 			if ((i == 0) || (i == 8)) {
12076 				int l;
12077 				u8 msk;
12078 
12079 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12080 					parity[k++] = buf8[i] & msk;
12081 				i++;
12082 			} else if (i == 16) {
12083 				int l;
12084 				u8 msk;
12085 
12086 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12087 					parity[k++] = buf8[i] & msk;
12088 				i++;
12089 
12090 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12091 					parity[k++] = buf8[i] & msk;
12092 				i++;
12093 			}
12094 			data[j++] = buf8[i];
12095 		}
12096 
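		/* Each data byte together with its parity bit must have
		 * odd parity: an even-weight byte needs the parity bit
		 * set, an odd-weight byte needs it clear.
		 */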
12097 		err = -EIO;
12098 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12099 			u8 hw8 = hweight8(data[i]);
12100 
12101 			if ((hw8 & 0x1) && parity[i])
12102 				goto out;
12103 			else if (!(hw8 & 0x1) && !parity[i])
12104 				goto out;
12105 		}
12106 		err = 0;
12107 		goto out;
12108 	}
12109 
12110 	err = -EIO;
12111 
12112 	/* Bootstrap checksum at offset 0x10 */
12113 	csum = calc_crc((unsigned char *) buf, 0x10);
12114 	if (csum != le32_to_cpu(buf[0x10/4]))
12115 		goto out;
12116 
12117 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12118 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12119 	if (csum != le32_to_cpu(buf[0xfc/4]))
12120 		goto out;
12121 
12122 	kfree(buf);
12123 
12124 	buf = tg3_vpd_readblock(tp, &len);
12125 	if (!buf)
12126 		return -ENOMEM;
12127 
12128 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12129 	if (i > 0) {
12130 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12131 		if (j < 0)
12132 			goto out;
12133 
12134 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12135 			goto out;
12136 
12137 		i += PCI_VPD_LRDT_TAG_SIZE;
12138 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12139 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12140 		if (j > 0) {
12141 			u8 csum8 = 0;
12142 
12143 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12144 
12145 			for (i = 0; i <= j; i++)
12146 				csum8 += ((u8 *)buf)[i];
12147 
12148 			if (csum8)
12149 				goto out;
12150 		}
12151 	}
12152 
12153 	err = 0;
12154 
12155 out:
12156 	kfree(buf);
12157 	return err;
12158 }
12159 
12160 #define TG3_SERDES_TIMEOUT_SEC	2
12161 #define TG3_COPPER_TIMEOUT_SEC	6
12162 
12163 static int tg3_test_link(struct tg3 *tp)
12164 {
12165 	int i, max;
12166 
12167 	if (!netif_running(tp->dev))
12168 		return -ENODEV;
12169 
12170 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12171 		max = TG3_SERDES_TIMEOUT_SEC;
12172 	else
12173 		max = TG3_COPPER_TIMEOUT_SEC;
12174 
12175 	for (i = 0; i < max; i++) {
12176 		if (tp->link_up)
12177 			return 0;
12178 
12179 		if (msleep_interruptible(1000))
12180 			break;
12181 	}
12182 
12183 	return -EIO;
12184 }
12185 
12186 /* Only test the commonly used registers */
12187 static int tg3_test_registers(struct tg3 *tp)
12188 {
12189 	int i, is_5705, is_5750;
12190 	u32 offset, read_mask, write_mask, val, save_val, read_val;
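	/* Each entry lists the bits expected to be read-only
	 * (read_mask) and the bits expected to be read/write
	 * (write_mask) for the register at offset.
	 */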
12191 	static struct {
12192 		u16 offset;
12193 		u16 flags;
12194 #define TG3_FL_5705	0x1
12195 #define TG3_FL_NOT_5705	0x2
12196 #define TG3_FL_NOT_5788	0x4
12197 #define TG3_FL_NOT_5750	0x8
12198 		u32 read_mask;
12199 		u32 write_mask;
12200 	} reg_tbl[] = {
12201 		/* MAC Control Registers */
12202 		{ MAC_MODE, TG3_FL_NOT_5705,
12203 			0x00000000, 0x00ef6f8c },
12204 		{ MAC_MODE, TG3_FL_5705,
12205 			0x00000000, 0x01ef6b8c },
12206 		{ MAC_STATUS, TG3_FL_NOT_5705,
12207 			0x03800107, 0x00000000 },
12208 		{ MAC_STATUS, TG3_FL_5705,
12209 			0x03800100, 0x00000000 },
12210 		{ MAC_ADDR_0_HIGH, 0x0000,
12211 			0x00000000, 0x0000ffff },
12212 		{ MAC_ADDR_0_LOW, 0x0000,
12213 			0x00000000, 0xffffffff },
12214 		{ MAC_RX_MTU_SIZE, 0x0000,
12215 			0x00000000, 0x0000ffff },
12216 		{ MAC_TX_MODE, 0x0000,
12217 			0x00000000, 0x00000070 },
12218 		{ MAC_TX_LENGTHS, 0x0000,
12219 			0x00000000, 0x00003fff },
12220 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12221 			0x00000000, 0x000007fc },
12222 		{ MAC_RX_MODE, TG3_FL_5705,
12223 			0x00000000, 0x000007dc },
12224 		{ MAC_HASH_REG_0, 0x0000,
12225 			0x00000000, 0xffffffff },
12226 		{ MAC_HASH_REG_1, 0x0000,
12227 			0x00000000, 0xffffffff },
12228 		{ MAC_HASH_REG_2, 0x0000,
12229 			0x00000000, 0xffffffff },
12230 		{ MAC_HASH_REG_3, 0x0000,
12231 			0x00000000, 0xffffffff },
12232 
12233 		/* Receive Data and Receive BD Initiator Control Registers. */
12234 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12235 			0x00000000, 0xffffffff },
12236 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12237 			0x00000000, 0xffffffff },
12238 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12239 			0x00000000, 0x00000003 },
12240 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12241 			0x00000000, 0xffffffff },
12242 		{ RCVDBDI_STD_BD+0, 0x0000,
12243 			0x00000000, 0xffffffff },
12244 		{ RCVDBDI_STD_BD+4, 0x0000,
12245 			0x00000000, 0xffffffff },
12246 		{ RCVDBDI_STD_BD+8, 0x0000,
12247 			0x00000000, 0xffff0002 },
12248 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12249 			0x00000000, 0xffffffff },
12250 
12251 		/* Receive BD Initiator Control Registers. */
12252 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12253 			0x00000000, 0xffffffff },
12254 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12255 			0x00000000, 0x000003ff },
12256 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12257 			0x00000000, 0xffffffff },
12258 
12259 		/* Host Coalescing Control Registers. */
12260 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12261 			0x00000000, 0x00000004 },
12262 		{ HOSTCC_MODE, TG3_FL_5705,
12263 			0x00000000, 0x000000f6 },
12264 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12265 			0x00000000, 0xffffffff },
12266 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12267 			0x00000000, 0x000003ff },
12268 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12269 			0x00000000, 0xffffffff },
12270 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12271 			0x00000000, 0x000003ff },
12272 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12273 			0x00000000, 0xffffffff },
12274 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12275 			0x00000000, 0x000000ff },
12276 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12277 			0x00000000, 0xffffffff },
12278 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12279 			0x00000000, 0x000000ff },
12280 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12281 			0x00000000, 0xffffffff },
12282 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12283 			0x00000000, 0xffffffff },
12284 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12285 			0x00000000, 0xffffffff },
12286 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12287 			0x00000000, 0x000000ff },
12288 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12289 			0x00000000, 0xffffffff },
12290 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12291 			0x00000000, 0x000000ff },
12292 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12293 			0x00000000, 0xffffffff },
12294 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12295 			0x00000000, 0xffffffff },
12296 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12297 			0x00000000, 0xffffffff },
12298 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12299 			0x00000000, 0xffffffff },
12300 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12301 			0x00000000, 0xffffffff },
12302 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12303 			0xffffffff, 0x00000000 },
12304 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12305 			0xffffffff, 0x00000000 },
12306 
12307 		/* Buffer Manager Control Registers. */
12308 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12309 			0x00000000, 0x007fff80 },
12310 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12311 			0x00000000, 0x007fffff },
12312 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12313 			0x00000000, 0x0000003f },
12314 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12315 			0x00000000, 0x000001ff },
12316 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12317 			0x00000000, 0x000001ff },
12318 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12319 			0xffffffff, 0x00000000 },
12320 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12321 			0xffffffff, 0x00000000 },
12322 
12323 		/* Mailbox Registers */
12324 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12325 			0x00000000, 0x000001ff },
12326 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12327 			0x00000000, 0x000001ff },
12328 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12329 			0x00000000, 0x000007ff },
12330 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12331 			0x00000000, 0x000001ff },
12332 
12333 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12334 	};
12335 
12336 	is_5705 = is_5750 = 0;
12337 	if (tg3_flag(tp, 5705_PLUS)) {
12338 		is_5705 = 1;
12339 		if (tg3_flag(tp, 5750_PLUS))
12340 			is_5750 = 1;
12341 	}
12342 
12343 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12344 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12345 			continue;
12346 
12347 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12348 			continue;
12349 
12350 		if (tg3_flag(tp, IS_5788) &&
12351 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
12352 			continue;
12353 
12354 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12355 			continue;
12356 
12357 		offset = (u32) reg_tbl[i].offset;
12358 		read_mask = reg_tbl[i].read_mask;
12359 		write_mask = reg_tbl[i].write_mask;
12360 
12361 		/* Save the original register content */
12362 		save_val = tr32(offset);
12363 
12364 		/* Determine the read-only value. */
12365 		read_val = save_val & read_mask;
12366 
12367 		/* Write zero to the register, then make sure the read-only bits
12368 		 * are not changed and the read/write bits are all zeros.
12369 		 */
12370 		tw32(offset, 0);
12371 
12372 		val = tr32(offset);
12373 
12374 		/* Test the read-only and read/write bits. */
12375 		if (((val & read_mask) != read_val) || (val & write_mask))
12376 			goto out;
12377 
12378 		/* Write ones to all the bits defined by RdMask and WrMask, then
12379 		 * make sure the read-only bits are not changed and the
12380 		 * read/write bits are all ones.
12381 		 */
12382 		tw32(offset, read_mask | write_mask);
12383 
12384 		val = tr32(offset);
12385 
12386 		/* Test the read-only bits. */
12387 		if ((val & read_mask) != read_val)
12388 			goto out;
12389 
12390 		/* Test the read/write bits. */
12391 		if ((val & write_mask) != write_mask)
12392 			goto out;
12393 
12394 		tw32(offset, save_val);
12395 	}
12396 
12397 	return 0;
12398 
12399 out:
12400 	if (netif_msg_hw(tp))
12401 		netdev_err(tp->dev,
12402 			   "Register test failed at offset %x\n", offset);
12403 	tw32(offset, save_val);
12404 	return -EIO;
12405 }
12406 
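/* Walk a block of internal chip memory with each test pattern,
 * writing and immediately reading back every 32-bit word through
 * the memory window.
 */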
12407 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12408 {
12409 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12410 	int i;
12411 	u32 j;
12412 
12413 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12414 		for (j = 0; j < len; j += 4) {
12415 			u32 val;
12416 
12417 			tg3_write_mem(tp, offset + j, test_pattern[i]);
12418 			tg3_read_mem(tp, offset + j, &val);
12419 			if (val != test_pattern[i])
12420 				return -EIO;
12421 		}
12422 	}
12423 	return 0;
12424 }
12425 
12426 static int tg3_test_memory(struct tg3 *tp)
12427 {
12428 	static struct mem_entry {
12429 		u32 offset;
12430 		u32 len;
12431 	} mem_tbl_570x[] = {
12432 		{ 0x00000000, 0x00b50},
12433 		{ 0x00002000, 0x1c000},
12434 		{ 0xffffffff, 0x00000}
12435 	}, mem_tbl_5705[] = {
12436 		{ 0x00000100, 0x0000c},
12437 		{ 0x00000200, 0x00008},
12438 		{ 0x00004000, 0x00800},
12439 		{ 0x00006000, 0x01000},
12440 		{ 0x00008000, 0x02000},
12441 		{ 0x00010000, 0x0e000},
12442 		{ 0xffffffff, 0x00000}
12443 	}, mem_tbl_5755[] = {
12444 		{ 0x00000200, 0x00008},
12445 		{ 0x00004000, 0x00800},
12446 		{ 0x00006000, 0x00800},
12447 		{ 0x00008000, 0x02000},
12448 		{ 0x00010000, 0x0c000},
12449 		{ 0xffffffff, 0x00000}
12450 	}, mem_tbl_5906[] = {
12451 		{ 0x00000200, 0x00008},
12452 		{ 0x00004000, 0x00400},
12453 		{ 0x00006000, 0x00400},
12454 		{ 0x00008000, 0x01000},
12455 		{ 0x00010000, 0x01000},
12456 		{ 0xffffffff, 0x00000}
12457 	}, mem_tbl_5717[] = {
12458 		{ 0x00000200, 0x00008},
12459 		{ 0x00010000, 0x0a000},
12460 		{ 0x00020000, 0x13c00},
12461 		{ 0xffffffff, 0x00000}
12462 	}, mem_tbl_57765[] = {
12463 		{ 0x00000200, 0x00008},
12464 		{ 0x00004000, 0x00800},
12465 		{ 0x00006000, 0x09800},
12466 		{ 0x00010000, 0x0a000},
12467 		{ 0xffffffff, 0x00000}
12468 	};
12469 	struct mem_entry *mem_tbl;
12470 	int err = 0;
12471 	int i;
12472 
12473 	if (tg3_flag(tp, 5717_PLUS))
12474 		mem_tbl = mem_tbl_5717;
12475 	else if (tg3_flag(tp, 57765_CLASS) ||
12476 		 tg3_asic_rev(tp) == ASIC_REV_5762)
12477 		mem_tbl = mem_tbl_57765;
12478 	else if (tg3_flag(tp, 5755_PLUS))
12479 		mem_tbl = mem_tbl_5755;
12480 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12481 		mem_tbl = mem_tbl_5906;
12482 	else if (tg3_flag(tp, 5705_PLUS))
12483 		mem_tbl = mem_tbl_5705;
12484 	else
12485 		mem_tbl = mem_tbl_570x;
12486 
12487 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12488 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12489 		if (err)
12490 			break;
12491 	}
12492 
12493 	return err;
12494 }
12495 
12496 #define TG3_TSO_MSS		500
12497 
12498 #define TG3_TSO_IP_HDR_LEN	20
12499 #define TG3_TSO_TCP_HDR_LEN	20
12500 #define TG3_TSO_TCP_OPT_LEN	12
12501 
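/* Frame template for the TSO loopback test: an IPv4 ethertype
 * followed by canned IP and TCP headers (including 12 bytes of TCP
 * options).  The IP total length is patched in at run time.
 */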
12502 static const u8 tg3_tso_header[] = {
12503 0x08, 0x00,
12504 0x45, 0x00, 0x00, 0x00,
12505 0x00, 0x00, 0x40, 0x00,
12506 0x40, 0x06, 0x00, 0x00,
12507 0x0a, 0x00, 0x00, 0x01,
12508 0x0a, 0x00, 0x00, 0x02,
12509 0x0d, 0x00, 0xe0, 0x00,
12510 0x00, 0x00, 0x01, 0x00,
12511 0x00, 0x00, 0x02, 0x00,
12512 0x80, 0x10, 0x10, 0x00,
12513 0x14, 0x09, 0x00, 0x00,
12514 0x01, 0x01, 0x08, 0x0a,
12515 0x11, 0x11, 0x11, 0x11,
12516 0x11, 0x11, 0x11, 0x11,
12517 };
12518 
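/* Transmit one known-pattern frame (or a small TSO burst) on the
 * selected tx ring and verify that every payload byte comes back
 * intact on the rx ring while the MAC or PHY is in loopback mode.
 */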
12519 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12520 {
12521 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12522 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12523 	u32 budget;
12524 	struct sk_buff *skb;
12525 	u8 *tx_data, *rx_data;
12526 	dma_addr_t map;
12527 	int num_pkts, tx_len, rx_len, i, err;
12528 	struct tg3_rx_buffer_desc *desc;
12529 	struct tg3_napi *tnapi, *rnapi;
12530 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12531 
12532 	tnapi = &tp->napi[0];
12533 	rnapi = &tp->napi[0];
12534 	if (tp->irq_cnt > 1) {
12535 		if (tg3_flag(tp, ENABLE_RSS))
12536 			rnapi = &tp->napi[1];
12537 		if (tg3_flag(tp, ENABLE_TSS))
12538 			tnapi = &tp->napi[1];
12539 	}
12540 	coal_now = tnapi->coal_now | rnapi->coal_now;
12541 
12542 	err = -EIO;
12543 
12544 	tx_len = pktsz;
12545 	skb = netdev_alloc_skb(tp->dev, tx_len);
12546 	if (!skb)
12547 		return -ENOMEM;
12548 
12549 	tx_data = skb_put(skb, tx_len);
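	/* Destination MAC is our own address (the frame will loop
	 * back); the source MAC and ethertype are zeroed here.
	 */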
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);
12552 
12553 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12554 
12555 	if (tso_loopback) {
12556 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12557 
12558 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12559 			      TG3_TSO_TCP_OPT_LEN;
12560 
12561 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12562 		       sizeof(tg3_tso_header));
12563 		mss = TG3_TSO_MSS;
12564 
12565 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12566 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12567 
12568 		/* Set the total length field in the IP header */
12569 		iph->tot_len = htons((u16)(mss + hdr_len));
12570 
12571 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
12572 			      TXD_FLAG_CPU_POST_DMA);
12573 
12574 		if (tg3_flag(tp, HW_TSO_1) ||
12575 		    tg3_flag(tp, HW_TSO_2) ||
12576 		    tg3_flag(tp, HW_TSO_3)) {
12577 			struct tcphdr *th;
12578 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12579 			th = (struct tcphdr *)&tx_data[val];
12580 			th->check = 0;
12581 		} else
12582 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
12583 
12584 		if (tg3_flag(tp, HW_TSO_3)) {
12585 			mss |= (hdr_len & 0xc) << 12;
12586 			if (hdr_len & 0x10)
12587 				base_flags |= 0x00000010;
12588 			base_flags |= (hdr_len & 0x3e0) << 5;
12589 		} else if (tg3_flag(tp, HW_TSO_2))
12590 			mss |= hdr_len << 9;
12591 		else if (tg3_flag(tp, HW_TSO_1) ||
12592 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
12593 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12594 		} else {
12595 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12596 		}
12597 
12598 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12599 	} else {
12600 		num_pkts = 1;
12601 		data_off = ETH_HLEN;
12602 
12603 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12604 		    tx_len > VLAN_ETH_FRAME_LEN)
12605 			base_flags |= TXD_FLAG_JMB_PKT;
12606 	}
12607 
12608 	for (i = data_off; i < tx_len; i++)
12609 		tx_data[i] = (u8) (i & 0xff);
12610 
12611 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12612 	if (pci_dma_mapping_error(tp->pdev, map)) {
12613 		dev_kfree_skb(skb);
12614 		return -EIO;
12615 	}
12616 
12617 	val = tnapi->tx_prod;
12618 	tnapi->tx_buffers[val].skb = skb;
12619 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12620 
12621 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12622 	       rnapi->coal_now);
12623 
12624 	udelay(10);
12625 
12626 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12627 
12628 	budget = tg3_tx_avail(tnapi);
12629 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12630 			    base_flags | TXD_FLAG_END, mss, 0)) {
12631 		tnapi->tx_buffers[val].skb = NULL;
12632 		dev_kfree_skb(skb);
12633 		return -EIO;
12634 	}
12635 
12636 	tnapi->tx_prod++;
12637 
12638 	/* Sync BD data before updating mailbox */
12639 	wmb();
12640 
12641 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12642 	tr32_mailbox(tnapi->prodmbox);
12643 
12644 	udelay(10);
12645 
12646 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12647 	for (i = 0; i < 35; i++) {
12648 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12649 		       coal_now);
12650 
12651 		udelay(10);
12652 
12653 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12654 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
12655 		if ((tx_idx == tnapi->tx_prod) &&
12656 		    (rx_idx == (rx_start_idx + num_pkts)))
12657 			break;
12658 	}
12659 
12660 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12661 	dev_kfree_skb(skb);
12662 
12663 	if (tx_idx != tnapi->tx_prod)
12664 		goto out;
12665 
12666 	if (rx_idx != rx_start_idx + num_pkts)
12667 		goto out;
12668 
12669 	val = data_off;
12670 	while (rx_idx != rx_start_idx) {
12671 		desc = &rnapi->rx_rcb[rx_start_idx++];
12672 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12673 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12674 
12675 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12676 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12677 			goto out;
12678 
12679 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12680 			 - ETH_FCS_LEN;
12681 
12682 		if (!tso_loopback) {
12683 			if (rx_len != tx_len)
12684 				goto out;
12685 
12686 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12687 				if (opaque_key != RXD_OPAQUE_RING_STD)
12688 					goto out;
12689 			} else {
12690 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12691 					goto out;
12692 			}
12693 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12694 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12695 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
12696 			goto out;
12697 		}
12698 
12699 		if (opaque_key == RXD_OPAQUE_RING_STD) {
12700 			rx_data = tpr->rx_std_buffers[desc_idx].data;
12701 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12702 					     mapping);
12703 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12704 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12705 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12706 					     mapping);
12707 		} else
12708 			goto out;
12709 
12710 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12711 					    PCI_DMA_FROMDEVICE);
12712 
12713 		rx_data += TG3_RX_OFFSET(tp);
12714 		for (i = data_off; i < rx_len; i++, val++) {
12715 			if (*(rx_data + i) != (u8) (val & 0xff))
12716 				goto out;
12717 		}
12718 	}
12719 
12720 	err = 0;
12721 
12722 	/* tg3_free_rings will unmap and free the rx_data */
12723 out:
12724 	return err;
12725 }
12726 
12727 #define TG3_STD_LOOPBACK_FAILED		1
12728 #define TG3_JMB_LOOPBACK_FAILED		2
12729 #define TG3_TSO_LOOPBACK_FAILED		4
12730 #define TG3_LOOPBACK_FAILED \
12731 	(TG3_STD_LOOPBACK_FAILED | \
12732 	 TG3_JMB_LOOPBACK_FAILED | \
12733 	 TG3_TSO_LOOPBACK_FAILED)
12734 
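/* Run the standard, TSO and jumbo loopback variants in MAC, PHY and
 * (optionally) external loopback modes, recording per-mode failure
 * bits in data[].
 */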
12735 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12736 {
12737 	int err = -EIO;
12738 	u32 eee_cap;
12739 	u32 jmb_pkt_sz = 9000;
12740 
12741 	if (tp->dma_limit)
12742 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12743 
12744 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12745 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12746 
12747 	if (!netif_running(tp->dev)) {
12748 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12749 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12750 		if (do_extlpbk)
12751 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12752 		goto done;
12753 	}
12754 
12755 	err = tg3_reset_hw(tp, 1);
12756 	if (err) {
12757 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12758 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12759 		if (do_extlpbk)
12760 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12761 		goto done;
12762 	}
12763 
12764 	if (tg3_flag(tp, ENABLE_RSS)) {
12765 		int i;
12766 
12767 		/* Reroute all rx packets to the 1st queue */
12768 		for (i = MAC_RSS_INDIR_TBL_0;
12769 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12770 			tw32(i, 0x0);
12771 	}
12772 
	/* HW erratum - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by this
	 * erratum.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
12778 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12779 	    !tg3_flag(tp, CPMU_PRESENT)) {
12780 		tg3_mac_loopback(tp, true);
12781 
12782 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12783 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12784 
12785 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12786 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12787 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12788 
12789 		tg3_mac_loopback(tp, false);
12790 	}
12791 
12792 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12793 	    !tg3_flag(tp, USE_PHYLIB)) {
12794 		int i;
12795 
12796 		tg3_phy_lpbk_set(tp, 0, false);
12797 
12798 		/* Wait for link */
12799 		for (i = 0; i < 100; i++) {
12800 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12801 				break;
12802 			mdelay(1);
12803 		}
12804 
12805 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12806 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12807 		if (tg3_flag(tp, TSO_CAPABLE) &&
12808 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12809 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12810 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12811 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12812 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12813 
12814 		if (do_extlpbk) {
12815 			tg3_phy_lpbk_set(tp, 0, true);
12816 
12817 			/* All link indications report up, but the hardware
12818 			 * isn't really ready for about 20 msec.  Double it
12819 			 * to be sure.
12820 			 */
12821 			mdelay(40);
12822 
12823 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12824 				data[TG3_EXT_LOOPB_TEST] |=
12825 							TG3_STD_LOOPBACK_FAILED;
12826 			if (tg3_flag(tp, TSO_CAPABLE) &&
12827 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12828 				data[TG3_EXT_LOOPB_TEST] |=
12829 							TG3_TSO_LOOPBACK_FAILED;
12830 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12831 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12832 				data[TG3_EXT_LOOPB_TEST] |=
12833 							TG3_JMB_LOOPBACK_FAILED;
12834 		}
12835 
12836 		/* Re-enable gphy autopowerdown. */
12837 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12838 			tg3_phy_toggle_apd(tp, true);
12839 	}
12840 
12841 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12842 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12843 
12844 done:
12845 	tp->phy_flags |= eee_cap;
12846 
12847 	return err;
12848 }
12849 
12850 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12851 			  u64 *data)
12852 {
12853 	struct tg3 *tp = netdev_priv(dev);
12854 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12855 
12856 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12857 	    tg3_power_up(tp)) {
12858 		etest->flags |= ETH_TEST_FL_FAILED;
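		/* Nonzero marks a test as failed; the exact value is
		 * not otherwise meaningful.
		 */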
12859 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12860 		return;
12861 	}
12862 
12863 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12864 
12865 	if (tg3_test_nvram(tp) != 0) {
12866 		etest->flags |= ETH_TEST_FL_FAILED;
12867 		data[TG3_NVRAM_TEST] = 1;
12868 	}
12869 	if (!doextlpbk && tg3_test_link(tp)) {
12870 		etest->flags |= ETH_TEST_FL_FAILED;
12871 		data[TG3_LINK_TEST] = 1;
12872 	}
12873 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12874 		int err, err2 = 0, irq_sync = 0;
12875 
12876 		if (netif_running(dev)) {
12877 			tg3_phy_stop(tp);
12878 			tg3_netif_stop(tp);
12879 			irq_sync = 1;
12880 		}
12881 
12882 		tg3_full_lock(tp, irq_sync);
12883 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12884 		err = tg3_nvram_lock(tp);
12885 		tg3_halt_cpu(tp, RX_CPU_BASE);
12886 		if (!tg3_flag(tp, 5705_PLUS))
12887 			tg3_halt_cpu(tp, TX_CPU_BASE);
12888 		if (!err)
12889 			tg3_nvram_unlock(tp);
12890 
12891 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12892 			tg3_phy_reset(tp);
12893 
12894 		if (tg3_test_registers(tp) != 0) {
12895 			etest->flags |= ETH_TEST_FL_FAILED;
12896 			data[TG3_REGISTER_TEST] = 1;
12897 		}
12898 
12899 		if (tg3_test_memory(tp) != 0) {
12900 			etest->flags |= ETH_TEST_FL_FAILED;
12901 			data[TG3_MEMORY_TEST] = 1;
12902 		}
12903 
12904 		if (doextlpbk)
12905 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12906 
12907 		if (tg3_test_loopback(tp, data, doextlpbk))
12908 			etest->flags |= ETH_TEST_FL_FAILED;
12909 
12910 		tg3_full_unlock(tp);
12911 
12912 		if (tg3_test_interrupt(tp) != 0) {
12913 			etest->flags |= ETH_TEST_FL_FAILED;
12914 			data[TG3_INTERRUPT_TEST] = 1;
12915 		}
12916 
12917 		tg3_full_lock(tp, 0);
12918 
12919 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12920 		if (netif_running(dev)) {
12921 			tg3_flag_set(tp, INIT_COMPLETE);
12922 			err2 = tg3_restart_hw(tp, 1);
12923 			if (!err2)
12924 				tg3_netif_start(tp);
12925 		}
12926 
12927 		tg3_full_unlock(tp);
12928 
12929 		if (irq_sync && !err2)
12930 			tg3_phy_start(tp);
12931 	}
12932 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12933 		tg3_power_down(tp);
12935 }
12936 
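/* SIOCSHWTSTAMP handler.  Validates the requested hardware timestamping
 * configuration, toggles TX timestamping, programs the RX PTP packet
 * filter and echoes the accepted config back to user space.
 */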
12937 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12938 			      struct ifreq *ifr, int cmd)
12939 {
12940 	struct tg3 *tp = netdev_priv(dev);
12941 	struct hwtstamp_config stmpconf;
12942 
12943 	if (!tg3_flag(tp, PTP_CAPABLE))
12944 		return -EINVAL;
12945 
12946 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12947 		return -EFAULT;
12948 
12949 	if (stmpconf.flags)
12950 		return -EINVAL;
12951 
12952 	switch (stmpconf.tx_type) {
12953 	case HWTSTAMP_TX_ON:
12954 		tg3_flag_set(tp, TX_TSTAMP_EN);
12955 		break;
12956 	case HWTSTAMP_TX_OFF:
12957 		tg3_flag_clear(tp, TX_TSTAMP_EN);
12958 		break;
12959 	default:
12960 		return -ERANGE;
12961 	}
12962 
12963 	switch (stmpconf.rx_filter) {
12964 	case HWTSTAMP_FILTER_NONE:
12965 		tp->rxptpctl = 0;
12966 		break;
12967 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12968 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12969 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12970 		break;
12971 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12972 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12973 			       TG3_RX_PTP_CTL_SYNC_EVNT;
12974 		break;
12975 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12976 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12977 			       TG3_RX_PTP_CTL_DELAY_REQ;
12978 		break;
12979 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
12980 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12981 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12982 		break;
12983 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12984 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12985 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12986 		break;
12987 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12988 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12989 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12990 		break;
12991 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
12992 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12993 			       TG3_RX_PTP_CTL_SYNC_EVNT;
12994 		break;
12995 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12996 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12997 			       TG3_RX_PTP_CTL_SYNC_EVNT;
12998 		break;
12999 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13000 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13001 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13002 		break;
13003 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13004 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13005 			       TG3_RX_PTP_CTL_DELAY_REQ;
13006 		break;
13007 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13008 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13009 			       TG3_RX_PTP_CTL_DELAY_REQ;
13010 		break;
13011 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13012 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13013 			       TG3_RX_PTP_CTL_DELAY_REQ;
13014 		break;
13015 	default:
13016 		return -ERANGE;
13017 	}
13018 
13019 	if (netif_running(dev) && tp->rxptpctl)
13020 		tw32(TG3_RX_PTP_CTL,
13021 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13022 
13023 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13024 		-EFAULT : 0;
13025 }
13026 
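/* Main ioctl entry point.  MII accesses go through phylib when it owns
 * the PHY; otherwise they are serviced directly under tp->lock.
 * SIOCSHWTSTAMP is routed to tg3_hwtstamp_ioctl().
 */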
13027 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13028 {
13029 	struct mii_ioctl_data *data = if_mii(ifr);
13030 	struct tg3 *tp = netdev_priv(dev);
13031 	int err;
13032 
13033 	if (tg3_flag(tp, USE_PHYLIB)) {
13034 		struct phy_device *phydev;
13035 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13036 			return -EAGAIN;
13037 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13038 		return phy_mii_ioctl(phydev, ifr, cmd);
13039 	}
13040 
13041 	switch (cmd) {
13042 	case SIOCGMIIPHY:
13043 		data->phy_id = tp->phy_addr;
13044 
		/* fall through */
13046 	case SIOCGMIIREG: {
13047 		u32 mii_regval;
13048 
13049 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13050 			break;			/* We have no PHY */
13051 
13052 		if (!netif_running(dev))
13053 			return -EAGAIN;
13054 
13055 		spin_lock_bh(&tp->lock);
13056 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13057 				    data->reg_num & 0x1f, &mii_regval);
13058 		spin_unlock_bh(&tp->lock);
13059 
13060 		data->val_out = mii_regval;
13061 
13062 		return err;
13063 	}
13064 
13065 	case SIOCSMIIREG:
13066 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13067 			break;			/* We have no PHY */
13068 
13069 		if (!netif_running(dev))
13070 			return -EAGAIN;
13071 
13072 		spin_lock_bh(&tp->lock);
13073 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13074 				     data->reg_num & 0x1f, data->val_in);
13075 		spin_unlock_bh(&tp->lock);
13076 
13077 		return err;
13078 
13079 	case SIOCSHWTSTAMP:
13080 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13081 
13082 	default:
13083 		/* do nothing */
13084 		break;
13085 	}
13086 	return -EOPNOTSUPP;
13087 }
13088 
13089 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13090 {
13091 	struct tg3 *tp = netdev_priv(dev);
13092 
13093 	memcpy(ec, &tp->coal, sizeof(*ec));
13094 	return 0;
13095 }
13096 
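/* Validate and apply ethtool coalescing parameters.  Only pre-5705 chips
 * support the per-IRQ tick limits and the statistics coalescing interval,
 * so those bounds stay at zero (i.e. disallowed) on 5705+ hardware.
 */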
13097 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13098 {
13099 	struct tg3 *tp = netdev_priv(dev);
13100 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13101 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13102 
13103 	if (!tg3_flag(tp, 5705_PLUS)) {
13104 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13105 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13106 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13107 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13108 	}
13109 
13110 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13111 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13112 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13113 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13114 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13115 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13116 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13117 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13118 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13119 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13120 		return -EINVAL;
13121 
13122 	/* No rx interrupts will be generated if both are zero */
13123 	if ((ec->rx_coalesce_usecs == 0) &&
13124 	    (ec->rx_max_coalesced_frames == 0))
13125 		return -EINVAL;
13126 
13127 	/* No tx interrupts will be generated if both are zero */
13128 	if ((ec->tx_coalesce_usecs == 0) &&
13129 	    (ec->tx_max_coalesced_frames == 0))
13130 		return -EINVAL;
13131 
13132 	/* Only copy relevant parameters, ignore all others. */
13133 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13134 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13135 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13136 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13137 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13138 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13139 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13140 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13141 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13142 
13143 	if (netif_running(dev)) {
13144 		tg3_full_lock(tp, 0);
13145 		__tg3_set_coalesce(tp, &tp->coal);
13146 		tg3_full_unlock(tp);
13147 	}
13148 	return 0;
13149 }
13150 
13151 static const struct ethtool_ops tg3_ethtool_ops = {
13152 	.get_settings		= tg3_get_settings,
13153 	.set_settings		= tg3_set_settings,
13154 	.get_drvinfo		= tg3_get_drvinfo,
13155 	.get_regs_len		= tg3_get_regs_len,
13156 	.get_regs		= tg3_get_regs,
13157 	.get_wol		= tg3_get_wol,
13158 	.set_wol		= tg3_set_wol,
13159 	.get_msglevel		= tg3_get_msglevel,
13160 	.set_msglevel		= tg3_set_msglevel,
13161 	.nway_reset		= tg3_nway_reset,
13162 	.get_link		= ethtool_op_get_link,
13163 	.get_eeprom_len		= tg3_get_eeprom_len,
13164 	.get_eeprom		= tg3_get_eeprom,
13165 	.set_eeprom		= tg3_set_eeprom,
13166 	.get_ringparam		= tg3_get_ringparam,
13167 	.set_ringparam		= tg3_set_ringparam,
13168 	.get_pauseparam		= tg3_get_pauseparam,
13169 	.set_pauseparam		= tg3_set_pauseparam,
13170 	.self_test		= tg3_self_test,
13171 	.get_strings		= tg3_get_strings,
13172 	.set_phys_id		= tg3_set_phys_id,
13173 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13174 	.get_coalesce		= tg3_get_coalesce,
13175 	.set_coalesce		= tg3_set_coalesce,
13176 	.get_sset_count		= tg3_get_sset_count,
13177 	.get_rxnfc		= tg3_get_rxnfc,
13178 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13179 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13180 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13181 	.get_channels		= tg3_get_channels,
13182 	.set_channels		= tg3_set_channels,
13183 	.get_ts_info		= tg3_get_ts_info,
13184 };
13185 
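/* Fetch 64-bit device statistics.  If the hardware stats block is gone
 * (e.g. the device has been closed), return the last saved snapshot.
 */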
13186 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13187 						struct rtnl_link_stats64 *stats)
13188 {
13189 	struct tg3 *tp = netdev_priv(dev);
13190 
13191 	spin_lock_bh(&tp->lock);
13192 	if (!tp->hw_stats) {
13193 		spin_unlock_bh(&tp->lock);
13194 		return &tp->net_stats_prev;
13195 	}
13196 
13197 	tg3_get_nstats(tp, stats);
13198 	spin_unlock_bh(&tp->lock);
13199 
13200 	return stats;
13201 }
13202 
13203 static void tg3_set_rx_mode(struct net_device *dev)
13204 {
13205 	struct tg3 *tp = netdev_priv(dev);
13206 
13207 	if (!netif_running(dev))
13208 		return;
13209 
13210 	tg3_full_lock(tp, 0);
13211 	__tg3_set_rx_mode(dev);
13212 	tg3_full_unlock(tp);
13213 }
13214 
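/* Record the new MTU and adjust the related flags.  5780-class chips
 * cannot use TSO together with jumbo frames, so TSO capability is traded
 * off against the jumbo ring as the MTU crosses ETH_DATA_LEN.
 */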
13215 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13216 			       int new_mtu)
13217 {
13218 	dev->mtu = new_mtu;
13219 
13220 	if (new_mtu > ETH_DATA_LEN) {
13221 		if (tg3_flag(tp, 5780_CLASS)) {
13222 			netdev_update_features(dev);
13223 			tg3_flag_clear(tp, TSO_CAPABLE);
13224 		} else {
13225 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13226 		}
13227 	} else {
13228 		if (tg3_flag(tp, 5780_CLASS)) {
13229 			tg3_flag_set(tp, TSO_CAPABLE);
13230 			netdev_update_features(dev);
13231 		}
13232 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13233 	}
13234 }
13235 
13236 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13237 {
13238 	struct tg3 *tp = netdev_priv(dev);
13239 	int err, reset_phy = 0;
13240 
13241 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13242 		return -EINVAL;
13243 
13244 	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
13248 		tg3_set_mtu(dev, tp, new_mtu);
13249 		return 0;
13250 	}
13251 
13252 	tg3_phy_stop(tp);
13253 
13254 	tg3_netif_stop(tp);
13255 
13256 	tg3_full_lock(tp, 1);
13257 
13258 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13259 
13260 	tg3_set_mtu(dev, tp, new_mtu);
13261 
13262 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13263 	 * breaks all requests to 256 bytes.
13264 	 */
13265 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13266 		reset_phy = 1;
13267 
13268 	err = tg3_restart_hw(tp, reset_phy);
13269 
13270 	if (!err)
13271 		tg3_netif_start(tp);
13272 
13273 	tg3_full_unlock(tp);
13274 
13275 	if (!err)
13276 		tg3_phy_start(tp);
13277 
13278 	return err;
13279 }
13280 
13281 static const struct net_device_ops tg3_netdev_ops = {
13282 	.ndo_open		= tg3_open,
13283 	.ndo_stop		= tg3_close,
13284 	.ndo_start_xmit		= tg3_start_xmit,
13285 	.ndo_get_stats64	= tg3_get_stats64,
13286 	.ndo_validate_addr	= eth_validate_addr,
13287 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13288 	.ndo_set_mac_address	= tg3_set_mac_addr,
13289 	.ndo_do_ioctl		= tg3_ioctl,
13290 	.ndo_tx_timeout		= tg3_tx_timeout,
13291 	.ndo_change_mtu		= tg3_change_mtu,
13292 	.ndo_fix_features	= tg3_fix_features,
13293 	.ndo_set_features	= tg3_set_features,
13294 #ifdef CONFIG_NET_POLL_CONTROLLER
13295 	.ndo_poll_controller	= tg3_poll_controller,
13296 #endif
13297 };
13298 
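/* Determine the size of a legacy SEEPROM by probing for address wrap. */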
13299 static void tg3_get_eeprom_size(struct tg3 *tp)
13300 {
13301 	u32 cursize, val, magic;
13302 
13303 	tp->nvram_size = EEPROM_CHIP_SIZE;
13304 
13305 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13306 		return;
13307 
13308 	if ((magic != TG3_EEPROM_MAGIC) &&
13309 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13310 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13311 		return;
13312 
13313 	/*
13314 	 * Size the chip by reading offsets at increasing powers of two.
13315 	 * When we encounter our validation signature, we know the addressing
13316 	 * has wrapped around, and thus have our chip size.
13317 	 */
13318 	cursize = 0x10;
13319 
13320 	while (cursize < tp->nvram_size) {
13321 		if (tg3_nvram_read(tp, cursize, &val) != 0)
13322 			return;
13323 
13324 		if (val == magic)
13325 			break;
13326 
13327 		cursize <<= 1;
13328 	}
13329 
13330 	tp->nvram_size = cursize;
13331 }
13332 
13333 static void tg3_get_nvram_size(struct tg3 *tp)
13334 {
13335 	u32 val;
13336 
13337 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13338 		return;
13339 
13340 	/* Selfboot format */
13341 	if (val != TG3_EEPROM_MAGIC) {
13342 		tg3_get_eeprom_size(tp);
13343 		return;
13344 	}
13345 
13346 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13347 		if (val != 0) {
13348 			/* This is confusing.  We want to operate on the
13349 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13350 			 * call will read from NVRAM and byteswap the data
13351 			 * according to the byteswapping settings for all
13352 			 * other register accesses.  This ensures the data we
13353 			 * want will always reside in the lower 16-bits.
13354 			 * However, the data in NVRAM is in LE format, which
13355 			 * means the data from the NVRAM read will always be
13356 			 * opposite the endianness of the CPU.  The 16-bit
13357 			 * byteswap then brings the data to CPU endianness.
13358 			 */
13359 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13360 			return;
13361 		}
13362 	}
13363 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13364 }
13365 
13366 static void tg3_get_nvram_info(struct tg3 *tp)
13367 {
13368 	u32 nvcfg1;
13369 
13370 	nvcfg1 = tr32(NVRAM_CFG1);
13371 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13372 		tg3_flag_set(tp, FLASH);
13373 	} else {
13374 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13375 		tw32(NVRAM_CFG1, nvcfg1);
13376 	}
13377 
13378 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13379 	    tg3_flag(tp, 5780_CLASS)) {
13380 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13381 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13382 			tp->nvram_jedecnum = JEDEC_ATMEL;
13383 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13384 			tg3_flag_set(tp, NVRAM_BUFFERED);
13385 			break;
13386 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13387 			tp->nvram_jedecnum = JEDEC_ATMEL;
13388 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13389 			break;
13390 		case FLASH_VENDOR_ATMEL_EEPROM:
13391 			tp->nvram_jedecnum = JEDEC_ATMEL;
13392 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13393 			tg3_flag_set(tp, NVRAM_BUFFERED);
13394 			break;
13395 		case FLASH_VENDOR_ST:
13396 			tp->nvram_jedecnum = JEDEC_ST;
13397 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13398 			tg3_flag_set(tp, NVRAM_BUFFERED);
13399 			break;
13400 		case FLASH_VENDOR_SAIFUN:
13401 			tp->nvram_jedecnum = JEDEC_SAIFUN;
13402 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13403 			break;
13404 		case FLASH_VENDOR_SST_SMALL:
13405 		case FLASH_VENDOR_SST_LARGE:
13406 			tp->nvram_jedecnum = JEDEC_SST;
13407 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13408 			break;
13409 		}
13410 	} else {
13411 		tp->nvram_jedecnum = JEDEC_ATMEL;
13412 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13413 		tg3_flag_set(tp, NVRAM_BUFFERED);
13414 	}
13415 }
13416 
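/* Translate the NVRAM_CFG1 page-size field into a byte count. */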
13417 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13418 {
13419 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13420 	case FLASH_5752PAGE_SIZE_256:
13421 		tp->nvram_pagesize = 256;
13422 		break;
13423 	case FLASH_5752PAGE_SIZE_512:
13424 		tp->nvram_pagesize = 512;
13425 		break;
13426 	case FLASH_5752PAGE_SIZE_1K:
13427 		tp->nvram_pagesize = 1024;
13428 		break;
13429 	case FLASH_5752PAGE_SIZE_2K:
13430 		tp->nvram_pagesize = 2048;
13431 		break;
13432 	case FLASH_5752PAGE_SIZE_4K:
13433 		tp->nvram_pagesize = 4096;
13434 		break;
13435 	case FLASH_5752PAGE_SIZE_264:
13436 		tp->nvram_pagesize = 264;
13437 		break;
13438 	case FLASH_5752PAGE_SIZE_528:
13439 		tp->nvram_pagesize = 528;
13440 		break;
13441 	}
13442 }
13443 
13444 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13445 {
13446 	u32 nvcfg1;
13447 
13448 	nvcfg1 = tr32(NVRAM_CFG1);
13449 
13450 	/* NVRAM protection for TPM */
13451 	if (nvcfg1 & (1 << 27))
13452 		tg3_flag_set(tp, PROTECTED_NVRAM);
13453 
13454 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13455 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13456 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13457 		tp->nvram_jedecnum = JEDEC_ATMEL;
13458 		tg3_flag_set(tp, NVRAM_BUFFERED);
13459 		break;
13460 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13461 		tp->nvram_jedecnum = JEDEC_ATMEL;
13462 		tg3_flag_set(tp, NVRAM_BUFFERED);
13463 		tg3_flag_set(tp, FLASH);
13464 		break;
13465 	case FLASH_5752VENDOR_ST_M45PE10:
13466 	case FLASH_5752VENDOR_ST_M45PE20:
13467 	case FLASH_5752VENDOR_ST_M45PE40:
13468 		tp->nvram_jedecnum = JEDEC_ST;
13469 		tg3_flag_set(tp, NVRAM_BUFFERED);
13470 		tg3_flag_set(tp, FLASH);
13471 		break;
13472 	}
13473 
13474 	if (tg3_flag(tp, FLASH)) {
13475 		tg3_nvram_get_pagesize(tp, nvcfg1);
13476 	} else {
13477 		/* For eeprom, set pagesize to maximum eeprom size */
13478 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13479 
13480 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13481 		tw32(NVRAM_CFG1, nvcfg1);
13482 	}
13483 }
13484 
13485 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13486 {
13487 	u32 nvcfg1, protect = 0;
13488 
13489 	nvcfg1 = tr32(NVRAM_CFG1);
13490 
13491 	/* NVRAM protection for TPM */
13492 	if (nvcfg1 & (1 << 27)) {
13493 		tg3_flag_set(tp, PROTECTED_NVRAM);
13494 		protect = 1;
13495 	}
13496 
13497 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13498 	switch (nvcfg1) {
13499 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13500 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13501 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13502 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
13503 		tp->nvram_jedecnum = JEDEC_ATMEL;
13504 		tg3_flag_set(tp, NVRAM_BUFFERED);
13505 		tg3_flag_set(tp, FLASH);
13506 		tp->nvram_pagesize = 264;
13507 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13508 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13509 			tp->nvram_size = (protect ? 0x3e200 :
13510 					  TG3_NVRAM_SIZE_512KB);
13511 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13512 			tp->nvram_size = (protect ? 0x1f200 :
13513 					  TG3_NVRAM_SIZE_256KB);
13514 		else
13515 			tp->nvram_size = (protect ? 0x1f200 :
13516 					  TG3_NVRAM_SIZE_128KB);
13517 		break;
13518 	case FLASH_5752VENDOR_ST_M45PE10:
13519 	case FLASH_5752VENDOR_ST_M45PE20:
13520 	case FLASH_5752VENDOR_ST_M45PE40:
13521 		tp->nvram_jedecnum = JEDEC_ST;
13522 		tg3_flag_set(tp, NVRAM_BUFFERED);
13523 		tg3_flag_set(tp, FLASH);
13524 		tp->nvram_pagesize = 256;
13525 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13526 			tp->nvram_size = (protect ?
13527 					  TG3_NVRAM_SIZE_64KB :
13528 					  TG3_NVRAM_SIZE_128KB);
13529 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13530 			tp->nvram_size = (protect ?
13531 					  TG3_NVRAM_SIZE_64KB :
13532 					  TG3_NVRAM_SIZE_256KB);
13533 		else
13534 			tp->nvram_size = (protect ?
13535 					  TG3_NVRAM_SIZE_128KB :
13536 					  TG3_NVRAM_SIZE_512KB);
13537 		break;
13538 	}
13539 }
13540 
13541 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13542 {
13543 	u32 nvcfg1;
13544 
13545 	nvcfg1 = tr32(NVRAM_CFG1);
13546 
13547 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13548 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13549 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13550 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13551 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13552 		tp->nvram_jedecnum = JEDEC_ATMEL;
13553 		tg3_flag_set(tp, NVRAM_BUFFERED);
13554 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13555 
13556 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13557 		tw32(NVRAM_CFG1, nvcfg1);
13558 		break;
13559 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13560 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13561 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13562 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13563 		tp->nvram_jedecnum = JEDEC_ATMEL;
13564 		tg3_flag_set(tp, NVRAM_BUFFERED);
13565 		tg3_flag_set(tp, FLASH);
13566 		tp->nvram_pagesize = 264;
13567 		break;
13568 	case FLASH_5752VENDOR_ST_M45PE10:
13569 	case FLASH_5752VENDOR_ST_M45PE20:
13570 	case FLASH_5752VENDOR_ST_M45PE40:
13571 		tp->nvram_jedecnum = JEDEC_ST;
13572 		tg3_flag_set(tp, NVRAM_BUFFERED);
13573 		tg3_flag_set(tp, FLASH);
13574 		tp->nvram_pagesize = 256;
13575 		break;
13576 	}
13577 }
13578 
13579 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13580 {
13581 	u32 nvcfg1, protect = 0;
13582 
13583 	nvcfg1 = tr32(NVRAM_CFG1);
13584 
13585 	/* NVRAM protection for TPM */
13586 	if (nvcfg1 & (1 << 27)) {
13587 		tg3_flag_set(tp, PROTECTED_NVRAM);
13588 		protect = 1;
13589 	}
13590 
13591 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13592 	switch (nvcfg1) {
13593 	case FLASH_5761VENDOR_ATMEL_ADB021D:
13594 	case FLASH_5761VENDOR_ATMEL_ADB041D:
13595 	case FLASH_5761VENDOR_ATMEL_ADB081D:
13596 	case FLASH_5761VENDOR_ATMEL_ADB161D:
13597 	case FLASH_5761VENDOR_ATMEL_MDB021D:
13598 	case FLASH_5761VENDOR_ATMEL_MDB041D:
13599 	case FLASH_5761VENDOR_ATMEL_MDB081D:
13600 	case FLASH_5761VENDOR_ATMEL_MDB161D:
13601 		tp->nvram_jedecnum = JEDEC_ATMEL;
13602 		tg3_flag_set(tp, NVRAM_BUFFERED);
13603 		tg3_flag_set(tp, FLASH);
13604 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13605 		tp->nvram_pagesize = 256;
13606 		break;
13607 	case FLASH_5761VENDOR_ST_A_M45PE20:
13608 	case FLASH_5761VENDOR_ST_A_M45PE40:
13609 	case FLASH_5761VENDOR_ST_A_M45PE80:
13610 	case FLASH_5761VENDOR_ST_A_M45PE16:
13611 	case FLASH_5761VENDOR_ST_M_M45PE20:
13612 	case FLASH_5761VENDOR_ST_M_M45PE40:
13613 	case FLASH_5761VENDOR_ST_M_M45PE80:
13614 	case FLASH_5761VENDOR_ST_M_M45PE16:
13615 		tp->nvram_jedecnum = JEDEC_ST;
13616 		tg3_flag_set(tp, NVRAM_BUFFERED);
13617 		tg3_flag_set(tp, FLASH);
13618 		tp->nvram_pagesize = 256;
13619 		break;
13620 	}
13621 
13622 	if (protect) {
13623 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13624 	} else {
13625 		switch (nvcfg1) {
13626 		case FLASH_5761VENDOR_ATMEL_ADB161D:
13627 		case FLASH_5761VENDOR_ATMEL_MDB161D:
13628 		case FLASH_5761VENDOR_ST_A_M45PE16:
13629 		case FLASH_5761VENDOR_ST_M_M45PE16:
13630 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13631 			break;
13632 		case FLASH_5761VENDOR_ATMEL_ADB081D:
13633 		case FLASH_5761VENDOR_ATMEL_MDB081D:
13634 		case FLASH_5761VENDOR_ST_A_M45PE80:
13635 		case FLASH_5761VENDOR_ST_M_M45PE80:
13636 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13637 			break;
13638 		case FLASH_5761VENDOR_ATMEL_ADB041D:
13639 		case FLASH_5761VENDOR_ATMEL_MDB041D:
13640 		case FLASH_5761VENDOR_ST_A_M45PE40:
13641 		case FLASH_5761VENDOR_ST_M_M45PE40:
13642 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13643 			break;
13644 		case FLASH_5761VENDOR_ATMEL_ADB021D:
13645 		case FLASH_5761VENDOR_ATMEL_MDB021D:
13646 		case FLASH_5761VENDOR_ST_A_M45PE20:
13647 		case FLASH_5761VENDOR_ST_M_M45PE20:
13648 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13649 			break;
13650 		}
13651 	}
13652 }
13653 
13654 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13655 {
13656 	tp->nvram_jedecnum = JEDEC_ATMEL;
13657 	tg3_flag_set(tp, NVRAM_BUFFERED);
13658 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13659 }
13660 
13661 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13662 {
13663 	u32 nvcfg1;
13664 
13665 	nvcfg1 = tr32(NVRAM_CFG1);
13666 
13667 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13668 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13669 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13670 		tp->nvram_jedecnum = JEDEC_ATMEL;
13671 		tg3_flag_set(tp, NVRAM_BUFFERED);
13672 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13673 
13674 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13675 		tw32(NVRAM_CFG1, nvcfg1);
13676 		return;
13677 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13678 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13679 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13680 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13681 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13682 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13683 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13684 		tp->nvram_jedecnum = JEDEC_ATMEL;
13685 		tg3_flag_set(tp, NVRAM_BUFFERED);
13686 		tg3_flag_set(tp, FLASH);
13687 
13688 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13689 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13690 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13691 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13692 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13693 			break;
13694 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13695 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13696 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13697 			break;
13698 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13699 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13700 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13701 			break;
13702 		}
13703 		break;
13704 	case FLASH_5752VENDOR_ST_M45PE10:
13705 	case FLASH_5752VENDOR_ST_M45PE20:
13706 	case FLASH_5752VENDOR_ST_M45PE40:
13707 		tp->nvram_jedecnum = JEDEC_ST;
13708 		tg3_flag_set(tp, NVRAM_BUFFERED);
13709 		tg3_flag_set(tp, FLASH);
13710 
13711 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13712 		case FLASH_5752VENDOR_ST_M45PE10:
13713 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13714 			break;
13715 		case FLASH_5752VENDOR_ST_M45PE20:
13716 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13717 			break;
13718 		case FLASH_5752VENDOR_ST_M45PE40:
13719 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13720 			break;
13721 		}
13722 		break;
13723 	default:
13724 		tg3_flag_set(tp, NO_NVRAM);
13725 		return;
13726 	}
13727 
13728 	tg3_nvram_get_pagesize(tp, nvcfg1);
13729 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13730 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13731 }
13732 
13734 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13735 {
13736 	u32 nvcfg1;
13737 
13738 	nvcfg1 = tr32(NVRAM_CFG1);
13739 
13740 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13741 	case FLASH_5717VENDOR_ATMEL_EEPROM:
13742 	case FLASH_5717VENDOR_MICRO_EEPROM:
13743 		tp->nvram_jedecnum = JEDEC_ATMEL;
13744 		tg3_flag_set(tp, NVRAM_BUFFERED);
13745 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13746 
13747 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13748 		tw32(NVRAM_CFG1, nvcfg1);
13749 		return;
13750 	case FLASH_5717VENDOR_ATMEL_MDB011D:
13751 	case FLASH_5717VENDOR_ATMEL_ADB011B:
13752 	case FLASH_5717VENDOR_ATMEL_ADB011D:
13753 	case FLASH_5717VENDOR_ATMEL_MDB021D:
13754 	case FLASH_5717VENDOR_ATMEL_ADB021B:
13755 	case FLASH_5717VENDOR_ATMEL_ADB021D:
13756 	case FLASH_5717VENDOR_ATMEL_45USPT:
13757 		tp->nvram_jedecnum = JEDEC_ATMEL;
13758 		tg3_flag_set(tp, NVRAM_BUFFERED);
13759 		tg3_flag_set(tp, FLASH);
13760 
13761 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13762 		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
13764 			break;
13765 		case FLASH_5717VENDOR_ATMEL_ADB021B:
13766 		case FLASH_5717VENDOR_ATMEL_ADB021D:
13767 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13768 			break;
13769 		default:
13770 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13771 			break;
13772 		}
13773 		break;
13774 	case FLASH_5717VENDOR_ST_M_M25PE10:
13775 	case FLASH_5717VENDOR_ST_A_M25PE10:
13776 	case FLASH_5717VENDOR_ST_M_M45PE10:
13777 	case FLASH_5717VENDOR_ST_A_M45PE10:
13778 	case FLASH_5717VENDOR_ST_M_M25PE20:
13779 	case FLASH_5717VENDOR_ST_A_M25PE20:
13780 	case FLASH_5717VENDOR_ST_M_M45PE20:
13781 	case FLASH_5717VENDOR_ST_A_M45PE20:
13782 	case FLASH_5717VENDOR_ST_25USPT:
13783 	case FLASH_5717VENDOR_ST_45USPT:
13784 		tp->nvram_jedecnum = JEDEC_ST;
13785 		tg3_flag_set(tp, NVRAM_BUFFERED);
13786 		tg3_flag_set(tp, FLASH);
13787 
13788 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13789 		case FLASH_5717VENDOR_ST_M_M25PE20:
13790 		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
13792 			break;
13793 		case FLASH_5717VENDOR_ST_A_M25PE20:
13794 		case FLASH_5717VENDOR_ST_A_M45PE20:
13795 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13796 			break;
13797 		default:
13798 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13799 			break;
13800 		}
13801 		break;
13802 	default:
13803 		tg3_flag_set(tp, NO_NVRAM);
13804 		return;
13805 	}
13806 
13807 	tg3_nvram_get_pagesize(tp, nvcfg1);
13808 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13809 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13810 }
13811 
13812 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13813 {
13814 	u32 nvcfg1, nvmpinstrp;
13815 
13816 	nvcfg1 = tr32(NVRAM_CFG1);
13817 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13818 
13819 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13820 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13821 			tg3_flag_set(tp, NO_NVRAM);
13822 			return;
13823 		}
13824 
13825 		switch (nvmpinstrp) {
13826 		case FLASH_5762_EEPROM_HD:
13827 			nvmpinstrp = FLASH_5720_EEPROM_HD;
13828 			break;
13829 		case FLASH_5762_EEPROM_LD:
13830 			nvmpinstrp = FLASH_5720_EEPROM_LD;
13831 			break;
13832 		}
13833 	}
13834 
13835 	switch (nvmpinstrp) {
13836 	case FLASH_5720_EEPROM_HD:
13837 	case FLASH_5720_EEPROM_LD:
13838 		tp->nvram_jedecnum = JEDEC_ATMEL;
13839 		tg3_flag_set(tp, NVRAM_BUFFERED);
13840 
13841 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13842 		tw32(NVRAM_CFG1, nvcfg1);
13843 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13844 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13845 		else
13846 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13847 		return;
13848 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
13849 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
13850 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
13851 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
13852 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
13853 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
13854 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
13855 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
13856 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
13857 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
13858 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
13859 	case FLASH_5720VENDOR_ATMEL_45USPT:
13860 		tp->nvram_jedecnum = JEDEC_ATMEL;
13861 		tg3_flag_set(tp, NVRAM_BUFFERED);
13862 		tg3_flag_set(tp, FLASH);
13863 
13864 		switch (nvmpinstrp) {
13865 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
13866 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
13867 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
13868 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13869 			break;
13870 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
13871 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
13872 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
13873 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13874 			break;
13875 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
13876 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
13877 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13878 			break;
13879 		default:
13880 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
13881 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13882 			break;
13883 		}
13884 		break;
13885 	case FLASH_5720VENDOR_M_ST_M25PE10:
13886 	case FLASH_5720VENDOR_M_ST_M45PE10:
13887 	case FLASH_5720VENDOR_A_ST_M25PE10:
13888 	case FLASH_5720VENDOR_A_ST_M45PE10:
13889 	case FLASH_5720VENDOR_M_ST_M25PE20:
13890 	case FLASH_5720VENDOR_M_ST_M45PE20:
13891 	case FLASH_5720VENDOR_A_ST_M25PE20:
13892 	case FLASH_5720VENDOR_A_ST_M45PE20:
13893 	case FLASH_5720VENDOR_M_ST_M25PE40:
13894 	case FLASH_5720VENDOR_M_ST_M45PE40:
13895 	case FLASH_5720VENDOR_A_ST_M25PE40:
13896 	case FLASH_5720VENDOR_A_ST_M45PE40:
13897 	case FLASH_5720VENDOR_M_ST_M25PE80:
13898 	case FLASH_5720VENDOR_M_ST_M45PE80:
13899 	case FLASH_5720VENDOR_A_ST_M25PE80:
13900 	case FLASH_5720VENDOR_A_ST_M45PE80:
13901 	case FLASH_5720VENDOR_ST_25USPT:
13902 	case FLASH_5720VENDOR_ST_45USPT:
13903 		tp->nvram_jedecnum = JEDEC_ST;
13904 		tg3_flag_set(tp, NVRAM_BUFFERED);
13905 		tg3_flag_set(tp, FLASH);
13906 
13907 		switch (nvmpinstrp) {
13908 		case FLASH_5720VENDOR_M_ST_M25PE20:
13909 		case FLASH_5720VENDOR_M_ST_M45PE20:
13910 		case FLASH_5720VENDOR_A_ST_M25PE20:
13911 		case FLASH_5720VENDOR_A_ST_M45PE20:
13912 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13913 			break;
13914 		case FLASH_5720VENDOR_M_ST_M25PE40:
13915 		case FLASH_5720VENDOR_M_ST_M45PE40:
13916 		case FLASH_5720VENDOR_A_ST_M25PE40:
13917 		case FLASH_5720VENDOR_A_ST_M45PE40:
13918 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13919 			break;
13920 		case FLASH_5720VENDOR_M_ST_M25PE80:
13921 		case FLASH_5720VENDOR_M_ST_M45PE80:
13922 		case FLASH_5720VENDOR_A_ST_M25PE80:
13923 		case FLASH_5720VENDOR_A_ST_M45PE80:
13924 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13925 			break;
13926 		default:
13927 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
13928 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13929 			break;
13930 		}
13931 		break;
13932 	default:
13933 		tg3_flag_set(tp, NO_NVRAM);
13934 		return;
13935 	}
13936 
13937 	tg3_nvram_get_pagesize(tp, nvcfg1);
13938 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13939 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13940 
13941 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13942 		u32 val;
13943 
13944 		if (tg3_nvram_read(tp, 0, &val))
13945 			return;
13946 
13947 		if (val != TG3_EEPROM_MAGIC &&
13948 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13949 			tg3_flag_set(tp, NO_NVRAM);
13950 	}
13951 }
13952 
13953 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13954 static void tg3_nvram_init(struct tg3 *tp)
13955 {
13956 	if (tg3_flag(tp, IS_SSB_CORE)) {
13957 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
13958 		tg3_flag_clear(tp, NVRAM);
13959 		tg3_flag_clear(tp, NVRAM_BUFFERED);
13960 		tg3_flag_set(tp, NO_NVRAM);
13961 		return;
13962 	}
13963 
13964 	tw32_f(GRC_EEPROM_ADDR,
13965 	     (EEPROM_ADDR_FSM_RESET |
13966 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
13967 	       EEPROM_ADDR_CLKPERD_SHIFT)));
13968 
13969 	msleep(1);
13970 
13971 	/* Enable seeprom accesses. */
13972 	tw32_f(GRC_LOCAL_CTRL,
13973 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13974 	udelay(100);
13975 
13976 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
13977 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
13978 		tg3_flag_set(tp, NVRAM);
13979 
13980 		if (tg3_nvram_lock(tp)) {
13981 			netdev_warn(tp->dev,
13982 				    "Cannot get nvram lock, %s failed\n",
13983 				    __func__);
13984 			return;
13985 		}
13986 		tg3_enable_nvram_access(tp);
13987 
13988 		tp->nvram_size = 0;
13989 
13990 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
13991 			tg3_get_5752_nvram_info(tp);
13992 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
13993 			tg3_get_5755_nvram_info(tp);
13994 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
13995 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
13996 			 tg3_asic_rev(tp) == ASIC_REV_5785)
13997 			tg3_get_5787_nvram_info(tp);
13998 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
13999 			tg3_get_5761_nvram_info(tp);
14000 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14001 			tg3_get_5906_nvram_info(tp);
14002 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14003 			 tg3_flag(tp, 57765_CLASS))
14004 			tg3_get_57780_nvram_info(tp);
14005 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14006 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14007 			tg3_get_5717_nvram_info(tp);
14008 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14009 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14010 			tg3_get_5720_nvram_info(tp);
14011 		else
14012 			tg3_get_nvram_info(tp);
14013 
14014 		if (tp->nvram_size == 0)
14015 			tg3_get_nvram_size(tp);
14016 
14017 		tg3_disable_nvram_access(tp);
14018 		tg3_nvram_unlock(tp);
14019 
14020 	} else {
14021 		tg3_flag_clear(tp, NVRAM);
14022 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14023 
14024 		tg3_get_eeprom_size(tp);
14025 	}
14026 }
14027 
14028 struct subsys_tbl_ent {
14029 	u16 subsys_vendor, subsys_devid;
14030 	u32 phy_id;
14031 };
14032 
14033 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14034 	/* Broadcom boards. */
14035 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14036 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14037 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14038 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14039 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14040 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14041 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14042 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14043 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14044 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14045 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14046 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14047 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14048 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14049 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14050 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14051 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14052 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14053 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14054 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14055 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14056 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14057 
14058 	/* 3com boards. */
14059 	{ TG3PCI_SUBVENDOR_ID_3COM,
14060 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14061 	{ TG3PCI_SUBVENDOR_ID_3COM,
14062 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14063 	{ TG3PCI_SUBVENDOR_ID_3COM,
14064 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14065 	{ TG3PCI_SUBVENDOR_ID_3COM,
14066 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14067 	{ TG3PCI_SUBVENDOR_ID_3COM,
14068 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14069 
14070 	/* DELL boards. */
14071 	{ TG3PCI_SUBVENDOR_ID_DELL,
14072 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14073 	{ TG3PCI_SUBVENDOR_ID_DELL,
14074 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14075 	{ TG3PCI_SUBVENDOR_ID_DELL,
14076 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14077 	{ TG3PCI_SUBVENDOR_ID_DELL,
14078 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14079 
14080 	/* Compaq boards. */
14081 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14082 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14083 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14084 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14085 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14086 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14087 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14088 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14089 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14090 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14091 
14092 	/* IBM boards. */
14093 	{ TG3PCI_SUBVENDOR_ID_IBM,
14094 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14095 };
14096 
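/* Map the PCI subsystem vendor/device IDs to a known board entry. */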
14097 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14098 {
14099 	int i;
14100 
14101 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14102 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14103 		     tp->pdev->subsystem_vendor) &&
14104 		    (subsys_id_to_phy_id[i].subsys_devid ==
14105 		     tp->pdev->subsystem_device))
14106 			return &subsys_id_to_phy_id[i];
14107 	}
14108 	return NULL;
14109 }
14110 
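/* Pull the hardware configuration that bootcode left in NIC SRAM:
 * PHY ID, LED mode, WOL/ASF/APE enables and assorted workaround flags.
 */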
14111 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14112 {
14113 	u32 val;
14114 
14115 	tp->phy_id = TG3_PHY_ID_INVALID;
14116 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14117 
14118 	/* Assume an onboard device and WOL capable by default.  */
14119 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14120 	tg3_flag_set(tp, WOL_CAP);
14121 
14122 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14123 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14124 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14125 			tg3_flag_set(tp, IS_NIC);
14126 		}
14127 		val = tr32(VCPU_CFGSHDW);
14128 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14129 			tg3_flag_set(tp, ASPM_WORKAROUND);
14130 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14131 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14132 			tg3_flag_set(tp, WOL_ENABLE);
14133 			device_set_wakeup_enable(&tp->pdev->dev, true);
14134 		}
14135 		goto done;
14136 	}
14137 
14138 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14139 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14140 		u32 nic_cfg, led_cfg;
14141 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14142 		int eeprom_phy_serdes = 0;
14143 
14144 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14145 		tp->nic_sram_data_cfg = nic_cfg;
14146 
14147 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14148 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14149 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14150 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14151 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14152 		    (ver > 0) && (ver < 0x100))
14153 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14154 
14155 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14156 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14157 
14158 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14159 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14160 			eeprom_phy_serdes = 1;
14161 
14162 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14163 		if (nic_phy_id != 0) {
14164 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14165 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14166 
14167 			eeprom_phy_id  = (id1 >> 16) << 10;
14168 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14169 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14170 		} else
14171 			eeprom_phy_id = 0;
14172 
14173 		tp->phy_id = eeprom_phy_id;
14174 		if (eeprom_phy_serdes) {
14175 			if (!tg3_flag(tp, 5705_PLUS))
14176 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14177 			else
14178 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14179 		}
14180 
14181 		if (tg3_flag(tp, 5750_PLUS))
14182 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14183 				    SHASTA_EXT_LED_MODE_MASK);
14184 		else
14185 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14186 
14187 		switch (led_cfg) {
14188 		default:
14189 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14190 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14191 			break;
14192 
14193 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14194 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14195 			break;
14196 
14197 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14198 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14199 
14200 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14201 			 * read on some older 5700/5701 bootcode.
14202 			 */
14203 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14204 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14205 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14206 
14207 			break;
14208 
14209 		case SHASTA_EXT_LED_SHARED:
14210 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14211 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14212 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14213 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14214 						 LED_CTRL_MODE_PHY_2);
14215 			break;
14216 
14217 		case SHASTA_EXT_LED_MAC:
14218 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14219 			break;
14220 
14221 		case SHASTA_EXT_LED_COMBO:
14222 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14223 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14224 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14225 						 LED_CTRL_MODE_PHY_2);
14226 			break;
14227 
14228 		}
14229 
14230 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14231 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14232 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14233 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14234 
14235 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14236 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14237 
14238 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14239 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14240 			if ((tp->pdev->subsystem_vendor ==
14241 			     PCI_VENDOR_ID_ARIMA) &&
14242 			    (tp->pdev->subsystem_device == 0x205a ||
14243 			     tp->pdev->subsystem_device == 0x2063))
14244 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14245 		} else {
14246 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14247 			tg3_flag_set(tp, IS_NIC);
14248 		}
14249 
14250 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14251 			tg3_flag_set(tp, ENABLE_ASF);
14252 			if (tg3_flag(tp, 5750_PLUS))
14253 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14254 		}
14255 
14256 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14257 		    tg3_flag(tp, 5750_PLUS))
14258 			tg3_flag_set(tp, ENABLE_APE);
14259 
14260 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14261 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14262 			tg3_flag_clear(tp, WOL_CAP);
14263 
14264 		if (tg3_flag(tp, WOL_CAP) &&
14265 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14266 			tg3_flag_set(tp, WOL_ENABLE);
14267 			device_set_wakeup_enable(&tp->pdev->dev, true);
14268 		}
14269 
14270 		if (cfg2 & (1 << 17))
14271 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14272 
		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
14275 		if (cfg2 & (1 << 18))
14276 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14277 
14278 		if ((tg3_flag(tp, 57765_PLUS) ||
14279 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14280 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14281 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14282 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14283 
14284 		if (tg3_flag(tp, PCI_EXPRESS) &&
14285 		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
14286 		    !tg3_flag(tp, 57765_PLUS)) {
14287 			u32 cfg3;
14288 
14289 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14290 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14291 				tg3_flag_set(tp, ASPM_WORKAROUND);
14292 		}
14293 
14294 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14295 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14296 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14297 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14298 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14299 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14300 	}
14301 done:
14302 	if (tg3_flag(tp, WOL_CAP))
14303 		device_set_wakeup_enable(&tp->pdev->dev,
14304 					 tg3_flag(tp, WOL_ENABLE));
14305 	else
14306 		device_set_wakeup_capable(&tp->pdev->dev, false);
14307 }
14308 
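/* Read one OTP word through the APE interface.  The NVRAM arbitration
 * lock is held across the access and the command is polled for up to
 * 1 ms.
 */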
14309 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14310 {
14311 	int i, err;
14312 	u32 val2, off = offset * 8;
14313 
14314 	err = tg3_nvram_lock(tp);
14315 	if (err)
14316 		return err;
14317 
14318 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14319 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14320 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14321 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14322 	udelay(10);
14323 
14324 	for (i = 0; i < 100; i++) {
14325 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14326 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
14327 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14328 			break;
14329 		}
14330 		udelay(10);
14331 	}
14332 
14333 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14334 
14335 	tg3_nvram_unlock(tp);
14336 	if (val2 & APE_OTP_STATUS_CMD_DONE)
14337 		return 0;
14338 
14339 	return -EBUSY;
14340 }
14341 
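/* Issue an OTP command and poll for completion. */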
14342 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14343 {
14344 	int i;
14345 	u32 val;
14346 
14347 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14348 	tw32(OTP_CTRL, cmd);
14349 
14350 	/* Wait for up to 1 ms for command to execute. */
14351 	for (i = 0; i < 100; i++) {
14352 		val = tr32(OTP_STATUS);
14353 		if (val & OTP_STATUS_CMD_DONE)
14354 			break;
14355 		udelay(10);
14356 	}
14357 
14358 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14359 }
14360 
14361 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14362  * configuration is a 32-bit value that straddles the alignment boundary.
14363  * We do two 32-bit reads and then shift and merge the results.
14364  */
14365 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14366 {
14367 	u32 bhalf_otp, thalf_otp;
14368 
14369 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14370 
14371 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14372 		return 0;
14373 
14374 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14375 
14376 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14377 		return 0;
14378 
14379 	thalf_otp = tr32(OTP_READ_DATA);
14380 
14381 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14382 
14383 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14384 		return 0;
14385 
14386 	bhalf_otp = tr32(OTP_READ_DATA);
14387 
14388 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14389 }
14390 
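/* Seed the link configuration with everything the PHY can advertise and
 * default to autonegotiation with speed and duplex unknown.
 */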
14391 static void tg3_phy_init_link_config(struct tg3 *tp)
14392 {
14393 	u32 adv = ADVERTISED_Autoneg;
14394 
14395 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14396 		adv |= ADVERTISED_1000baseT_Half |
14397 		       ADVERTISED_1000baseT_Full;
14398 
14399 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14400 		adv |= ADVERTISED_100baseT_Half |
14401 		       ADVERTISED_100baseT_Full |
14402 		       ADVERTISED_10baseT_Half |
14403 		       ADVERTISED_10baseT_Full |
14404 		       ADVERTISED_TP;
14405 	else
14406 		adv |= ADVERTISED_FIBRE;
14407 
14408 	tp->link_config.advertising = adv;
14409 	tp->link_config.speed = SPEED_UNKNOWN;
14410 	tp->link_config.duplex = DUPLEX_UNKNOWN;
14411 	tp->link_config.autoneg = AUTONEG_ENABLE;
14412 	tp->link_config.active_speed = SPEED_UNKNOWN;
14413 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14414 
14415 	tp->old_link = -1;
14416 }
14417 
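/* Probe for the PHY.  Sets up flow control and APE lock defaults, reads
 * and validates the PHY ID (from the hardware, the eeprom, or the
 * subsystem-ID table) and kicks off autonegotiation when no management
 * firmware is in the way.
 */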
14418 static int tg3_phy_probe(struct tg3 *tp)
14419 {
14420 	u32 hw_phy_id_1, hw_phy_id_2;
14421 	u32 hw_phy_id, hw_phy_id_masked;
14422 	int err;
14423 
14424 	/* flow control autonegotiation is default behavior */
14425 	tg3_flag_set(tp, PAUSE_AUTONEG);
14426 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14427 
14428 	if (tg3_flag(tp, ENABLE_APE)) {
14429 		switch (tp->pci_fn) {
14430 		case 0:
14431 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14432 			break;
14433 		case 1:
14434 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14435 			break;
14436 		case 2:
14437 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14438 			break;
14439 		case 3:
14440 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14441 			break;
14442 		}
14443 	}
14444 
14445 	if (tg3_flag(tp, USE_PHYLIB))
14446 		return tg3_phy_init(tp);
14447 
14448 	/* Reading the PHY ID register can conflict with ASF
14449 	 * firmware access to the PHY hardware.
14450 	 */
14451 	err = 0;
14452 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14453 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14454 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area, and failing that,
		 * the hard-coded subsystem-ID table.
		 */
14460 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14461 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14462 
14463 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14464 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14465 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14466 
14467 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14468 	}
14469 
14470 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14471 		tp->phy_id = hw_phy_id;
14472 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14473 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14474 		else
14475 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14476 	} else {
14477 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
14478 			/* Do nothing, phy ID already set up in
14479 			 * tg3_get_eeprom_hw_cfg().
14480 			 */
14481 		} else {
14482 			struct subsys_tbl_ent *p;
14483 
14484 			/* No eeprom signature?  Try the hardcoded
14485 			 * subsys device table.
14486 			 */
14487 			p = tg3_lookup_by_subsys(tp);
14488 			if (p) {
14489 				tp->phy_id = p->phy_id;
14490 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
14498 				return -ENODEV;
14499 			}
14500 
14501 			if (!tp->phy_id ||
14502 			    tp->phy_id == TG3_PHY_ID_BCM8002)
14503 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14504 		}
14505 	}
14506 
14507 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14508 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14509 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
14510 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
14511 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14512 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14513 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14514 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14515 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14516 
14517 	tg3_phy_init_link_config(tp);
14518 
14519 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14520 	    !tg3_flag(tp, ENABLE_APE) &&
14521 	    !tg3_flag(tp, ENABLE_ASF)) {
14522 		u32 bmsr, dummy;
14523 
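		/* BMSR latches link-down events; read it twice so the
		 * second read reflects the current link state.
		 */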
14524 		tg3_readphy(tp, MII_BMSR, &bmsr);
14525 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14526 		    (bmsr & BMSR_LSTATUS))
14527 			goto skip_phy_reset;
14528 
14529 		err = tg3_phy_reset(tp);
14530 		if (err)
14531 			return err;
14532 
14533 		tg3_phy_set_wirespeed(tp);
14534 
14535 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14536 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14537 					    tp->link_config.flowctrl);
14538 
14539 			tg3_writephy(tp, MII_BMCR,
14540 				     BMCR_ANENABLE | BMCR_ANRESTART);
14541 		}
14542 	}
14543 
14544 skip_phy_reset:
14545 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14546 		err = tg3_init_5401phy_dsp(tp);
14547 		if (err)
14548 			return err;
14549 
14550 		err = tg3_init_5401phy_dsp(tp);
14551 	}
14552 
14553 	return err;
14554 }
14555 
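/* Parse the PCI VPD block for the bootcode version (Dell boards, PCI
 * vendor ID "1028") and the board part number, falling back to names
 * derived from the PCI device ID.
 */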
14556 static void tg3_read_vpd(struct tg3 *tp)
14557 {
14558 	u8 *vpd_data;
14559 	unsigned int block_end, rosize, len;
14560 	u32 vpdlen;
14561 	int j, i = 0;
14562 
14563 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14564 	if (!vpd_data)
14565 		goto out_no_vpd;
14566 
14567 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14568 	if (i < 0)
14569 		goto out_not_found;
14570 
14571 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14572 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14573 	i += PCI_VPD_LRDT_TAG_SIZE;
14574 
14575 	if (block_end > vpdlen)
14576 		goto out_not_found;
14577 
14578 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14579 				      PCI_VPD_RO_KEYWORD_MFR_ID);
14580 	if (j > 0) {
14581 		len = pci_vpd_info_field_size(&vpd_data[j]);
14582 
14583 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
14584 		if (j + len > block_end || len != 4 ||
14585 		    memcmp(&vpd_data[j], "1028", 4))
14586 			goto partno;
14587 
14588 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14589 					      PCI_VPD_RO_KEYWORD_VENDOR0);
14590 		if (j < 0)
14591 			goto partno;
14592 
14593 		len = pci_vpd_info_field_size(&vpd_data[j]);
14594 
14595 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
14596 		if (j + len > block_end)
14597 			goto partno;
14598 
14599 		memcpy(tp->fw_ver, &vpd_data[j], len);
14600 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14601 	}
14602 
14603 partno:
14604 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14605 				      PCI_VPD_RO_KEYWORD_PARTNO);
14606 	if (i < 0)
14607 		goto out_not_found;
14608 
14609 	len = pci_vpd_info_field_size(&vpd_data[i]);
14610 
14611 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
14612 	if (len > TG3_BPN_SIZE ||
14613 	    (len + i) > vpdlen)
14614 		goto out_not_found;
14615 
14616 	memcpy(tp->board_part_number, &vpd_data[i], len);
14617 
14618 out_not_found:
14619 	kfree(vpd_data);
14620 	if (tp->board_part_number[0])
14621 		return;
14622 
14623 out_no_vpd:
14624 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14625 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14626 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14627 			strcpy(tp->board_part_number, "BCM5717");
14628 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14629 			strcpy(tp->board_part_number, "BCM5718");
14630 		else
14631 			goto nomatch;
14632 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14633 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14634 			strcpy(tp->board_part_number, "BCM57780");
14635 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14636 			strcpy(tp->board_part_number, "BCM57760");
14637 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14638 			strcpy(tp->board_part_number, "BCM57790");
14639 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14640 			strcpy(tp->board_part_number, "BCM57788");
14641 		else
14642 			goto nomatch;
14643 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14644 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14645 			strcpy(tp->board_part_number, "BCM57761");
14646 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14647 			strcpy(tp->board_part_number, "BCM57765");
14648 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14649 			strcpy(tp->board_part_number, "BCM57781");
14650 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14651 			strcpy(tp->board_part_number, "BCM57785");
14652 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14653 			strcpy(tp->board_part_number, "BCM57791");
14654 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14655 			strcpy(tp->board_part_number, "BCM57795");
14656 		else
14657 			goto nomatch;
14658 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14659 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14660 			strcpy(tp->board_part_number, "BCM57762");
14661 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14662 			strcpy(tp->board_part_number, "BCM57766");
14663 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14664 			strcpy(tp->board_part_number, "BCM57782");
14665 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14666 			strcpy(tp->board_part_number, "BCM57786");
14667 		else
14668 			goto nomatch;
14669 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14670 		strcpy(tp->board_part_number, "BCM95906");
14671 	} else {
14672 nomatch:
14673 		strcpy(tp->board_part_number, "none");
14674 	}
14675 }
14676 
14677 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14678 {
14679 	u32 val;
14680 
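	/* A valid bootcode image starts with a word whose top bits
	 * match the 0x0c000000 signature and whose second word is
	 * zero.
	 */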
14681 	if (tg3_nvram_read(tp, offset, &val) ||
14682 	    (val & 0xfc000000) != 0x0c000000 ||
14683 	    tg3_nvram_read(tp, offset + 4, &val) ||
14684 	    val != 0)
14685 		return 0;
14686 
14687 	return 1;
14688 }
14689 
14690 static void tg3_read_bc_ver(struct tg3 *tp)
14691 {
14692 	u32 val, offset, start, ver_offset;
14693 	int i, dst_off;
14694 	bool newver = false;
14695 
14696 	if (tg3_nvram_read(tp, 0xc, &offset) ||
14697 	    tg3_nvram_read(tp, 0x4, &start))
14698 		return;
14699 
14700 	offset = tg3_nvram_logical_addr(tp, offset);
14701 
14702 	if (tg3_nvram_read(tp, offset, &val))
14703 		return;
14704 
14705 	if ((val & 0xfc000000) == 0x0c000000) {
14706 		if (tg3_nvram_read(tp, offset + 4, &val))
14707 			return;
14708 
14709 		if (val == 0)
14710 			newver = true;
14711 	}
14712 
14713 	dst_off = strlen(tp->fw_ver);
14714 
14715 	if (newver) {
14716 		if (TG3_VER_SIZE - dst_off < 16 ||
14717 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
14718 			return;
14719 
14720 		offset = offset + ver_offset - start;
14721 		for (i = 0; i < 16; i += 4) {
14722 			__be32 v;
14723 			if (tg3_nvram_read_be32(tp, offset + i, &v))
14724 				return;
14725 
14726 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14727 		}
14728 	} else {
14729 		u32 major, minor;
14730 
14731 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14732 			return;
14733 
14734 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14735 			TG3_NVM_BCVER_MAJSFT;
14736 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14737 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14738 			 "v%d.%02d", major, minor);
14739 	}
14740 }
14741 
14742 static void tg3_read_hwsb_ver(struct tg3 *tp)
14743 {
14744 	u32 val, major, minor;
14745 
14746 	/* Use native endian representation */
14747 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14748 		return;
14749 
14750 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14751 		TG3_NVM_HWSB_CFG1_MAJSFT;
14752 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14753 		TG3_NVM_HWSB_CFG1_MINSFT;
14754 
	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14756 }
14757 
14758 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14759 {
14760 	u32 offset, major, minor, build;
14761 
14762 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14763 
14764 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14765 		return;
14766 
14767 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14768 	case TG3_EEPROM_SB_REVISION_0:
14769 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14770 		break;
14771 	case TG3_EEPROM_SB_REVISION_2:
14772 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14773 		break;
14774 	case TG3_EEPROM_SB_REVISION_3:
14775 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14776 		break;
14777 	case TG3_EEPROM_SB_REVISION_4:
14778 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14779 		break;
14780 	case TG3_EEPROM_SB_REVISION_5:
14781 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14782 		break;
14783 	case TG3_EEPROM_SB_REVISION_6:
14784 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14785 		break;
14786 	default:
14787 		return;
14788 	}
14789 
14790 	if (tg3_nvram_read(tp, offset, &val))
14791 		return;
14792 
14793 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14794 		TG3_EEPROM_SB_EDH_BLD_SHFT;
14795 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14796 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
14797 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14798 
14799 	if (minor > 99 || build > 26)
14800 		return;
14801 
14802 	offset = strlen(tp->fw_ver);
14803 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14804 		 " v%d.%02d", major, minor);
14805 
14806 	if (build > 0) {
14807 		offset = strlen(tp->fw_ver);
14808 		if (offset < TG3_VER_SIZE - 1)
14809 			tp->fw_ver[offset] = 'a' + build - 1;
14810 	}
14811 }
14812 
14813 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14814 {
14815 	u32 val, offset, start;
14816 	int i, vlen;
14817 
14818 	for (offset = TG3_NVM_DIR_START;
14819 	     offset < TG3_NVM_DIR_END;
14820 	     offset += TG3_NVM_DIRENT_SIZE) {
14821 		if (tg3_nvram_read(tp, offset, &val))
14822 			return;
14823 
14824 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14825 			break;
14826 	}
14827 
14828 	if (offset == TG3_NVM_DIR_END)
14829 		return;
14830 
14831 	if (!tg3_flag(tp, 5705_PLUS))
14832 		start = 0x08000000;
14833 	else if (tg3_nvram_read(tp, offset - 4, &start))
14834 		return;
14835 
14836 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
14837 	    !tg3_fw_img_is_valid(tp, offset) ||
14838 	    tg3_nvram_read(tp, offset + 8, &val))
14839 		return;
14840 
14841 	offset += val - start;
14842 
14843 	vlen = strlen(tp->fw_ver);
14844 
14845 	tp->fw_ver[vlen++] = ',';
14846 	tp->fw_ver[vlen++] = ' ';
14847 
14848 	for (i = 0; i < 4; i++) {
14849 		__be32 v;
14850 		if (tg3_nvram_read_be32(tp, offset, &v))
14851 			return;
14852 
14853 		offset += sizeof(v);
14854 
14855 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
14856 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14857 			break;
14858 		}
14859 
14860 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14861 		vlen += sizeof(v);
14862 	}
14863 }
14864 
14865 static void tg3_probe_ncsi(struct tg3 *tp)
14866 {
14867 	u32 apedata;
14868 
14869 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14870 	if (apedata != APE_SEG_SIG_MAGIC)
14871 		return;
14872 
14873 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14874 	if (!(apedata & APE_FW_STATUS_READY))
14875 		return;
14876 
14877 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14878 		tg3_flag_set(tp, APE_HAS_NCSI);
14879 }
14880 
14881 static void tg3_read_dash_ver(struct tg3 *tp)
14882 {
14883 	int vlen;
14884 	u32 apedata;
14885 	char *fwtype;
14886 
14887 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14888 
14889 	if (tg3_flag(tp, APE_HAS_NCSI))
14890 		fwtype = "NCSI";
14891 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14892 		fwtype = "SMASH";
14893 	else
14894 		fwtype = "DASH";
14895 
14896 	vlen = strlen(tp->fw_ver);
14897 
14898 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14899 		 fwtype,
14900 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14901 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14902 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14903 		 (apedata & APE_FW_VERSION_BLDMSK));
14904 }
14905 
14906 static void tg3_read_otp_ver(struct tg3 *tp)
14907 {
14908 	u32 val, val2;
14909 
14910 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
14911 		return;
14912 
14913 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14914 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14915 	    TG3_OTP_MAGIC0_VALID(val)) {
14916 		u64 val64 = (u64) val << 32 | val2;
14917 		u32 ver = 0;
14918 		int i, vlen;
14919 
14920 		for (i = 0; i < 7; i++) {
14921 			if ((val64 & 0xff) == 0)
14922 				break;
14923 			ver = val64 & 0xff;
14924 			val64 >>= 8;
14925 		}
14926 		vlen = strlen(tp->fw_ver);
14927 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14928 	}
14929 }
14930 
14931 static void tg3_read_fw_ver(struct tg3 *tp)
14932 {
14933 	u32 val;
14934 	bool vpd_vers = false;
14935 
14936 	if (tp->fw_ver[0] != 0)
14937 		vpd_vers = true;
14938 
14939 	if (tg3_flag(tp, NO_NVRAM)) {
14940 		strcat(tp->fw_ver, "sb");
14941 		tg3_read_otp_ver(tp);
14942 		return;
14943 	}
14944 
14945 	if (tg3_nvram_read(tp, 0, &val))
14946 		return;
14947 
14948 	if (val == TG3_EEPROM_MAGIC)
14949 		tg3_read_bc_ver(tp);
14950 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14951 		tg3_read_sb_ver(tp, val);
14952 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14953 		tg3_read_hwsb_ver(tp);
14954 
14955 	if (tg3_flag(tp, ENABLE_ASF)) {
14956 		if (tg3_flag(tp, ENABLE_APE)) {
14957 			tg3_probe_ncsi(tp);
14958 			if (!vpd_vers)
14959 				tg3_read_dash_ver(tp);
14960 		} else if (!vpd_vers) {
14961 			tg3_read_mgmtfw_ver(tp);
14962 		}
14963 	}
14964 
14965 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14966 }
14967 
14968 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14969 {
14970 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
14971 		return TG3_RX_RET_MAX_SIZE_5717;
14972 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14973 		return TG3_RX_RET_MAX_SIZE_5700;
14974 	else
14975 		return TG3_RX_RET_MAX_SIZE_5705;
14976 }
14977 
14978 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14979 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14980 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14981 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14982 	{ },
14983 };
14984 
14985 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14986 {
14987 	struct pci_dev *peer;
14988 	unsigned int func, devnr = tp->pdev->devfn & ~7;
14989 
14990 	for (func = 0; func < 8; func++) {
14991 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
14992 		if (peer && peer != tp->pdev)
14993 			break;
14994 		pci_dev_put(peer);
14995 	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
14999 	if (!peer) {
15000 		peer = tp->pdev;
15001 		return peer;
15002 	}
15003 
15004 	/*
15005 	 * We don't need to keep the refcount elevated; there's no way
15006 	 * to remove one half of this device without removing the other
15007 	 */
15008 	pci_dev_put(peer);
15009 
15010 	return peer;
15011 }
15012 
15013 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15014 {
15015 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15016 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15017 		u32 reg;
15018 
15019 		/* All devices that use the alternate
15020 		 * ASIC REV location have a CPMU.
15021 		 */
15022 		tg3_flag_set(tp, CPMU_PRESENT);
15023 
15024 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15025 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15026 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15027 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15028 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15029 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15030 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15031 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15032 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15033 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15034 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15035 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15036 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15037 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15038 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15039 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15040 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15041 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15042 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15043 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15044 		else
15045 			reg = TG3PCI_PRODID_ASICREV;
15046 
15047 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15048 	}
15049 
15050 	/* Wrong chip ID in 5752 A0. This code can be removed later
15051 	 * as A0 is not in production.
15052 	 */
15053 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15054 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15055 
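	/* 5717 C0 parts identify themselves with their own chip ID but
	 * are handled like 5720 A0 throughout the driver, so remap the
	 * ID here.
	 */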
15056 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15057 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15058 
15059 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15060 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15061 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15062 		tg3_flag_set(tp, 5717_PLUS);
15063 
15064 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15065 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15066 		tg3_flag_set(tp, 57765_CLASS);
15067 
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
15070 		tg3_flag_set(tp, 57765_PLUS);
15071 
15072 	/* Intentionally exclude ASIC_REV_5906 */
15073 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15074 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15075 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15076 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15077 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15078 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15079 	    tg3_flag(tp, 57765_PLUS))
15080 		tg3_flag_set(tp, 5755_PLUS);
15081 
15082 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15083 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15084 		tg3_flag_set(tp, 5780_CLASS);
15085 
15086 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15087 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15088 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15089 	    tg3_flag(tp, 5755_PLUS) ||
15090 	    tg3_flag(tp, 5780_CLASS))
15091 		tg3_flag_set(tp, 5750_PLUS);
15092 
15093 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15094 	    tg3_flag(tp, 5750_PLUS))
15095 		tg3_flag_set(tp, 5705_PLUS);
15096 }
15097 
15098 static bool tg3_10_100_only_device(struct tg3 *tp,
15099 				   const struct pci_device_id *ent)
15100 {
15101 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15102 
15103 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15104 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15105 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15106 		return true;
15107 
15108 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15109 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15110 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15111 				return true;
15112 		} else {
15113 			return true;
15114 		}
15115 	}
15116 
15117 	return false;
15118 }
15119 
15120 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15121 {
15122 	u32 misc_ctrl_reg;
15123 	u32 pci_state_reg, grc_misc_cfg;
15124 	u32 val;
15125 	u16 pci_cmd;
15126 	int err;
15127 
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
15135 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15136 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15137 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15138 
15139 	/* Important! -- Make sure register accesses are byteswapped
15140 	 * correctly.  Also, for those chips that require it, make
15141 	 * sure that indirect register accesses are enabled before
15142 	 * the first operation.
15143 	 */
15144 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15145 			      &misc_ctrl_reg);
15146 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15147 			       MISC_HOST_CTRL_CHIPREV);
15148 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15149 			       tp->misc_host_ctrl);
15150 
15151 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15152 
15153 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use configuration cycles
	 * only to access all registers. The 5702/03 chips
15156 	 * can mistakenly decode the special cycles from the
15157 	 * ICH chipsets as memory write cycles, causing corruption
15158 	 * of register and memory space. Only certain ICH bridges
15159 	 * will drive special cycles with non-zero data during the
15160 	 * address phase which can fall within the 5703's address
15161 	 * range. This is not an ICH bug as the PCI spec allows
15162 	 * non-zero address during special cycles. However, only
15163 	 * these ICH bridges are known to drive non-zero addresses
15164 	 * during special cycles.
15165 	 *
15166 	 * Since special cycles do not cross PCI bridges, we only
15167 	 * enable this workaround if the 5703 is on the secondary
15168 	 * bus of these ICH bridges.
15169 	 */
15170 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15171 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15172 		static struct tg3_dev_id {
15173 			u32	vendor;
15174 			u32	device;
15175 			u32	rev;
15176 		} ich_chipsets[] = {
15177 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15178 			  PCI_ANY_ID },
15179 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15180 			  PCI_ANY_ID },
15181 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15182 			  0xa },
15183 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15184 			  PCI_ANY_ID },
15185 			{ },
15186 		};
15187 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15188 		struct pci_dev *bridge = NULL;
15189 
15190 		while (pci_id->vendor != 0) {
15191 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15192 						bridge);
15193 			if (!bridge) {
15194 				pci_id++;
15195 				continue;
15196 			}
15197 			if (pci_id->rev != PCI_ANY_ID) {
15198 				if (bridge->revision > pci_id->rev)
15199 					continue;
15200 			}
15201 			if (bridge->subordinate &&
15202 			    (bridge->subordinate->number ==
15203 			     tp->pdev->bus->number)) {
15204 				tg3_flag_set(tp, ICH_WORKAROUND);
15205 				pci_dev_put(bridge);
15206 				break;
15207 			}
15208 		}
15209 	}
15210 
15211 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15212 		static struct tg3_dev_id {
15213 			u32	vendor;
15214 			u32	device;
15215 		} bridge_chipsets[] = {
15216 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15217 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15218 			{ },
15219 		};
15220 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15221 		struct pci_dev *bridge = NULL;
15222 
15223 		while (pci_id->vendor != 0) {
15224 			bridge = pci_get_device(pci_id->vendor,
15225 						pci_id->device,
15226 						bridge);
15227 			if (!bridge) {
15228 				pci_id++;
15229 				continue;
15230 			}
15231 			if (bridge->subordinate &&
15232 			    (bridge->subordinate->number <=
15233 			     tp->pdev->bus->number) &&
15234 			    (bridge->subordinate->busn_res.end >=
15235 			     tp->pdev->bus->number)) {
15236 				tg3_flag_set(tp, 5701_DMA_BUG);
15237 				pci_dev_put(bridge);
15238 				break;
15239 			}
15240 		}
15241 	}
15242 
15243 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
15244 	 * DMA addresses > 40-bit. This bridge may have other additional
15245 	 * 57xx devices behind it in some 4-port NIC designs for example.
15246 	 * Any tg3 device found behind the bridge will also need the 40-bit
15247 	 * DMA workaround.
15248 	 */
15249 	if (tg3_flag(tp, 5780_CLASS)) {
15250 		tg3_flag_set(tp, 40BIT_DMA_BUG);
15251 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15252 	} else {
15253 		struct pci_dev *bridge = NULL;
15254 
15255 		do {
15256 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15257 						PCI_DEVICE_ID_SERVERWORKS_EPB,
15258 						bridge);
15259 			if (bridge && bridge->subordinate &&
15260 			    (bridge->subordinate->number <=
15261 			     tp->pdev->bus->number) &&
15262 			    (bridge->subordinate->busn_res.end >=
15263 			     tp->pdev->bus->number)) {
15264 				tg3_flag_set(tp, 40BIT_DMA_BUG);
15265 				pci_dev_put(bridge);
15266 				break;
15267 			}
15268 		} while (bridge);
15269 	}
15270 
15271 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15272 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15273 		tp->pdev_peer = tg3_find_peer(tp);
15274 
15275 	/* Determine TSO capabilities */
15276 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15277 		; /* Do nothing. HW bug. */
15278 	else if (tg3_flag(tp, 57765_PLUS))
15279 		tg3_flag_set(tp, HW_TSO_3);
15280 	else if (tg3_flag(tp, 5755_PLUS) ||
15281 		 tg3_asic_rev(tp) == ASIC_REV_5906)
15282 		tg3_flag_set(tp, HW_TSO_2);
15283 	else if (tg3_flag(tp, 5750_PLUS)) {
15284 		tg3_flag_set(tp, HW_TSO_1);
15285 		tg3_flag_set(tp, TSO_BUG);
15286 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15287 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15288 			tg3_flag_clear(tp, TSO_BUG);
15289 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15290 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
15291 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
15293 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
15294 			tp->fw_needed = FIRMWARE_TG3TSO5;
15295 		else
15296 			tp->fw_needed = FIRMWARE_TG3TSO;
15297 	}
15298 
15299 	/* Selectively allow TSO based on operating conditions */
15300 	if (tg3_flag(tp, HW_TSO_1) ||
15301 	    tg3_flag(tp, HW_TSO_2) ||
15302 	    tg3_flag(tp, HW_TSO_3) ||
15303 	    tp->fw_needed) {
15304 		/* For firmware TSO, assume ASF is disabled.
15305 		 * We'll disable TSO later if we discover ASF
15306 		 * is enabled in tg3_get_eeprom_hw_cfg().
15307 		 */
15308 		tg3_flag_set(tp, TSO_CAPABLE);
15309 	} else {
15310 		tg3_flag_clear(tp, TSO_CAPABLE);
15311 		tg3_flag_clear(tp, TSO_BUG);
15312 		tp->fw_needed = NULL;
15313 	}
15314 
15315 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15316 		tp->fw_needed = FIRMWARE_TG3;
15317 
15318 	tp->irq_max = 1;
15319 
15320 	if (tg3_flag(tp, 5750_PLUS)) {
15321 		tg3_flag_set(tp, SUPPORT_MSI);
15322 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15323 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15324 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15325 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15326 		     tp->pdev_peer == tp->pdev))
15327 			tg3_flag_clear(tp, SUPPORT_MSI);
15328 
15329 		if (tg3_flag(tp, 5755_PLUS) ||
15330 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
15331 			tg3_flag_set(tp, 1SHOT_MSI);
15332 		}
15333 
15334 		if (tg3_flag(tp, 57765_PLUS)) {
15335 			tg3_flag_set(tp, SUPPORT_MSIX);
15336 			tp->irq_max = TG3_IRQ_MAX_VECS;
15337 		}
15338 	}
15339 
15340 	tp->txq_max = 1;
15341 	tp->rxq_max = 1;
15342 	if (tp->irq_max > 1) {
15343 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15344 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15345 
15346 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15347 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15348 			tp->txq_max = tp->irq_max - 1;
15349 	}
15350 
15351 	if (tg3_flag(tp, 5755_PLUS) ||
15352 	    tg3_asic_rev(tp) == ASIC_REV_5906)
15353 		tg3_flag_set(tp, SHORT_DMA_BUG);
15354 
15355 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
15356 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15357 
15358 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15359 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15360 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
15361 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15362 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
15363 
15364 	if (tg3_flag(tp, 57765_PLUS) &&
15365 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15366 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15367 
15368 	if (!tg3_flag(tp, 5705_PLUS) ||
15369 	    tg3_flag(tp, 5780_CLASS) ||
15370 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
15371 		tg3_flag_set(tp, JUMBO_CAPABLE);
15372 
15373 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15374 			      &pci_state_reg);
15375 
15376 	if (pci_is_pcie(tp->pdev)) {
15377 		u16 lnkctl;
15378 
15379 		tg3_flag_set(tp, PCI_EXPRESS);
15380 
15381 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15382 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15383 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15384 				tg3_flag_clear(tp, HW_TSO_2);
15385 				tg3_flag_clear(tp, TSO_CAPABLE);
15386 			}
15387 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15388 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15389 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15390 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15391 				tg3_flag_set(tp, CLKREQ_BUG);
15392 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15393 			tg3_flag_set(tp, L1PLLPD_EN);
15394 		}
15395 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15396 		/* BCM5785 devices are effectively PCIe devices, and should
15397 		 * follow PCIe codepaths, but do not have a PCIe capabilities
15398 		 * section.
15399 		 */
15400 		tg3_flag_set(tp, PCI_EXPRESS);
15401 	} else if (!tg3_flag(tp, 5705_PLUS) ||
15402 		   tg3_flag(tp, 5780_CLASS)) {
15403 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15404 		if (!tp->pcix_cap) {
15405 			dev_err(&tp->pdev->dev,
15406 				"Cannot find PCI-X capability, aborting\n");
15407 			return -EIO;
15408 		}
15409 
15410 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15411 			tg3_flag_set(tp, PCIX_MODE);
15412 	}
15413 
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause serious problems.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
15420 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
15421 	    !tg3_flag(tp, PCI_EXPRESS))
15422 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
15423 
15424 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15425 			     &tp->pci_cacheline_sz);
15426 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15427 			     &tp->pci_lat_timer);
15428 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15429 	    tp->pci_lat_timer < 64) {
15430 		tp->pci_lat_timer = 64;
15431 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15432 				      tp->pci_lat_timer);
15433 	}
15434 
15435 	/* Important! -- It is critical that the PCI-X hw workaround
15436 	 * situation is decided before the first MMIO register access.
15437 	 */
15438 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15439 		/* 5700 BX chips need to have their TX producer index
15440 		 * mailboxes written twice to workaround a bug.
15441 		 */
15442 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
15443 
15444 		/* If we are in PCI-X mode, enable register write workaround.
15445 		 *
15446 		 * The workaround is to use indirect register accesses
15447 		 * for all chip writes not to mailbox registers.
15448 		 */
15449 		if (tg3_flag(tp, PCIX_MODE)) {
15450 			u32 pm_reg;
15451 
15452 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15453 
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
15458 			pci_read_config_dword(tp->pdev,
15459 					      tp->pm_cap + PCI_PM_CTRL,
15460 					      &pm_reg);
15461 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15462 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15463 			pci_write_config_dword(tp->pdev,
15464 					       tp->pm_cap + PCI_PM_CTRL,
15465 					       pm_reg);
15466 
15467 			/* Also, force SERR#/PERR# in PCI command. */
15468 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15469 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15470 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15471 		}
15472 	}
15473 
15474 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15475 		tg3_flag_set(tp, PCI_HIGH_SPEED);
15476 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15477 		tg3_flag_set(tp, PCI_32BIT);
15478 
15479 	/* Chip-specific fixup from Broadcom driver */
15480 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15481 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15482 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15483 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15484 	}
15485 
15486 	/* Default fast path register access methods */
15487 	tp->read32 = tg3_read32;
15488 	tp->write32 = tg3_write32;
15489 	tp->read32_mbox = tg3_read32;
15490 	tp->write32_mbox = tg3_write32;
15491 	tp->write32_tx_mbox = tg3_write32;
15492 	tp->write32_rx_mbox = tg3_write32;
15493 
15494 	/* Various workaround register access methods */
15495 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15496 		tp->write32 = tg3_write_indirect_reg32;
15497 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15498 		 (tg3_flag(tp, PCI_EXPRESS) &&
15499 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15500 		/*
15501 		 * Back to back register writes can cause problems on these
15502 		 * chips, the workaround is to read back all reg writes
15503 		 * except those to mailbox regs.
15504 		 *
15505 		 * See tg3_write_indirect_reg32().
15506 		 */
15507 		tp->write32 = tg3_write_flush_reg32;
15508 	}
15509 
15510 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15511 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
15512 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
15513 			tp->write32_rx_mbox = tg3_write_flush_reg32;
15514 	}
15515 
15516 	if (tg3_flag(tp, ICH_WORKAROUND)) {
15517 		tp->read32 = tg3_read_indirect_reg32;
15518 		tp->write32 = tg3_write_indirect_reg32;
15519 		tp->read32_mbox = tg3_read_indirect_mbox;
15520 		tp->write32_mbox = tg3_write_indirect_mbox;
15521 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
15522 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
15523 
15524 		iounmap(tp->regs);
15525 		tp->regs = NULL;
15526 
15527 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15528 		pci_cmd &= ~PCI_COMMAND_MEMORY;
15529 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15530 	}
15531 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15532 		tp->read32_mbox = tg3_read32_mbox_5906;
15533 		tp->write32_mbox = tg3_write32_mbox_5906;
15534 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
15535 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
15536 	}
15537 
15538 	if (tp->write32 == tg3_write_indirect_reg32 ||
15539 	    (tg3_flag(tp, PCIX_MODE) &&
15540 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15541 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
15542 		tg3_flag_set(tp, SRAM_USE_CONFIG);
15543 
15544 	/* The memory arbiter has to be enabled in order for SRAM accesses
15545 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
15546 	 * sure it is enabled, but other entities such as system netboot
15547 	 * code might disable it.
15548 	 */
15549 	val = tr32(MEMARB_MODE);
15550 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15551 
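	/* Determine which PCI function this port is.  On most chips the
	 * devfn is authoritative, but PCI-X and CPMU-equipped devices
	 * report the function mapping through chip registers instead.
	 */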
15552 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15553 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15554 	    tg3_flag(tp, 5780_CLASS)) {
15555 		if (tg3_flag(tp, PCIX_MODE)) {
15556 			pci_read_config_dword(tp->pdev,
15557 					      tp->pcix_cap + PCI_X_STATUS,
15558 					      &val);
15559 			tp->pci_fn = val & 0x7;
15560 		}
15561 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15562 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
15563 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
15564 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15565 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15566 			val = tr32(TG3_CPMU_STATUS);
15567 
15568 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
15569 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15570 		else
15571 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15572 				     TG3_CPMU_STATUS_FSHFT_5719;
15573 	}
15574 
15575 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15576 		tp->write32_tx_mbox = tg3_write_flush_reg32;
15577 		tp->write32_rx_mbox = tg3_write_flush_reg32;
15578 	}
15579 
15580 	/* Get eeprom hw config before calling tg3_set_power_state().
15581 	 * In particular, the TG3_FLAG_IS_NIC flag must be
15582 	 * determined before calling tg3_set_power_state() so that
15583 	 * we know whether or not to switch out of Vaux power.
15584 	 * When the flag is set, it means that GPIO1 is used for eeprom
15585 	 * write protect and also implies that it is a LOM where GPIOs
15586 	 * are not used to switch power.
15587 	 */
15588 	tg3_get_eeprom_hw_cfg(tp);
15589 
15590 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15591 		tg3_flag_clear(tp, TSO_CAPABLE);
15592 		tg3_flag_clear(tp, TSO_BUG);
15593 		tp->fw_needed = NULL;
15594 	}
15595 
15596 	if (tg3_flag(tp, ENABLE_APE)) {
15597 		/* Allow reads and writes to the
15598 		 * APE register and memory space.
15599 		 */
15600 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15601 				 PCISTATE_ALLOW_APE_SHMEM_WR |
15602 				 PCISTATE_ALLOW_APE_PSPACE_WR;
15603 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15604 				       pci_state_reg);
15605 
15606 		tg3_ape_lock_init(tp);
15607 	}
15608 
15609 	/* Set up tp->grc_local_ctrl before calling
15610 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15611 	 * will bring 5700's external PHY out of reset.
15612 	 * It is also used as eeprom write protect on LOMs.
15613 	 */
15614 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15615 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15616 	    tg3_flag(tp, EEPROM_WRITE_PROT))
15617 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15618 				       GRC_LCLCTRL_GPIO_OUTPUT1);
15619 	/* Unused GPIO3 must be driven as output on 5752 because there
15620 	 * are no pull-up resistors on unused GPIO pins.
15621 	 */
15622 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15623 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15624 
15625 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15626 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15627 	    tg3_flag(tp, 57765_CLASS))
15628 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15629 
15630 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15631 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15632 		/* Turn off the debug UART. */
15633 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15634 		if (tg3_flag(tp, IS_NIC))
15635 			/* Keep VMain power. */
15636 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15637 					      GRC_LCLCTRL_GPIO_OUTPUT0;
15638 	}
15639 
15640 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
15641 		tp->grc_local_ctrl |=
15642 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15643 
15644 	/* Switch out of Vaux if it is a NIC */
15645 	tg3_pwrsrc_switch_to_vmain(tp);
15646 
15647 	/* Derive initial jumbo mode from MTU assigned in
15648 	 * ether_setup() via the alloc_etherdev() call
15649 	 */
15650 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15651 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
15652 
15653 	/* Determine WakeOnLan speed to use. */
15654 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15655 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15656 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15657 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15658 		tg3_flag_clear(tp, WOL_SPEED_100MB);
15659 	} else {
15660 		tg3_flag_set(tp, WOL_SPEED_100MB);
15661 	}
15662 
15663 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
15664 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
15665 
	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
15667 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15668 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15669 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15670 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15671 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15672 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15673 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15674 
15675 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15676 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
15677 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15678 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15679 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15680 
15681 	if (tg3_flag(tp, 5705_PLUS) &&
15682 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15683 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
15684 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
15685 	    !tg3_flag(tp, 57765_PLUS)) {
15686 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15687 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15688 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15689 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
15690 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15691 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15692 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15693 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15694 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15695 		} else
15696 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15697 	}
15698 
15699 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15700 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15701 		tp->phy_otp = tg3_read_otp_phycfg(tp);
15702 		if (tp->phy_otp == 0)
15703 			tp->phy_otp = TG3_OTP_DEFAULT;
15704 	}
15705 
15706 	if (tg3_flag(tp, CPMU_PRESENT))
15707 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15708 	else
15709 		tp->mi_mode = MAC_MI_MODE_BASE;
15710 
15711 	tp->coalesce_mode = 0;
15712 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15713 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
15714 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15715 
15716 	/* Set these bits to enable statistics workaround. */
15717 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15718 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15719 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15720 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15721 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15722 	}
15723 
15724 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15725 	    tg3_asic_rev(tp) == ASIC_REV_57780)
15726 		tg3_flag_set(tp, USE_PHYLIB);
15727 
15728 	err = tg3_mdio_init(tp);
15729 	if (err)
15730 		return err;
15731 
15732 	/* Initialize data/descriptor byte/word swapping. */
15733 	val = tr32(GRC_MODE);
15734 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15735 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15736 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15737 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
15738 			GRC_MODE_B2HRX_ENABLE |
15739 			GRC_MODE_HTX2B_ENABLE |
15740 			GRC_MODE_HOST_STACKUP);
15741 	else
15742 		val &= GRC_MODE_HOST_STACKUP;
15743 
15744 	tw32(GRC_MODE, val | tp->grc_mode);
15745 
15746 	tg3_switch_clocks(tp);
15747 
15748 	/* Clear this out for sanity. */
15749 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15750 
15751 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15752 			      &pci_state_reg);
15753 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15754 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15755 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15756 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15757 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15758 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15759 			void __iomem *sram_base;
15760 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * return value is bad, force enable the PCIX
			 * workaround.
			 */
15765 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15766 
15767 			writel(0x00000000, sram_base);
15768 			writel(0x00000000, sram_base + 4);
15769 			writel(0xffffffff, sram_base + 4);
15770 			if (readl(sram_base) != 0x00000000)
15771 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15772 		}
15773 	}
15774 
15775 	udelay(50);
15776 	tg3_nvram_init(tp);
15777 
15778 	grc_misc_cfg = tr32(GRC_MISC_CFG);
15779 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15780 
15781 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15782 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15783 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15784 		tg3_flag_set(tp, IS_5788);
15785 
15786 	if (!tg3_flag(tp, IS_5788) &&
15787 	    tg3_asic_rev(tp) != ASIC_REV_5700)
15788 		tg3_flag_set(tp, TAGGED_STATUS);
15789 	if (tg3_flag(tp, TAGGED_STATUS)) {
15790 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15791 				      HOSTCC_MODE_CLRTICK_TXBD);
15792 
15793 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15794 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15795 				       tp->misc_host_ctrl);
15796 	}
15797 
15798 	/* Preserve the APE MAC_MODE bits */
15799 	if (tg3_flag(tp, ENABLE_APE))
15800 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15801 	else
15802 		tp->mac_mode = 0;
15803 
15804 	if (tg3_10_100_only_device(tp, ent))
15805 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15806 
15807 	err = tg3_phy_probe(tp);
15808 	if (err) {
15809 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15810 		/* ... but do not return immediately ... */
15811 		tg3_mdio_fini(tp);
15812 	}
15813 
15814 	tg3_read_vpd(tp);
15815 	tg3_read_fw_ver(tp);
15816 
15817 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15818 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15819 	} else {
15820 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
15821 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15822 		else
15823 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15824 	}
15825 
15826 	/* 5700 {AX,BX} chips have a broken status block link
15827 	 * change bit implementation, so we must use the
15828 	 * status register in those cases.
15829 	 */
15830 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
15831 		tg3_flag_set(tp, USE_LINKCHG_REG);
15832 	else
15833 		tg3_flag_clear(tp, USE_LINKCHG_REG);
15834 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
15839 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15840 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
15841 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15842 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15843 		tg3_flag_set(tp, USE_LINKCHG_REG);
15844 	}
15845 
15846 	/* For all SERDES we poll the MAC status register. */
15847 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15848 		tg3_flag_set(tp, POLL_SERDES);
15849 	else
15850 		tg3_flag_clear(tp, POLL_SERDES);
15851 
15852 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15853 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15854 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15855 	    tg3_flag(tp, PCIX_MODE)) {
15856 		tp->rx_offset = NET_SKB_PAD;
15857 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15858 		tp->rx_copy_thresh = ~(u16)0;
15859 #endif
15860 	}
15861 
15862 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15863 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15864 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15865 
15866 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15867 
15868 	/* Increment the rx prod index on the rx std ring by at most
15869 	 * 8 for these chips to workaround hw errata.
15870 	 */
15871 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15872 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15873 	    tg3_asic_rev(tp) == ASIC_REV_5755)
15874 		tp->rx_std_max_post = 8;
15875 
15876 	if (tg3_flag(tp, ASPM_WORKAROUND))
15877 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15878 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
15879 
15880 	return err;
15881 }
15882 
15883 #ifdef CONFIG_SPARC
15884 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15885 {
15886 	struct net_device *dev = tp->dev;
15887 	struct pci_dev *pdev = tp->pdev;
15888 	struct device_node *dp = pci_device_to_OF_node(pdev);
15889 	const unsigned char *addr;
15890 	int len;
15891 
15892 	addr = of_get_property(dp, "local-mac-address", &len);
15893 	if (addr && len == 6) {
15894 		memcpy(dev->dev_addr, addr, 6);
15895 		return 0;
15896 	}
15897 	return -ENODEV;
15898 }
15899 
15900 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15901 {
15902 	struct net_device *dev = tp->dev;
15903 
15904 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15905 	return 0;
15906 }
15907 #endif
15908 
15909 static int tg3_get_device_address(struct tg3 *tp)
15910 {
15911 	struct net_device *dev = tp->dev;
15912 	u32 hi, lo, mac_offset;
15913 	int addr_ok = 0;
15914 	int err;
15915 
15916 #ifdef CONFIG_SPARC
15917 	if (!tg3_get_macaddr_sparc(tp))
15918 		return 0;
15919 #endif
15920 
15921 	if (tg3_flag(tp, IS_SSB_CORE)) {
15922 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15923 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15924 			return 0;
15925 	}
15926 
15927 	mac_offset = 0x7c;
15928 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15929 	    tg3_flag(tp, 5780_CLASS)) {
15930 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15931 			mac_offset = 0xcc;
15932 		if (tg3_nvram_lock(tp))
15933 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15934 		else
15935 			tg3_nvram_unlock(tp);
15936 	} else if (tg3_flag(tp, 5717_PLUS)) {
15937 		if (tp->pci_fn & 1)
15938 			mac_offset = 0xcc;
15939 		if (tp->pci_fn > 1)
15940 			mac_offset += 0x18c;
15941 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15942 		mac_offset = 0x10;
15943 
15944 	/* First try to get it from MAC address mailbox. */
15945 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
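	/* Bootcode writes 0x484b ("HK") into the upper half of the
	 * high word to flag a valid MAC address in SRAM.
	 */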
15946 	if ((hi >> 16) == 0x484b) {
15947 		dev->dev_addr[0] = (hi >>  8) & 0xff;
15948 		dev->dev_addr[1] = (hi >>  0) & 0xff;
15949 
15950 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15951 		dev->dev_addr[2] = (lo >> 24) & 0xff;
15952 		dev->dev_addr[3] = (lo >> 16) & 0xff;
15953 		dev->dev_addr[4] = (lo >>  8) & 0xff;
15954 		dev->dev_addr[5] = (lo >>  0) & 0xff;
15955 
15956 		/* Some old bootcode may report a 0 MAC address in SRAM */
15957 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15958 	}
15959 	if (!addr_ok) {
15960 		/* Next, try NVRAM. */
15961 		if (!tg3_flag(tp, NO_NVRAM) &&
15962 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15963 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
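			/* NVRAM stores the address big-endian: the last
			 * two bytes of the hi word followed by all four
			 * bytes of lo.
			 */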
15964 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15965 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15966 		}
15967 		/* Finally just fetch it out of the MAC control regs. */
15968 		else {
15969 			hi = tr32(MAC_ADDR_0_HIGH);
15970 			lo = tr32(MAC_ADDR_0_LOW);
15971 
15972 			dev->dev_addr[5] = lo & 0xff;
15973 			dev->dev_addr[4] = (lo >> 8) & 0xff;
15974 			dev->dev_addr[3] = (lo >> 16) & 0xff;
15975 			dev->dev_addr[2] = (lo >> 24) & 0xff;
15976 			dev->dev_addr[1] = hi & 0xff;
15977 			dev->dev_addr[0] = (hi >> 8) & 0xff;
15978 		}
15979 	}
15980 
15981 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15982 #ifdef CONFIG_SPARC
15983 		if (!tg3_get_default_macaddr_sparc(tp))
15984 			return 0;
15985 #endif
15986 		return -EINVAL;
15987 	}
15988 	return 0;
15989 }
15990 
15991 #define BOUNDARY_SINGLE_CACHELINE	1
15992 #define BOUNDARY_MULTI_CACHELINE	2
15993 
15994 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15995 {
15996 	int cacheline_size;
15997 	u8 byte;
15998 	int goal;
15999 
16000 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
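	/* A zero here means the cache line size register was never
	 * programmed; fall back to the worst-case 1024 bytes.
	 */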
16001 	if (byte == 0)
16002 		cacheline_size = 1024;
16003 	else
16004 		cacheline_size = (int) byte * 4;
16005 
16006 	/* On 5703 and later chips, the boundary bits have no
16007 	 * effect.
16008 	 */
16009 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16010 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16011 	    !tg3_flag(tp, PCI_EXPRESS))
16012 		goto out;
16013 
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
16023 
16024 	if (tg3_flag(tp, 57765_PLUS)) {
16025 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16026 		goto out;
16027 	}
16028 
16029 	if (!goal)
16030 		goto out;
16031 
16032 	/* PCI controllers on most RISC systems tend to disconnect
16033 	 * when a device tries to burst across a cache-line boundary.
16034 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16035 	 *
16036 	 * Unfortunately, for PCI-E there are only limited
16037 	 * write-side controls for this, and thus for reads
16038 	 * we will still get the disconnects.  We'll also waste
16039 	 * these PCI cycles for both read and write for chips
16040 	 * other than 5700 and 5701 which do not implement the
16041 	 * boundary bits.
16042 	 */
16043 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16044 		switch (cacheline_size) {
16045 		case 16:
16046 		case 32:
16047 		case 64:
16048 		case 128:
16049 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16050 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16051 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16052 			} else {
16053 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16054 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16055 			}
16056 			break;
16057 
16058 		case 256:
16059 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16060 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16061 			break;
16062 
16063 		default:
16064 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16065 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16066 			break;
16067 		}
16068 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16069 		switch (cacheline_size) {
16070 		case 16:
16071 		case 32:
16072 		case 64:
16073 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16074 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16075 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16076 				break;
16077 			}
16078 			/* fallthrough */
16079 		case 128:
16080 		default:
16081 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16082 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16083 			break;
16084 		}
16085 	} else {
16086 		switch (cacheline_size) {
16087 		case 16:
16088 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16089 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16090 					DMA_RWCTRL_WRITE_BNDRY_16);
16091 				break;
16092 			}
16093 			/* fallthrough */
16094 		case 32:
16095 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16096 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16097 					DMA_RWCTRL_WRITE_BNDRY_32);
16098 				break;
16099 			}
16100 			/* fallthrough */
16101 		case 64:
16102 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16103 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16104 					DMA_RWCTRL_WRITE_BNDRY_64);
16105 				break;
16106 			}
16107 			/* fallthrough */
16108 		case 128:
16109 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16110 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16111 					DMA_RWCTRL_WRITE_BNDRY_128);
16112 				break;
16113 			}
16114 			/* fallthrough */
16115 		case 256:
16116 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16117 				DMA_RWCTRL_WRITE_BNDRY_256);
16118 			break;
16119 		case 512:
16120 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16121 				DMA_RWCTRL_WRITE_BNDRY_512);
16122 			break;
16123 		case 1024:
16124 		default:
16125 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16126 				DMA_RWCTRL_WRITE_BNDRY_1024);
16127 			break;
16128 		}
16129 	}
16130 
16131 out:
16132 	return val;
16133 }
16134 
16135 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16136 			   int size, int to_device)
16137 {
16138 	struct tg3_internal_buffer_desc test_desc;
16139 	u32 sram_dma_descs;
16140 	int i, ret;
16141 
16142 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16143 
16144 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16145 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16146 	tw32(RDMAC_STATUS, 0);
16147 	tw32(WDMAC_STATUS, 0);
16148 
16149 	tw32(BUFMGR_MODE, 0);
16150 	tw32(FTQ_RESET, 0);
16151 
16152 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16153 	test_desc.addr_lo = buf_dma & 0xffffffff;
16154 	test_desc.nic_mbuf = 0x00002100;
16155 	test_desc.len = size;
16156 
16157 	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16159 	 * the *second* time the tg3 driver was getting loaded after an
16160 	 * initial scan.
16161 	 *
16162 	 * Broadcom tells me:
16163 	 *   ...the DMA engine is connected to the GRC block and a DMA
16164 	 *   reset may affect the GRC block in some unpredictable way...
16165 	 *   The behavior of resets to individual blocks has not been tested.
16166 	 *
16167 	 * Broadcom noted the GRC reset will also reset all sub-components.
16168 	 */
16169 	if (to_device) {
16170 		test_desc.cqid_sqid = (13 << 8) | 2;
16171 
16172 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16173 		udelay(40);
16174 	} else {
16175 		test_desc.cqid_sqid = (16 << 8) | 7;
16176 
16177 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16178 		udelay(40);
16179 	}
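	/* Magic descriptor flags value; the internal buffer descriptor
	 * format is not publicly documented.
	 */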
16180 	test_desc.flags = 0x00000005;
16181 
16182 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16183 		u32 val;
16184 
16185 		val = *(((u32 *)&test_desc) + i);
16186 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16187 				       sram_dma_descs + (i * sizeof(u32)));
16188 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16189 	}
16190 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16191 
16192 	if (to_device)
16193 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16194 	else
16195 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16196 
16197 	ret = -ENODEV;
16198 	for (i = 0; i < 40; i++) {
16199 		u32 val;
16200 
16201 		if (to_device)
16202 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16203 		else
16204 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16205 		if ((val & 0xffff) == sram_dma_descs) {
16206 			ret = 0;
16207 			break;
16208 		}
16209 
16210 		udelay(100);
16211 	}
16212 
16213 	return ret;
16214 }
16215 
16216 #define TEST_BUFFER_SIZE	0x2000
16217 
16218 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16219 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16220 	{ },
16221 };
16222 
16223 static int tg3_test_dma(struct tg3 *tp)
16224 {
16225 	dma_addr_t buf_dma;
16226 	u32 *buf, saved_dma_rwctrl;
16227 	int ret = 0;
16228 
16229 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16230 				 &buf_dma, GFP_KERNEL);
16231 	if (!buf) {
16232 		ret = -ENOMEM;
16233 		goto out_nofree;
16234 	}
16235 
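	/* Seed DMA control with the default PCI read (0x6) and write
	 * (0x7) command codes; chip-specific watermark bits are OR'd
	 * in below.
	 */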
16236 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16237 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16238 
16239 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16240 
16241 	if (tg3_flag(tp, 57765_PLUS))
16242 		goto out;
16243 
16244 	if (tg3_flag(tp, PCI_EXPRESS)) {
16245 		/* DMA read watermark not used on PCIE */
16246 		tp->dma_rwctrl |= 0x00180000;
16247 	} else if (!tg3_flag(tp, PCIX_MODE)) {
16248 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16249 		    tg3_asic_rev(tp) == ASIC_REV_5750)
16250 			tp->dma_rwctrl |= 0x003f0000;
16251 		else
16252 			tp->dma_rwctrl |= 0x003f000f;
16253 	} else {
16254 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16255 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
16256 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16257 			u32 read_water = 0x7;
16258 
16259 			/* If the 5704 is behind the EPB bridge, we can
16260 			 * do the less restrictive ONE_DMA workaround for
16261 			 * better performance.
16262 			 */
16263 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16264 			    tg3_asic_rev(tp) == ASIC_REV_5704)
16265 				tp->dma_rwctrl |= 0x8000;
16266 			else if (ccval == 0x6 || ccval == 0x7)
16267 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16268 
16269 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
16270 				read_water = 4;
16271 			/* Set bit 23 to enable PCIX hw bug fix */
16272 			tp->dma_rwctrl |=
16273 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16274 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16275 				(1 << 23);
16276 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16277 			/* 5780 always in PCIX mode */
16278 			tp->dma_rwctrl |= 0x00144000;
16279 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16280 			/* 5714 always in PCIX mode */
16281 			tp->dma_rwctrl |= 0x00148000;
16282 		} else {
16283 			tp->dma_rwctrl |= 0x001b000f;
16284 		}
16285 	}
16286 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16287 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16288 
16289 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16290 	    tg3_asic_rev(tp) == ASIC_REV_5704)
16291 		tp->dma_rwctrl &= 0xfffffff0;
16292 
16293 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16294 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
16295 		/* Remove this if it causes problems for some boards. */
16296 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16297 
16298 		/* On 5700/5701 chips, we need to set this bit.
16299 		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory without all of the byte
16301 		 * enables turned on.  This is an error on several
16302 		 * RISC PCI controllers, in particular sparc64.
16303 		 *
16304 		 * On 5703/5704 chips, this bit has been reassigned
16305 		 * a different meaning.  In particular, it is used
16306 		 * on those chips to enable a PCI-X workaround.
16307 		 */
16308 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16309 	}
16310 
16311 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16312 
16313 #if 0
16314 	/* Unneeded, already done by tg3_get_invariants.  */
16315 	tg3_switch_clocks(tp);
16316 #endif
16317 
16318 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16319 	    tg3_asic_rev(tp) != ASIC_REV_5701)
16320 		goto out;
16321 
	/* It is best to perform the DMA test with the maximum write
	 * burst size to expose the 5700/5701 write DMA bug.
16324 	 */
16325 	saved_dma_rwctrl = tp->dma_rwctrl;
16326 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16327 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
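
	/* Fill the buffer with a known pattern, DMA it to the chip and
	 * back, and verify the result.  On a mismatch, clamp the write
	 * boundary to 16 bytes and retry; a mismatch at the 16-byte
	 * boundary means the device is unusable.
	 */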
16328 
16329 	while (1) {
16330 		u32 *p = buf, i;
16331 
16332 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16333 			p[i] = i;
16334 
16335 		/* Send the buffer to the chip. */
16336 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16337 		if (ret) {
16338 			dev_err(&tp->pdev->dev,
16339 				"%s: Buffer write failed. err = %d\n",
16340 				__func__, ret);
16341 			break;
16342 		}
16343 
16344 #if 0
16345 		/* validate data reached card RAM correctly. */
16346 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16347 			u32 val;
16348 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
16349 			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__,
					le32_to_cpu(val), i);
16353 				/* ret = -ENODEV here? */
16354 			}
16355 			p[i] = 0;
16356 		}
16357 #endif
16358 		/* Now read it back. */
16359 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16360 		if (ret) {
16361 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16362 				"err = %d\n", __func__, ret);
16363 			break;
16364 		}
16365 
16366 		/* Verify it. */
16367 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16368 			if (p[i] == i)
16369 				continue;
16370 
16371 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16372 			    DMA_RWCTRL_WRITE_BNDRY_16) {
16373 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16374 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16375 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16376 				break;
16377 			} else {
16378 				dev_err(&tp->pdev->dev,
16379 					"%s: Buffer corrupted on read back! "
16380 					"(%d != %d)\n", __func__, p[i], i);
16381 				ret = -ENODEV;
16382 				goto out;
16383 			}
16384 		}
16385 
16386 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16387 			/* Success. */
16388 			ret = 0;
16389 			break;
16390 		}
16391 	}
16392 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16393 	    DMA_RWCTRL_WRITE_BNDRY_16) {
16394 		/* DMA test passed without adjusting DMA boundary,
16395 		 * now look for chipsets that are known to expose the
16396 		 * DMA bug without failing the test.
16397 		 */
16398 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16399 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16400 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16401 		} else {
16402 			/* Safe to use the calculated DMA boundary. */
16403 			tp->dma_rwctrl = saved_dma_rwctrl;
16404 		}
16405 
16406 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16407 	}
16408 
16409 out:
16410 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16411 out_nofree:
16412 	return ret;
16413 }
16414 
16415 static void tg3_init_bufmgr_config(struct tg3 *tp)
16416 {
16417 	if (tg3_flag(tp, 57765_PLUS)) {
16418 		tp->bufmgr_config.mbuf_read_dma_low_water =
16419 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16420 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16421 			DEFAULT_MB_MACRX_LOW_WATER_57765;
16422 		tp->bufmgr_config.mbuf_high_water =
16423 			DEFAULT_MB_HIGH_WATER_57765;
16424 
16425 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16426 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16427 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16428 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16429 		tp->bufmgr_config.mbuf_high_water_jumbo =
16430 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16431 	} else if (tg3_flag(tp, 5705_PLUS)) {
16432 		tp->bufmgr_config.mbuf_read_dma_low_water =
16433 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16434 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16435 			DEFAULT_MB_MACRX_LOW_WATER_5705;
16436 		tp->bufmgr_config.mbuf_high_water =
16437 			DEFAULT_MB_HIGH_WATER_5705;
16438 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16439 			tp->bufmgr_config.mbuf_mac_rx_low_water =
16440 				DEFAULT_MB_MACRX_LOW_WATER_5906;
16441 			tp->bufmgr_config.mbuf_high_water =
16442 				DEFAULT_MB_HIGH_WATER_5906;
16443 		}
16444 
16445 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16446 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16447 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16448 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16449 		tp->bufmgr_config.mbuf_high_water_jumbo =
16450 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16451 	} else {
16452 		tp->bufmgr_config.mbuf_read_dma_low_water =
16453 			DEFAULT_MB_RDMA_LOW_WATER;
16454 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16455 			DEFAULT_MB_MACRX_LOW_WATER;
16456 		tp->bufmgr_config.mbuf_high_water =
16457 			DEFAULT_MB_HIGH_WATER;
16458 
16459 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16460 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16461 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16462 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16463 		tp->bufmgr_config.mbuf_high_water_jumbo =
16464 			DEFAULT_MB_HIGH_WATER_JUMBO;
16465 	}
16466 
16467 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16468 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16469 }
16470 
16471 static char *tg3_phy_string(struct tg3 *tp)
16472 {
16473 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
16474 	case TG3_PHY_ID_BCM5400:	return "5400";
16475 	case TG3_PHY_ID_BCM5401:	return "5401";
16476 	case TG3_PHY_ID_BCM5411:	return "5411";
16477 	case TG3_PHY_ID_BCM5701:	return "5701";
16478 	case TG3_PHY_ID_BCM5703:	return "5703";
16479 	case TG3_PHY_ID_BCM5704:	return "5704";
16480 	case TG3_PHY_ID_BCM5705:	return "5705";
16481 	case TG3_PHY_ID_BCM5750:	return "5750";
16482 	case TG3_PHY_ID_BCM5752:	return "5752";
16483 	case TG3_PHY_ID_BCM5714:	return "5714";
16484 	case TG3_PHY_ID_BCM5780:	return "5780";
16485 	case TG3_PHY_ID_BCM5755:	return "5755";
16486 	case TG3_PHY_ID_BCM5787:	return "5787";
16487 	case TG3_PHY_ID_BCM5784:	return "5784";
16488 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
16489 	case TG3_PHY_ID_BCM5906:	return "5906";
16490 	case TG3_PHY_ID_BCM5761:	return "5761";
16491 	case TG3_PHY_ID_BCM5718C:	return "5718C";
16492 	case TG3_PHY_ID_BCM5718S:	return "5718S";
16493 	case TG3_PHY_ID_BCM57765:	return "57765";
16494 	case TG3_PHY_ID_BCM5719C:	return "5719C";
16495 	case TG3_PHY_ID_BCM5720C:	return "5720C";
16496 	case TG3_PHY_ID_BCM5762:	return "5762C";
16497 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
16498 	case 0:			return "serdes";
16499 	default:		return "unknown";
16500 	}
16501 }
16502 
16503 static char *tg3_bus_string(struct tg3 *tp, char *str)
16504 {
16505 	if (tg3_flag(tp, PCI_EXPRESS)) {
16506 		strcpy(str, "PCI Express");
16507 		return str;
16508 	} else if (tg3_flag(tp, PCIX_MODE)) {
16509 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16510 
16511 		strcpy(str, "PCIX:");
16512 
16513 		if ((clock_ctrl == 7) ||
16514 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16515 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16516 			strcat(str, "133MHz");
16517 		else if (clock_ctrl == 0)
16518 			strcat(str, "33MHz");
16519 		else if (clock_ctrl == 2)
16520 			strcat(str, "50MHz");
16521 		else if (clock_ctrl == 4)
16522 			strcat(str, "66MHz");
16523 		else if (clock_ctrl == 6)
16524 			strcat(str, "100MHz");
16525 	} else {
16526 		strcpy(str, "PCI:");
16527 		if (tg3_flag(tp, PCI_HIGH_SPEED))
16528 			strcat(str, "66MHz");
16529 		else
16530 			strcat(str, "33MHz");
16531 	}
16532 	if (tg3_flag(tp, PCI_32BIT))
16533 		strcat(str, ":32-bit");
16534 	else
16535 		strcat(str, ":64-bit");
16536 	return str;
16537 }
16538 
16539 static void tg3_init_coal(struct tg3 *tp)
16540 {
16541 	struct ethtool_coalesce *ec = &tp->coal;
16542 
16543 	memset(ec, 0, sizeof(*ec));
16544 	ec->cmd = ETHTOOL_GCOALESCE;
16545 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16546 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16547 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16548 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16549 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16550 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16551 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16552 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16553 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16554 
16555 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16556 				 HOSTCC_MODE_CLRTICK_TXBD)) {
16557 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16558 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16559 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16560 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16561 	}
16562 
16563 	if (tg3_flag(tp, 5705_PLUS)) {
16564 		ec->rx_coalesce_usecs_irq = 0;
16565 		ec->tx_coalesce_usecs_irq = 0;
16566 		ec->stats_block_coalesce_usecs = 0;
16567 	}
16568 }
16569 
16570 static int tg3_init_one(struct pci_dev *pdev,
16571 				  const struct pci_device_id *ent)
16572 {
16573 	struct net_device *dev;
16574 	struct tg3 *tp;
16575 	int i, err, pm_cap;
16576 	u32 sndmbx, rcvmbx, intmbx;
16577 	char str[40];
16578 	u64 dma_mask, persist_dma_mask;
16579 	netdev_features_t features = 0;
16580 
16581 	printk_once(KERN_INFO "%s\n", version);
16582 
16583 	err = pci_enable_device(pdev);
16584 	if (err) {
16585 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16586 		return err;
16587 	}
16588 
16589 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
16590 	if (err) {
16591 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16592 		goto err_out_disable_pdev;
16593 	}
16594 
16595 	pci_set_master(pdev);
16596 
16597 	/* Find power-management capability. */
16598 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16599 	if (pm_cap == 0) {
16600 		dev_err(&pdev->dev,
16601 			"Cannot find Power Management capability, aborting\n");
16602 		err = -EIO;
16603 		goto err_out_free_res;
16604 	}
16605 
16606 	err = pci_set_power_state(pdev, PCI_D0);
16607 	if (err) {
16608 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16609 		goto err_out_free_res;
16610 	}
16611 
16612 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16613 	if (!dev) {
16614 		err = -ENOMEM;
16615 		goto err_out_power_down;
16616 	}
16617 
16618 	SET_NETDEV_DEV(dev, &pdev->dev);
16619 
16620 	tp = netdev_priv(dev);
16621 	tp->pdev = pdev;
16622 	tp->dev = dev;
16623 	tp->pm_cap = pm_cap;
16624 	tp->rx_mode = TG3_DEF_RX_MODE;
16625 	tp->tx_mode = TG3_DEF_TX_MODE;
16626 	tp->irq_sync = 1;
16627 
16628 	if (tg3_debug > 0)
16629 		tp->msg_enable = tg3_debug;
16630 	else
16631 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
16632 
16633 	if (pdev_is_ssb_gige_core(pdev)) {
16634 		tg3_flag_set(tp, IS_SSB_CORE);
16635 		if (ssb_gige_must_flush_posted_writes(pdev))
16636 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16637 		if (ssb_gige_one_dma_at_once(pdev))
16638 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16639 		if (ssb_gige_have_roboswitch(pdev))
16640 			tg3_flag_set(tp, ROBOSWITCH);
16641 		if (ssb_gige_is_rgmii(pdev))
16642 			tg3_flag_set(tp, RGMII_MODE);
16643 	}
16644 
	/* The word/byte swap controls here govern register-access byte
	 * swapping.  DMA data byte swapping is controlled by the GRC_MODE
16647 	 * setting below.
16648 	 */
16649 	tp->misc_host_ctrl =
16650 		MISC_HOST_CTRL_MASK_PCI_INT |
16651 		MISC_HOST_CTRL_WORD_SWAP |
16652 		MISC_HOST_CTRL_INDIR_ACCESS |
16653 		MISC_HOST_CTRL_PCISTATE_RW;
16654 
16655 	/* The NONFRM (non-frame) byte/word swap controls take effect
16656 	 * on descriptor entries, anything which isn't packet data.
16657 	 *
16658 	 * The StrongARM chips on the board (one for tx, one for rx)
16659 	 * are running in big-endian mode.
16660 	 */
16661 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16662 			GRC_MODE_WSWAP_NONFRM_DATA);
16663 #ifdef __BIG_ENDIAN
16664 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16665 #endif
16666 	spin_lock_init(&tp->lock);
16667 	spin_lock_init(&tp->indirect_lock);
16668 	INIT_WORK(&tp->reset_task, tg3_reset_task);
16669 
16670 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
16671 	if (!tp->regs) {
16672 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16673 		err = -ENOMEM;
16674 		goto err_out_free_dev;
16675 	}
16676 
16677 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16678 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16679 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16680 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16681 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16682 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16683 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16684 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16685 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16686 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16687 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16688 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16689 		tg3_flag_set(tp, ENABLE_APE);
16690 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16691 		if (!tp->aperegs) {
16692 			dev_err(&pdev->dev,
16693 				"Cannot map APE registers, aborting\n");
16694 			err = -ENOMEM;
16695 			goto err_out_iounmap;
16696 		}
16697 	}
16698 
16699 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16700 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16701 
16702 	dev->ethtool_ops = &tg3_ethtool_ops;
16703 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
16704 	dev->netdev_ops = &tg3_netdev_ops;
16705 	dev->irq = pdev->irq;
16706 
16707 	err = tg3_get_invariants(tp, ent);
16708 	if (err) {
16709 		dev_err(&pdev->dev,
16710 			"Problem fetching invariants of chip, aborting\n");
16711 		goto err_out_apeunmap;
16712 	}
16713 
16714 	/* The EPB bridge inside 5714, 5715, and 5780 and any
16715 	 * device behind the EPB cannot support DMA addresses > 40-bit.
16716 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16717 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16718 	 * do DMA address check in tg3_start_xmit().
16719 	 */
16720 	if (tg3_flag(tp, IS_5788))
16721 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16722 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16723 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16724 #ifdef CONFIG_HIGHMEM
16725 		dma_mask = DMA_BIT_MASK(64);
16726 #endif
16727 	} else
16728 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16729 
16730 	/* Configure DMA attributes. */
16731 	if (dma_mask > DMA_BIT_MASK(32)) {
16732 		err = pci_set_dma_mask(pdev, dma_mask);
16733 		if (!err) {
16734 			features |= NETIF_F_HIGHDMA;
16735 			err = pci_set_consistent_dma_mask(pdev,
16736 							  persist_dma_mask);
16737 			if (err < 0) {
16738 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
16739 					"DMA for consistent allocations\n");
16740 				goto err_out_apeunmap;
16741 			}
16742 		}
16743 	}
16744 	if (err || dma_mask == DMA_BIT_MASK(32)) {
16745 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16746 		if (err) {
16747 			dev_err(&pdev->dev,
16748 				"No usable DMA configuration, aborting\n");
16749 			goto err_out_apeunmap;
16750 		}
16751 	}
16752 
16753 	tg3_init_bufmgr_config(tp);
16754 
16755 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16756 
16757 	/* 5700 B0 chips do not support checksumming correctly due
16758 	 * to hardware bugs.
16759 	 */
16760 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16761 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16762 
16763 		if (tg3_flag(tp, 5755_PLUS))
16764 			features |= NETIF_F_IPV6_CSUM;
16765 	}
16766 
16767 	/* TSO is on by default on chips that support hardware TSO.
16768 	 * Firmware TSO on older chips gives lower performance, so it
16769 	 * is off by default, but can be enabled using ethtool.
16770 	 */
16771 	if ((tg3_flag(tp, HW_TSO_1) ||
16772 	     tg3_flag(tp, HW_TSO_2) ||
16773 	     tg3_flag(tp, HW_TSO_3)) &&
16774 	    (features & NETIF_F_IP_CSUM))
16775 		features |= NETIF_F_TSO;
16776 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16777 		if (features & NETIF_F_IPV6_CSUM)
16778 			features |= NETIF_F_TSO6;
16779 		if (tg3_flag(tp, HW_TSO_3) ||
16780 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16781 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16782 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16783 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16784 		    tg3_asic_rev(tp) == ASIC_REV_57780)
16785 			features |= NETIF_F_TSO_ECN;
16786 	}
16787 
16788 	dev->features |= features;
16789 	dev->vlan_features |= features;
16790 
16791 	/*
16792 	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16794 	 * loopback for the remaining devices.
16795 	 */
16796 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16797 	    !tg3_flag(tp, CPMU_PRESENT))
16798 		/* Add the loopback capability */
16799 		features |= NETIF_F_LOOPBACK;
16800 
16801 	dev->hw_features |= features;
16802 
16803 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16804 	    !tg3_flag(tp, TSO_CAPABLE) &&
16805 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16806 		tg3_flag_set(tp, MAX_RXPEND_64);
16807 		tp->rx_pending = 63;
16808 	}
16809 
16810 	err = tg3_get_device_address(tp);
16811 	if (err) {
16812 		dev_err(&pdev->dev,
16813 			"Could not obtain valid ethernet address, aborting\n");
16814 		goto err_out_apeunmap;
16815 	}
16816 
16817 	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut
	 * down DMA: the DMA self test will enable WDMAC, and we would
	 * then see (spurious) pending DMA on the PCI bus.
16821 	 */
16822 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16823 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16824 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16825 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16826 	}
16827 
16828 	err = tg3_test_dma(tp);
16829 	if (err) {
16830 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16831 		goto err_out_apeunmap;
16832 	}
16833 
16834 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16835 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16836 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16837 	for (i = 0; i < tp->irq_max; i++) {
16838 		struct tg3_napi *tnapi = &tp->napi[i];
16839 
16840 		tnapi->tp = tp;
16841 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16842 
16843 		tnapi->int_mbox = intmbx;
16844 		if (i <= 4)
16845 			intmbx += 0x8;
16846 		else
16847 			intmbx += 0x4;
16848 
16849 		tnapi->consmbox = rcvmbx;
16850 		tnapi->prodmbox = sndmbx;
16851 
16852 		if (i)
16853 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16854 		else
16855 			tnapi->coal_now = HOSTCC_MODE_NOW;
16856 
16857 		if (!tg3_flag(tp, SUPPORT_MSIX))
16858 			break;
16859 
16860 		/*
16861 		 * If we support MSIX, we'll be using RSS.  If we're using
16862 		 * RSS, the first vector only handles link interrupts and the
16863 		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for single-vector mode.
16866 		 */
16867 		if (!i)
16868 			continue;
16869 
16870 		rcvmbx += 0x8;
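
		/* The alternating -0x4/+0xc step below toggles between
		 * the two 32-bit halves of each 64-bit send mailbox
		 * while advancing one 8-byte mailbox every two vectors,
		 * presumably matching the producer-mailbox layout of
		 * the additional tx rings.
		 */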
16871 
16872 		if (sndmbx & 0x4)
16873 			sndmbx -= 0x4;
16874 		else
16875 			sndmbx += 0xc;
16876 	}
16877 
16878 	tg3_init_coal(tp);
16879 
16880 	pci_set_drvdata(pdev, dev);
16881 
16882 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16883 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16884 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16885 		tg3_flag_set(tp, PTP_CAPABLE);
16886 
16887 	if (tg3_flag(tp, 5717_PLUS)) {
16888 		/* Resume a low-power mode */
16889 		tg3_frob_aux_power(tp, false);
16890 	}
16891 
16892 	tg3_timer_init(tp);
16893 
16894 	tg3_carrier_off(tp);
16895 
16896 	err = register_netdev(dev);
16897 	if (err) {
16898 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16899 		goto err_out_apeunmap;
16900 	}
16901 
16902 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16903 		    tp->board_part_number,
16904 		    tg3_chip_rev_id(tp),
16905 		    tg3_bus_string(tp, str),
16906 		    dev->dev_addr);
16907 
16908 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16909 		struct phy_device *phydev;
16910 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16911 		netdev_info(dev,
16912 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16913 			    phydev->drv->name, dev_name(&phydev->dev));
16914 	} else {
16915 		char *ethtype;
16916 
16917 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16918 			ethtype = "10/100Base-TX";
16919 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16920 			ethtype = "1000Base-SX";
16921 		else
16922 			ethtype = "10/100/1000Base-T";
16923 
16924 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16925 			    "(WireSpeed[%d], EEE[%d])\n",
16926 			    tg3_phy_string(tp), ethtype,
16927 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16928 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16929 	}
16930 
16931 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16932 		    (dev->features & NETIF_F_RXCSUM) != 0,
16933 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
16934 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16935 		    tg3_flag(tp, ENABLE_ASF) != 0,
16936 		    tg3_flag(tp, TSO_CAPABLE) != 0);
16937 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16938 		    tp->dma_rwctrl,
16939 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16940 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16941 
16942 	pci_save_state(pdev);
16943 
16944 	return 0;
16945 
16946 err_out_apeunmap:
16947 	if (tp->aperegs) {
16948 		iounmap(tp->aperegs);
16949 		tp->aperegs = NULL;
16950 	}
16951 
16952 err_out_iounmap:
16953 	if (tp->regs) {
16954 		iounmap(tp->regs);
16955 		tp->regs = NULL;
16956 	}
16957 
16958 err_out_free_dev:
16959 	free_netdev(dev);
16960 
16961 err_out_power_down:
16962 	pci_set_power_state(pdev, PCI_D3hot);
16963 
16964 err_out_free_res:
16965 	pci_release_regions(pdev);
16966 
16967 err_out_disable_pdev:
16968 	pci_disable_device(pdev);
16969 	pci_set_drvdata(pdev, NULL);
16970 	return err;
16971 }
16972 
16973 static void tg3_remove_one(struct pci_dev *pdev)
16974 {
16975 	struct net_device *dev = pci_get_drvdata(pdev);
16976 
16977 	if (dev) {
16978 		struct tg3 *tp = netdev_priv(dev);
16979 
16980 		release_firmware(tp->fw);
16981 
16982 		tg3_reset_task_cancel(tp);
16983 
16984 		if (tg3_flag(tp, USE_PHYLIB)) {
16985 			tg3_phy_fini(tp);
16986 			tg3_mdio_fini(tp);
16987 		}
16988 
16989 		unregister_netdev(dev);
16990 		if (tp->aperegs) {
16991 			iounmap(tp->aperegs);
16992 			tp->aperegs = NULL;
16993 		}
16994 		if (tp->regs) {
16995 			iounmap(tp->regs);
16996 			tp->regs = NULL;
16997 		}
16998 		free_netdev(dev);
16999 		pci_release_regions(pdev);
17000 		pci_disable_device(pdev);
17001 		pci_set_drvdata(pdev, NULL);
17002 	}
17003 }
17004 
17005 #ifdef CONFIG_PM_SLEEP
17006 static int tg3_suspend(struct device *device)
17007 {
17008 	struct pci_dev *pdev = to_pci_dev(device);
17009 	struct net_device *dev = pci_get_drvdata(pdev);
17010 	struct tg3 *tp = netdev_priv(dev);
17011 	int err;
17012 
17013 	if (!netif_running(dev))
17014 		return 0;
17015 
17016 	tg3_reset_task_cancel(tp);
17017 	tg3_phy_stop(tp);
17018 	tg3_netif_stop(tp);
17019 
17020 	tg3_timer_stop(tp);
17021 
17022 	tg3_full_lock(tp, 1);
17023 	tg3_disable_ints(tp);
17024 	tg3_full_unlock(tp);
17025 
17026 	netif_device_detach(dev);
17027 
17028 	tg3_full_lock(tp, 0);
17029 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17030 	tg3_flag_clear(tp, INIT_COMPLETE);
17031 	tg3_full_unlock(tp);
17032 
17033 	err = tg3_power_down_prepare(tp);
17034 	if (err) {
17035 		int err2;
17036 
17037 		tg3_full_lock(tp, 0);
17038 
17039 		tg3_flag_set(tp, INIT_COMPLETE);
17040 		err2 = tg3_restart_hw(tp, 1);
17041 		if (err2)
17042 			goto out;
17043 
17044 		tg3_timer_start(tp);
17045 
17046 		netif_device_attach(dev);
17047 		tg3_netif_start(tp);
17048 
17049 out:
17050 		tg3_full_unlock(tp);
17051 
17052 		if (!err2)
17053 			tg3_phy_start(tp);
17054 	}
17055 
17056 	return err;
17057 }
17058 
17059 static int tg3_resume(struct device *device)
17060 {
17061 	struct pci_dev *pdev = to_pci_dev(device);
17062 	struct net_device *dev = pci_get_drvdata(pdev);
17063 	struct tg3 *tp = netdev_priv(dev);
17064 	int err;
17065 
17066 	if (!netif_running(dev))
17067 		return 0;
17068 
17069 	netif_device_attach(dev);
17070 
17071 	tg3_full_lock(tp, 0);
17072 
17073 	tg3_flag_set(tp, INIT_COMPLETE);
17074 	err = tg3_restart_hw(tp, 1);
17075 	if (err)
17076 		goto out;
17077 
17078 	tg3_timer_start(tp);
17079 
17080 	tg3_netif_start(tp);
17081 
17082 out:
17083 	tg3_full_unlock(tp);
17084 
17085 	if (!err)
17086 		tg3_phy_start(tp);
17087 
17088 	return err;
17089 }
17090 
17091 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17092 #define TG3_PM_OPS (&tg3_pm_ops)
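
/* SIMPLE_DEV_PM_OPS above wires tg3_suspend/tg3_resume into all of
 * the system sleep transitions (suspend/resume, freeze/thaw,
 * poweroff/restore); this driver does not use runtime PM.
 */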
17093 
17094 #else
17095 
17096 #define TG3_PM_OPS NULL
17097 
17098 #endif /* CONFIG_PM_SLEEP */
17099 
17100 /**
17101  * tg3_io_error_detected - called when PCI error is detected
17102  * @pdev: Pointer to PCI device
17103  * @state: The current pci connection state
17104  *
17105  * This function is called after a PCI bus error affecting
17106  * this device has been detected.
17107  */
17108 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17109 					      pci_channel_state_t state)
17110 {
17111 	struct net_device *netdev = pci_get_drvdata(pdev);
17112 	struct tg3 *tp = netdev_priv(netdev);
17113 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17114 
17115 	netdev_info(netdev, "PCI I/O error detected\n");
17116 
17117 	rtnl_lock();
17118 
17119 	if (!netif_running(netdev))
17120 		goto done;
17121 
17122 	tg3_phy_stop(tp);
17123 
17124 	tg3_netif_stop(tp);
17125 
17126 	tg3_timer_stop(tp);
17127 
17128 	/* Want to make sure that the reset task doesn't run */
17129 	tg3_reset_task_cancel(tp);
17130 
17131 	netif_device_detach(netdev);
17132 
17133 	/* Clean up software state, even if MMIO is blocked */
17134 	tg3_full_lock(tp, 0);
17135 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17136 	tg3_full_unlock(tp);
17137 
17138 done:
17139 	if (state == pci_channel_io_perm_failure)
17140 		err = PCI_ERS_RESULT_DISCONNECT;
17141 	else
17142 		pci_disable_device(pdev);
17143 
17144 	rtnl_unlock();
17145 
17146 	return err;
17147 }
17148 
17149 /**
17150  * tg3_io_slot_reset - called after the pci bus has been reset.
17151  * @pdev: Pointer to PCI device
17152  *
17153  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
17155  * followed by fixups by BIOS, and has its config space
17156  * set up identically to what it was at cold boot.
17157  */
17158 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17159 {
17160 	struct net_device *netdev = pci_get_drvdata(pdev);
17161 	struct tg3 *tp = netdev_priv(netdev);
17162 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17163 	int err;
17164 
17165 	rtnl_lock();
17166 
17167 	if (pci_enable_device(pdev)) {
17168 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17169 		goto done;
17170 	}
17171 
17172 	pci_set_master(pdev);
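
	/* Restore the config space saved at probe time, then re-save
	 * it immediately so a later slot reset starts from the same
	 * known-good copy.
	 */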
17173 	pci_restore_state(pdev);
17174 	pci_save_state(pdev);
17175 
17176 	if (!netif_running(netdev)) {
17177 		rc = PCI_ERS_RESULT_RECOVERED;
17178 		goto done;
17179 	}
17180 
17181 	err = tg3_power_up(tp);
17182 	if (err)
17183 		goto done;
17184 
17185 	rc = PCI_ERS_RESULT_RECOVERED;
17186 
17187 done:
17188 	rtnl_unlock();
17189 
17190 	return rc;
17191 }
17192 
17193 /**
17194  * tg3_io_resume - called when traffic can start flowing again.
17195  * @pdev: Pointer to PCI device
17196  *
17197  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
17199  */
17200 static void tg3_io_resume(struct pci_dev *pdev)
17201 {
17202 	struct net_device *netdev = pci_get_drvdata(pdev);
17203 	struct tg3 *tp = netdev_priv(netdev);
17204 	int err;
17205 
17206 	rtnl_lock();
17207 
17208 	if (!netif_running(netdev))
17209 		goto done;
17210 
17211 	tg3_full_lock(tp, 0);
17212 	tg3_flag_set(tp, INIT_COMPLETE);
17213 	err = tg3_restart_hw(tp, 1);
17214 	if (err) {
17215 		tg3_full_unlock(tp);
17216 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17217 		goto done;
17218 	}
17219 
17220 	netif_device_attach(netdev);
17221 
17222 	tg3_timer_start(tp);
17223 
17224 	tg3_netif_start(tp);
17225 
17226 	tg3_full_unlock(tp);
17227 
17228 	tg3_phy_start(tp);
17229 
17230 done:
17231 	rtnl_unlock();
17232 }
17233 
17234 static const struct pci_error_handlers tg3_err_handler = {
17235 	.error_detected	= tg3_io_error_detected,
17236 	.slot_reset	= tg3_io_slot_reset,
17237 	.resume		= tg3_io_resume
17238 };
17239 
17240 static struct pci_driver tg3_driver = {
17241 	.name		= DRV_MODULE_NAME,
17242 	.id_table	= tg3_pci_tbl,
17243 	.probe		= tg3_init_one,
17244 	.remove		= tg3_remove_one,
17245 	.err_handler	= &tg3_err_handler,
17246 	.driver.pm	= TG3_PM_OPS,
17247 };
17248 
17249 static int __init tg3_init(void)
17250 {
17251 	return pci_register_driver(&tg3_driver);
17252 }
17253 
17254 static void __exit tg3_cleanup(void)
17255 {
17256 	pci_unregister_driver(&tg3_driver);
17257 }
17258 
17259 module_init(tg3_init);
17260 module_exit(tg3_cleanup);
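
/* Typical usage once built (values illustrative):
 *
 *	# modprobe tg3 tg3_debug=1
 *	# dmesg | grep tg3
 *
 * tg3_debug maps onto tp->msg_enable (see tg3_init_one); leaving it
 * at 0 keeps the TG3_DEF_MSG_ENABLE defaults.
 */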
17261