1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 
51 #include <net/checksum.h>
52 #include <net/ip.h>
53 
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57 
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60 
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65 
66 #define BAR_0	0
67 #define BAR_2	2
68 
69 #include "tg3.h"
70 
71 /* Functions & macros to verify TG3_FLAGS types */
72 
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	return test_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	set_bit(flag, bits);
81 }
82 
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	clear_bit(flag, bits);
86 }
87 
88 #define tg3_flag(tp, flag)				\
89 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)				\
91 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)			\
93 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
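
/* Example: tg3_flag(tp, ENABLE_APE) tests TG3_FLAG_ENABLE_APE in
 * tp->tg3_flags; tg3_flag_set()/tg3_flag_clear() set and clear it.
 * The token pasting keeps call sites short and makes a misspelled
 * flag name a compile-time error.
 */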
94 
95 #define DRV_MODULE_NAME		"tg3"
96 #define TG3_MAJ_NUM			3
97 #define TG3_MIN_NUM			133
98 #define DRV_MODULE_VERSION	\
99 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE	"Jul 29, 2013"
101 
102 #define RESET_KIND_SHUTDOWN	0
103 #define RESET_KIND_INIT		1
104 #define RESET_KIND_SUSPEND	2
105 
106 #define TG3_DEF_RX_MODE		0
107 #define TG3_DEF_TX_MODE		0
108 #define TG3_DEF_MSG_ENABLE	  \
109 	(NETIF_MSG_DRV		| \
110 	 NETIF_MSG_PROBE	| \
111 	 NETIF_MSG_LINK		| \
112 	 NETIF_MSG_TIMER	| \
113 	 NETIF_MSG_IFDOWN	| \
114 	 NETIF_MSG_IFUP		| \
115 	 NETIF_MSG_RX_ERR	| \
116 	 NETIF_MSG_TX_ERR)
117 
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119 
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123 
124 #define TG3_TX_TIMEOUT			(5 * HZ)
125 
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU			60
128 #define TG3_MAX_MTU(tp)	\
129 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING		200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
143 
/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo
 * et al. operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150 
151 #define TG3_TX_RING_SIZE		512
152 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153 
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161 				 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
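
/* TG3_TX_RING_SIZE is a power of two, so NEXT_TX() wraps with a mask:
 * NEXT_TX(511) == (512 & 511) == 0, with no divide or modulo needed.
 */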
163 
164 #define TG3_DMA_BYTE_ENAB		64
165 
166 #define TG3_RX_STD_DMA_SZ		1536
167 #define TG3_RX_JMB_DMA_SZ		9046
168 
169 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170 
171 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD		256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194 #else
195 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196 #endif
197 
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202 #endif
203 
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K		2048
207 #define TG3_TX_BD_DMA_MAX_4K		4096
208 
209 #define TG3_RAW_IP_ALIGN 2
210 
211 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
212 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 
214 #define FIRMWARE_TG3		"tigon/tg3.bin"
215 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
218 
219 static char version[] =
220 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221 
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 
230 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
236 
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 			TG3_DRV_DATA_FLAG_5705_10_100},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 			PCI_VENDOR_ID_LENOVO,
288 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 	{}
349 };
350 
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352 
353 static const struct {
354 	const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 	{ "rx_octets" },
357 	{ "rx_fragments" },
358 	{ "rx_ucast_packets" },
359 	{ "rx_mcast_packets" },
360 	{ "rx_bcast_packets" },
361 	{ "rx_fcs_errors" },
362 	{ "rx_align_errors" },
363 	{ "rx_xon_pause_rcvd" },
364 	{ "rx_xoff_pause_rcvd" },
365 	{ "rx_mac_ctrl_rcvd" },
366 	{ "rx_xoff_entered" },
367 	{ "rx_frame_too_long_errors" },
368 	{ "rx_jabbers" },
369 	{ "rx_undersize_packets" },
370 	{ "rx_in_length_errors" },
371 	{ "rx_out_length_errors" },
372 	{ "rx_64_or_less_octet_packets" },
373 	{ "rx_65_to_127_octet_packets" },
374 	{ "rx_128_to_255_octet_packets" },
375 	{ "rx_256_to_511_octet_packets" },
376 	{ "rx_512_to_1023_octet_packets" },
377 	{ "rx_1024_to_1522_octet_packets" },
378 	{ "rx_1523_to_2047_octet_packets" },
379 	{ "rx_2048_to_4095_octet_packets" },
380 	{ "rx_4096_to_8191_octet_packets" },
381 	{ "rx_8192_to_9022_octet_packets" },
382 
383 	{ "tx_octets" },
384 	{ "tx_collisions" },
385 
386 	{ "tx_xon_sent" },
387 	{ "tx_xoff_sent" },
388 	{ "tx_flow_control" },
389 	{ "tx_mac_errors" },
390 	{ "tx_single_collisions" },
391 	{ "tx_mult_collisions" },
392 	{ "tx_deferred" },
393 	{ "tx_excessive_collisions" },
394 	{ "tx_late_collisions" },
395 	{ "tx_collide_2times" },
396 	{ "tx_collide_3times" },
397 	{ "tx_collide_4times" },
398 	{ "tx_collide_5times" },
399 	{ "tx_collide_6times" },
400 	{ "tx_collide_7times" },
401 	{ "tx_collide_8times" },
402 	{ "tx_collide_9times" },
403 	{ "tx_collide_10times" },
404 	{ "tx_collide_11times" },
405 	{ "tx_collide_12times" },
406 	{ "tx_collide_13times" },
407 	{ "tx_collide_14times" },
408 	{ "tx_collide_15times" },
409 	{ "tx_ucast_packets" },
410 	{ "tx_mcast_packets" },
411 	{ "tx_bcast_packets" },
412 	{ "tx_carrier_sense_errors" },
413 	{ "tx_discards" },
414 	{ "tx_errors" },
415 
416 	{ "dma_writeq_full" },
417 	{ "dma_write_prioq_full" },
418 	{ "rxbds_empty" },
419 	{ "rx_discards" },
420 	{ "rx_errors" },
421 	{ "rx_threshold_hit" },
422 
423 	{ "dma_readq_full" },
424 	{ "dma_read_prioq_full" },
425 	{ "tx_comp_queue_full" },
426 
427 	{ "ring_set_send_prod_index" },
428 	{ "ring_status_update" },
429 	{ "nic_irqs" },
430 	{ "nic_avoided_irqs" },
431 	{ "nic_tx_threshold_hit" },
432 
433 	{ "mbuf_lwm_thresh_hit" },
434 };
435 
436 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST		0
438 #define TG3_LINK_TEST		1
439 #define TG3_REGISTER_TEST	2
440 #define TG3_MEMORY_TEST		3
441 #define TG3_MAC_LOOPB_TEST	4
442 #define TG3_PHY_LOOPB_TEST	5
443 #define TG3_EXT_LOOPB_TEST	6
444 #define TG3_INTERRUPT_TEST	7
445 
446 
447 static const struct {
448 	const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
451 	[TG3_LINK_TEST]		= { "link test         (online) " },
452 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
453 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
454 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
455 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
456 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
457 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
458 };
459 
460 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
461 
462 
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 	writel(val, tp->regs + off);
466 }
467 
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 	return readl(tp->regs + off);
471 }
472 
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 	writel(val, tp->aperegs + off);
476 }
477 
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 	return readl(tp->aperegs + off);
481 }
482 
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 	unsigned long flags;
486 
487 	spin_lock_irqsave(&tp->indirect_lock, flags);
488 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492 
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 	writel(val, tp->regs + off);
496 	readl(tp->regs + off);
497 }
498 
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 	unsigned long flags;
502 	u32 val;
503 
504 	spin_lock_irqsave(&tp->indirect_lock, flags);
505 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 	return val;
509 }
510 
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 	unsigned long flags;
514 
515 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 				       TG3_64BIT_REG_LOW, val);
518 		return;
519 	}
520 	if (off == TG3_RX_STD_PROD_IDX_REG) {
521 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 				       TG3_64BIT_REG_LOW, val);
523 		return;
524 	}
525 
526 	spin_lock_irqsave(&tp->indirect_lock, flags);
527 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
530 
531 	/* In indirect mode when disabling interrupts, we also need
532 	 * to clear the interrupt bit in the GRC local ctrl register.
533 	 */
534 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 	    (val == 0x1)) {
536 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 	}
539 }
540 
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 	unsigned long flags;
544 	u32 val;
545 
546 	spin_lock_irqsave(&tp->indirect_lock, flags);
547 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 	return val;
551 }
552 
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power;
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 		/* Non-posted methods */
562 		tp->write32(tp, off, val);
563 	else {
564 		/* Posted method */
565 		tg3_write32(tp, off, val);
566 		if (usec_wait)
567 			udelay(usec_wait);
568 		tp->read32(tp, off);
569 	}
570 	/* Wait again after the read for the posted method to guarantee that
571 	 * the wait time is met.
572 	 */
573 	if (usec_wait)
574 		udelay(usec_wait);
575 }
576 
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 	tp->write32_mbox(tp, off, val);
580 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 	     !tg3_flag(tp, ICH_WORKAROUND)))
583 		tp->read32_mbox(tp, off);
584 }
585 
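/* Post a TX producer index.  Chips with the TXD_MBOX_HWBUG erratum
 * need the doorbell written twice; configurations that reorder or
 * must flush posted writes also need a read-back before returning.
 */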
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 	void __iomem *mbox = tp->regs + off;
589 	writel(val, mbox);
590 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 		writel(val, mbox);
592 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
594 		readl(mbox);
595 }
596 
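/* The 5906 reaches its mailboxes through the GRC mailbox window, so
 * the accessors below offset every mailbox address by GRCMBOX_BASE.
 */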
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 	return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601 
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 	writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606 
607 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
612 
613 #define tw32(reg, val)			tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)			tp->read32(tp, reg)
617 
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 	unsigned long flags;
621 
622 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 		return;
625 
626 	spin_lock_irqsave(&tp->indirect_lock, flags);
627 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630 
631 		/* Always leave this as zero. */
632 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 	} else {
634 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
636 
637 		/* Always leave this as zero. */
638 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 	}
640 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642 
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 	unsigned long flags;
646 
647 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 		*val = 0;
650 		return;
651 	}
652 
653 	spin_lock_irqsave(&tp->indirect_lock, flags);
654 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657 
658 		/* Always leave this as zero. */
659 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 	} else {
661 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 		*val = tr32(TG3PCI_MEM_WIN_DATA);
663 
664 		/* Always leave this as zero. */
665 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 	}
667 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
669 
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 	int i;
673 	u32 regbase, bit;
674 
675 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 		regbase = TG3_APE_LOCK_GRANT;
677 	else
678 		regbase = TG3_APE_PER_LOCK_GRANT;
679 
	/* Make sure the driver doesn't hold any stale locks. */
681 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 		switch (i) {
683 		case TG3_APE_LOCK_PHY0:
684 		case TG3_APE_LOCK_PHY1:
685 		case TG3_APE_LOCK_PHY2:
686 		case TG3_APE_LOCK_PHY3:
687 			bit = APE_LOCK_GRANT_DRIVER;
688 			break;
689 		default:
690 			if (!tp->pci_fn)
691 				bit = APE_LOCK_GRANT_DRIVER;
692 			else
693 				bit = 1 << tp->pci_fn;
694 		}
695 		tg3_ape_write32(tp, regbase + 4 * i, bit);
696 	}
697 
698 }
699 
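/* Acquire one of the APE hardware semaphores on behalf of this PCI
 * function.  Polls the grant register for up to 1 millisecond and
 * revokes the request on failure so a later attempt can succeed.
 */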
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 	int i, off;
703 	int ret = 0;
704 	u32 status, req, gnt, bit;
705 
706 	if (!tg3_flag(tp, ENABLE_APE))
707 		return 0;
708 
709 	switch (locknum) {
710 	case TG3_APE_LOCK_GPIO:
711 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 			return 0;
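		/* fall through */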
713 	case TG3_APE_LOCK_GRC:
714 	case TG3_APE_LOCK_MEM:
715 		if (!tp->pci_fn)
716 			bit = APE_LOCK_REQ_DRIVER;
717 		else
718 			bit = 1 << tp->pci_fn;
719 		break;
720 	case TG3_APE_LOCK_PHY0:
721 	case TG3_APE_LOCK_PHY1:
722 	case TG3_APE_LOCK_PHY2:
723 	case TG3_APE_LOCK_PHY3:
724 		bit = APE_LOCK_REQ_DRIVER;
725 		break;
726 	default:
727 		return -EINVAL;
728 	}
729 
730 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 		req = TG3_APE_LOCK_REQ;
732 		gnt = TG3_APE_LOCK_GRANT;
733 	} else {
734 		req = TG3_APE_PER_LOCK_REQ;
735 		gnt = TG3_APE_PER_LOCK_GRANT;
736 	}
737 
738 	off = 4 * locknum;
739 
740 	tg3_ape_write32(tp, req + off, bit);
741 
742 	/* Wait for up to 1 millisecond to acquire lock. */
743 	for (i = 0; i < 100; i++) {
744 		status = tg3_ape_read32(tp, gnt + off);
745 		if (status == bit)
746 			break;
747 		if (pci_channel_offline(tp->pdev))
748 			break;
749 
750 		udelay(10);
751 	}
752 
753 	if (status != bit) {
754 		/* Revoke the lock request. */
755 		tg3_ape_write32(tp, gnt + off, bit);
756 		ret = -EBUSY;
757 	}
758 
759 	return ret;
760 }
761 
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 {
764 	u32 gnt, bit;
765 
766 	if (!tg3_flag(tp, ENABLE_APE))
767 		return;
768 
769 	switch (locknum) {
770 	case TG3_APE_LOCK_GPIO:
771 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
772 			return;
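		/* fall through */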
773 	case TG3_APE_LOCK_GRC:
774 	case TG3_APE_LOCK_MEM:
775 		if (!tp->pci_fn)
776 			bit = APE_LOCK_GRANT_DRIVER;
777 		else
778 			bit = 1 << tp->pci_fn;
779 		break;
780 	case TG3_APE_LOCK_PHY0:
781 	case TG3_APE_LOCK_PHY1:
782 	case TG3_APE_LOCK_PHY2:
783 	case TG3_APE_LOCK_PHY3:
784 		bit = APE_LOCK_GRANT_DRIVER;
785 		break;
786 	default:
787 		return;
788 	}
789 
790 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 		gnt = TG3_APE_LOCK_GRANT;
792 	else
793 		gnt = TG3_APE_PER_LOCK_GRANT;
794 
795 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
796 }
797 
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 {
800 	u32 apedata;
801 
802 	while (timeout_us) {
803 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
804 			return -EBUSY;
805 
806 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
808 			break;
809 
810 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
811 
812 		udelay(10);
813 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
814 	}
815 
816 	return timeout_us ? 0 : -EBUSY;
817 }
818 
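/* Poll for the APE to finish servicing an event.  Returns nonzero if
 * APE_EVENT_STATUS_EVENT_PENDING is still set after timeout_us usec;
 * the caller maps that result to -EAGAIN.
 */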
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 {
821 	u32 i, apedata;
822 
823 	for (i = 0; i < timeout_us / 10; i++) {
824 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825 
826 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
827 			break;
828 
829 		udelay(10);
830 	}
831 
832 	return i == timeout_us / 10;
833 }
834 
835 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 				   u32 len)
837 {
838 	int err;
839 	u32 i, bufoff, msgoff, maxlen, apedata;
840 
841 	if (!tg3_flag(tp, APE_HAS_NCSI))
842 		return 0;
843 
844 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
845 	if (apedata != APE_SEG_SIG_MAGIC)
846 		return -ENODEV;
847 
848 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 	if (!(apedata & APE_FW_STATUS_READY))
850 		return -EAGAIN;
851 
852 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
853 		 TG3_APE_SHMEM_BASE;
854 	msgoff = bufoff + 2 * sizeof(u32);
855 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856 
857 	while (len) {
858 		u32 length;
859 
860 		/* Cap xfer sizes to scratchpad limits. */
861 		length = (len > maxlen) ? maxlen : len;
862 		len -= length;
863 
864 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 		if (!(apedata & APE_FW_STATUS_READY))
866 			return -EAGAIN;
867 
868 		/* Wait for up to 1 msec for APE to service previous event. */
869 		err = tg3_ape_event_lock(tp, 1000);
870 		if (err)
871 			return err;
872 
873 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
874 			  APE_EVENT_STATUS_SCRTCHPD_READ |
875 			  APE_EVENT_STATUS_EVENT_PENDING;
876 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
877 
878 		tg3_ape_write32(tp, bufoff, base_off);
879 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
880 
881 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
882 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 
884 		base_off += length;
885 
886 		if (tg3_ape_wait_for_event(tp, 30000))
887 			return -EAGAIN;
888 
889 		for (i = 0; length; i += 4, length -= 4) {
890 			u32 val = tg3_ape_read32(tp, msgoff + i);
891 			memcpy(data, &val, sizeof(u32));
892 			data++;
893 		}
894 	}
895 
896 	return 0;
897 }
898 
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 {
901 	int err;
902 	u32 apedata;
903 
904 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905 	if (apedata != APE_SEG_SIG_MAGIC)
906 		return -EAGAIN;
907 
908 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909 	if (!(apedata & APE_FW_STATUS_READY))
910 		return -EAGAIN;
911 
912 	/* Wait for up to 1 millisecond for APE to service previous event. */
913 	err = tg3_ape_event_lock(tp, 1000);
914 	if (err)
915 		return err;
916 
917 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918 			event | APE_EVENT_STATUS_EVENT_PENDING);
919 
920 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
922 
923 	return 0;
924 }
925 
926 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
927 {
928 	u32 event;
929 	u32 apedata;
930 
931 	if (!tg3_flag(tp, ENABLE_APE))
932 		return;
933 
934 	switch (kind) {
935 	case RESET_KIND_INIT:
936 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
937 				APE_HOST_SEG_SIG_MAGIC);
938 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
939 				APE_HOST_SEG_LEN_MAGIC);
940 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
941 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
942 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
943 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
944 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
945 				APE_HOST_BEHAV_NO_PHYLOCK);
946 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
947 				    TG3_APE_HOST_DRVR_STATE_START);
948 
949 		event = APE_EVENT_STATUS_STATE_START;
950 		break;
951 	case RESET_KIND_SHUTDOWN:
952 		/* With the interface we are currently using,
953 		 * APE does not track driver state.  Wiping
954 		 * out the HOST SEGMENT SIGNATURE forces
955 		 * the APE to assume OS absent status.
956 		 */
957 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
958 
959 		if (device_may_wakeup(&tp->pdev->dev) &&
960 		    tg3_flag(tp, WOL_ENABLE)) {
961 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
962 					    TG3_APE_HOST_WOL_SPEED_AUTO);
963 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
964 		} else
965 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
966 
967 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
968 
969 		event = APE_EVENT_STATUS_STATE_UNLOAD;
970 		break;
971 	default:
972 		return;
973 	}
974 
975 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976 
977 	tg3_ape_send_event(tp, event);
978 }
979 
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982 	int i;
983 
984 	tw32(TG3PCI_MISC_HOST_CTRL,
985 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 	for (i = 0; i < tp->irq_max; i++)
987 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989 
990 static void tg3_enable_ints(struct tg3 *tp)
991 {
992 	int i;
993 
994 	tp->irq_sync = 0;
995 	wmb();
996 
997 	tw32(TG3PCI_MISC_HOST_CTRL,
998 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999 
1000 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 	for (i = 0; i < tp->irq_cnt; i++) {
1002 		struct tg3_napi *tnapi = &tp->napi[i];
1003 
1004 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 		if (tg3_flag(tp, 1SHOT_MSI))
1006 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007 
1008 		tp->coal_now |= tnapi->coal_now;
1009 	}
1010 
1011 	/* Force an initial interrupt */
1012 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 	else
1016 		tw32(HOSTCC_MODE, tp->coal_now);
1017 
1018 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1019 }
1020 
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023 	struct tg3 *tp = tnapi->tp;
1024 	struct tg3_hw_status *sblk = tnapi->hw_status;
1025 	unsigned int work_exists = 0;
1026 
1027 	/* check for phy events */
1028 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 		if (sblk->status & SD_STATUS_LINK_CHG)
1030 			work_exists = 1;
1031 	}
1032 
1033 	/* check for TX work to do */
1034 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 		work_exists = 1;
1036 
1037 	/* check for RX work to do */
1038 	if (tnapi->rx_rcb_prod_idx &&
1039 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040 		work_exists = 1;
1041 
1042 	return work_exists;
1043 }
1044 
1045 /* tg3_int_reenable
 *  Similar to tg3_enable_ints, but it accurately determines whether
 *  there is new work pending and can return without flushing the PIO
 *  write which reenables interrupts.
1049  */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052 	struct tg3 *tp = tnapi->tp;
1053 
1054 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055 	mmiowb();
1056 
1057 	/* When doing tagged status, this work check is unnecessary.
1058 	 * The last_tag we write above tells the chip which piece of
1059 	 * work we've completed.
1060 	 */
1061 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065 
1066 static void tg3_switch_clocks(struct tg3 *tp)
1067 {
1068 	u32 clock_ctrl;
1069 	u32 orig_clock_ctrl;
1070 
1071 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1072 		return;
1073 
1074 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075 
1076 	orig_clock_ctrl = clock_ctrl;
1077 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 		       CLOCK_CTRL_CLKRUN_OENABLE |
1079 		       0x1f);
1080 	tp->pci_clock_ctrl = clock_ctrl;
1081 
1082 	if (tg3_flag(tp, 5705_PLUS)) {
1083 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 		}
1087 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 			    clock_ctrl |
1090 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 			    40);
1092 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1094 			    40);
1095 	}
1096 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1097 }
1098 
1099 #define PHY_BUSY_LOOPS	5000
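
/* Each busy poll below waits 10 usec, so PHY_BUSY_LOOPS bounds an MII
 * transaction at roughly 50 msec before we give up with -EBUSY.
 */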
1100 
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1102 			 u32 *val)
1103 {
1104 	u32 frame_val;
1105 	unsigned int loops;
1106 	int ret;
1107 
1108 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 		tw32_f(MAC_MI_MODE,
1110 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 		udelay(80);
1112 	}
1113 
1114 	tg3_ape_lock(tp, tp->phy_ape_lock);
1115 
1116 	*val = 0x0;
1117 
1118 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 		      MI_COM_PHY_ADDR_MASK);
1120 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 		      MI_COM_REG_ADDR_MASK);
1122 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123 
1124 	tw32_f(MAC_MI_COM, frame_val);
1125 
1126 	loops = PHY_BUSY_LOOPS;
1127 	while (loops != 0) {
1128 		udelay(10);
1129 		frame_val = tr32(MAC_MI_COM);
1130 
1131 		if ((frame_val & MI_COM_BUSY) == 0) {
1132 			udelay(5);
1133 			frame_val = tr32(MAC_MI_COM);
1134 			break;
1135 		}
1136 		loops -= 1;
1137 	}
1138 
1139 	ret = -EBUSY;
1140 	if (loops != 0) {
1141 		*val = frame_val & MI_COM_DATA_MASK;
1142 		ret = 0;
1143 	}
1144 
1145 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 		udelay(80);
1148 	}
1149 
1150 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1151 
1152 	return ret;
1153 }
1154 
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
1159 
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1161 			  u32 val)
1162 {
1163 	u32 frame_val;
1164 	unsigned int loops;
1165 	int ret;
1166 
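	/* FET-class PHYs do not implement these gigabit/aux-control
	 * registers; quietly report success for writes to them.
	 */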
1167 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1169 		return 0;
1170 
1171 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 		tw32_f(MAC_MI_MODE,
1173 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 		udelay(80);
1175 	}
1176 
1177 	tg3_ape_lock(tp, tp->phy_ape_lock);
1178 
1179 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 		      MI_COM_PHY_ADDR_MASK);
1181 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 		      MI_COM_REG_ADDR_MASK);
1183 	frame_val |= (val & MI_COM_DATA_MASK);
1184 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185 
1186 	tw32_f(MAC_MI_COM, frame_val);
1187 
1188 	loops = PHY_BUSY_LOOPS;
1189 	while (loops != 0) {
1190 		udelay(10);
1191 		frame_val = tr32(MAC_MI_COM);
1192 		if ((frame_val & MI_COM_BUSY) == 0) {
1193 			udelay(5);
1194 			frame_val = tr32(MAC_MI_COM);
1195 			break;
1196 		}
1197 		loops -= 1;
1198 	}
1199 
1200 	ret = -EBUSY;
1201 	if (loops != 0)
1202 		ret = 0;
1203 
1204 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 		udelay(80);
1207 	}
1208 
1209 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1210 
1211 	return ret;
1212 }
1213 
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
1218 
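/* Clause 45 registers are reached through the clause 22 MMD access
 * registers: select the device address via MII_TG3_MMD_CTRL, latch the
 * register address through MII_TG3_MMD_ADDRESS, switch the control
 * register to no-increment data mode, then move the data through
 * MII_TG3_MMD_ADDRESS again.
 */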
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221 	int err;
1222 
1223 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 	if (err)
1225 		goto done;
1226 
1227 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 	if (err)
1229 		goto done;
1230 
1231 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 	if (err)
1234 		goto done;
1235 
1236 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237 
1238 done:
1239 	return err;
1240 }
1241 
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244 	int err;
1245 
1246 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 	if (err)
1248 		goto done;
1249 
1250 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 	if (err)
1252 		goto done;
1253 
1254 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 	if (err)
1257 		goto done;
1258 
1259 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260 
1261 done:
1262 	return err;
1263 }
1264 
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267 	int err;
1268 
1269 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 	if (!err)
1271 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272 
1273 	return err;
1274 }
1275 
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278 	int err;
1279 
1280 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 	if (!err)
1282 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283 
1284 	return err;
1285 }
1286 
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289 	int err;
1290 
1291 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 	if (!err)
1295 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296 
1297 	return err;
1298 }
1299 
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 		set |= MII_TG3_AUXCTL_MISC_WREN;
1304 
1305 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307 
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310 	u32 val;
1311 	int err;
1312 
1313 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314 
1315 	if (err)
1316 		return err;
1317 
1318 	if (enable)
1319 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 	else
1321 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322 
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325 
1326 	return err;
1327 }
1328 
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331 	u32 phy_control;
1332 	int limit, err;
1333 
1334 	/* OK, reset it, and poll the BMCR_RESET bit until it
1335 	 * clears or we time out.
1336 	 */
1337 	phy_control = BMCR_RESET;
1338 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 	if (err != 0)
1340 		return -EBUSY;
1341 
1342 	limit = 5000;
1343 	while (limit--) {
1344 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 		if (err != 0)
1346 			return -EBUSY;
1347 
1348 		if ((phy_control & BMCR_RESET) == 0) {
1349 			udelay(40);
1350 			break;
1351 		}
1352 		udelay(10);
1353 	}
1354 	if (limit < 0)
1355 		return -EBUSY;
1356 
1357 	return 0;
1358 }
1359 
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362 	struct tg3 *tp = bp->priv;
1363 	u32 val;
1364 
1365 	spin_lock_bh(&tp->lock);
1366 
1367 	if (tg3_readphy(tp, reg, &val))
1368 		val = -EIO;
1369 
1370 	spin_unlock_bh(&tp->lock);
1371 
1372 	return val;
1373 }
1374 
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377 	struct tg3 *tp = bp->priv;
1378 	u32 ret = 0;
1379 
1380 	spin_lock_bh(&tp->lock);
1381 
1382 	if (tg3_writephy(tp, reg, val))
1383 		ret = -EIO;
1384 
1385 	spin_unlock_bh(&tp->lock);
1386 
1387 	return ret;
1388 }
1389 
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392 	return 0;
1393 }
1394 
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397 	u32 val;
1398 	struct phy_device *phydev;
1399 
1400 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 	case PHY_ID_BCM50610:
1403 	case PHY_ID_BCM50610M:
1404 		val = MAC_PHYCFG2_50610_LED_MODES;
1405 		break;
1406 	case PHY_ID_BCMAC131:
1407 		val = MAC_PHYCFG2_AC131_LED_MODES;
1408 		break;
1409 	case PHY_ID_RTL8211C:
1410 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 		break;
1412 	case PHY_ID_RTL8201E:
1413 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414 		break;
1415 	default:
1416 		return;
1417 	}
1418 
1419 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 		tw32(MAC_PHYCFG2, val);
1421 
1422 		val = tr32(MAC_PHYCFG1);
1423 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 		tw32(MAC_PHYCFG1, val);
1427 
1428 		return;
1429 	}
1430 
1431 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1434 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1435 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1436 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1437 		       MAC_PHYCFG2_INBAND_ENABLE;
1438 
1439 	tw32(MAC_PHYCFG2, val);
1440 
1441 	val = tr32(MAC_PHYCFG1);
1442 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 	}
1450 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 	tw32(MAC_PHYCFG1, val);
1453 
1454 	val = tr32(MAC_EXT_RGMII_MODE);
1455 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 		 MAC_RGMII_MODE_RX_QUALITY |
1457 		 MAC_RGMII_MODE_RX_ACTIVITY |
1458 		 MAC_RGMII_MODE_RX_ENG_DET |
1459 		 MAC_RGMII_MODE_TX_ENABLE |
1460 		 MAC_RGMII_MODE_TX_LOWPWR |
1461 		 MAC_RGMII_MODE_TX_RESET);
1462 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 			val |= MAC_RGMII_MODE_RX_INT_B |
1465 			       MAC_RGMII_MODE_RX_QUALITY |
1466 			       MAC_RGMII_MODE_RX_ACTIVITY |
1467 			       MAC_RGMII_MODE_RX_ENG_DET;
1468 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 			val |= MAC_RGMII_MODE_TX_ENABLE |
1470 			       MAC_RGMII_MODE_TX_LOWPWR |
1471 			       MAC_RGMII_MODE_TX_RESET;
1472 	}
1473 	tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475 
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1480 	udelay(80);
1481 
1482 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1484 		tg3_mdio_config_5785(tp);
1485 }
1486 
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489 	int i;
1490 	u32 reg;
1491 	struct phy_device *phydev;
1492 
1493 	if (tg3_flag(tp, 5717_PLUS)) {
1494 		u32 is_serdes;
1495 
1496 		tp->phy_addr = tp->pci_fn + 1;
1497 
1498 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 		else
1501 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 		if (is_serdes)
1504 			tp->phy_addr += 7;
1505 	} else
1506 		tp->phy_addr = TG3_PHY_MII_ADDR;
1507 
1508 	tg3_mdio_start(tp);
1509 
1510 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511 		return 0;
1512 
1513 	tp->mdio_bus = mdiobus_alloc();
1514 	if (tp->mdio_bus == NULL)
1515 		return -ENOMEM;
1516 
1517 	tp->mdio_bus->name     = "tg3 mdio bus";
1518 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 	tp->mdio_bus->priv     = tp;
1521 	tp->mdio_bus->parent   = &tp->pdev->dev;
1522 	tp->mdio_bus->read     = &tg3_mdio_read;
1523 	tp->mdio_bus->write    = &tg3_mdio_write;
1524 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1525 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1527 
1528 	for (i = 0; i < PHY_MAX_ADDR; i++)
1529 		tp->mdio_bus->irq[i] = PHY_POLL;
1530 
1531 	/* The bus registration will look for all the PHYs on the mdio bus.
1532 	 * Unfortunately, it does not ensure the PHY is powered up before
1533 	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
1535 	 */
1536 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537 		tg3_bmcr_reset(tp);
1538 
1539 	i = mdiobus_register(tp->mdio_bus);
1540 	if (i) {
1541 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 		mdiobus_free(tp->mdio_bus);
1543 		return i;
1544 	}
1545 
1546 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547 
1548 	if (!phydev || !phydev->drv) {
1549 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 		mdiobus_unregister(tp->mdio_bus);
1551 		mdiobus_free(tp->mdio_bus);
1552 		return -ENODEV;
1553 	}
1554 
1555 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 	case PHY_ID_BCM57780:
1557 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 		break;
1560 	case PHY_ID_BCM50610:
1561 	case PHY_ID_BCM50610M:
1562 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 				     PHY_BRCM_RX_REFCLK_UNUSED |
1564 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
1573 	case PHY_ID_RTL8211C:
1574 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 		break;
1576 	case PHY_ID_RTL8201E:
1577 	case PHY_ID_BCMAC131:
1578 		phydev->interface = PHY_INTERFACE_MODE_MII;
1579 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 		break;
1582 	}
1583 
1584 	tg3_flag_set(tp, MDIOBUS_INITED);
1585 
1586 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 		tg3_mdio_config_5785(tp);
1588 
1589 	return 0;
1590 }
1591 
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 		tg3_flag_clear(tp, MDIOBUS_INITED);
1596 		mdiobus_unregister(tp->mdio_bus);
1597 		mdiobus_free(tp->mdio_bus);
1598 	}
1599 }
1600 
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604 	u32 val;
1605 
1606 	val = tr32(GRC_RX_CPU_EVENT);
1607 	val |= GRC_RX_CPU_DRIVER_EVENT;
1608 	tw32_f(GRC_RX_CPU_EVENT, val);
1609 
1610 	tp->last_event_jiffies = jiffies;
1611 }
1612 
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618 	int i;
1619 	unsigned int delay_cnt;
1620 	long time_remain;
1621 
1622 	/* If enough time has passed, no wait is necessary. */
1623 	time_remain = (long)(tp->last_event_jiffies + 1 +
1624 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 		      (long)jiffies;
1626 	if (time_remain < 0)
1627 		return;
1628 
1629 	/* Check if we can shorten the wait time. */
1630 	delay_cnt = jiffies_to_usecs(time_remain);
1631 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
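	/* Each polling pass below costs ~8 usec, so convert the
	 * remaining microseconds into a loop count.
	 */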
1633 	delay_cnt = (delay_cnt >> 3) + 1;
1634 
1635 	for (i = 0; i < delay_cnt; i++) {
1636 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 			break;
1638 		if (pci_channel_offline(tp->pdev))
1639 			break;
1640 
1641 		udelay(8);
1642 	}
1643 }
1644 
1645 /* tp->lock is held. */
1646 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1647 {
1648 	u32 reg, val;
1649 
1650 	val = 0;
1651 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1652 		val = reg << 16;
1653 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1654 		val |= (reg & 0xffff);
1655 	*data++ = val;
1656 
1657 	val = 0;
1658 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1659 		val = reg << 16;
1660 	if (!tg3_readphy(tp, MII_LPA, &reg))
1661 		val |= (reg & 0xffff);
1662 	*data++ = val;
1663 
1664 	val = 0;
1665 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1666 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1667 			val = reg << 16;
1668 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1669 			val |= (reg & 0xffff);
1670 	}
1671 	*data++ = val;
1672 
1673 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1674 		val = reg << 16;
1675 	else
1676 		val = 0;
1677 	*data++ = val;
1678 }
1679 
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3 *tp)
1682 {
1683 	u32 data[4];
1684 
1685 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1686 		return;
1687 
1688 	tg3_phy_gather_ump_data(tp, data);
1689 
1690 	tg3_wait_for_event_ack(tp);
1691 
1692 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1693 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1694 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1695 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1696 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1697 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698 
1699 	tg3_generate_fw_event(tp);
1700 }
1701 
1702 /* tp->lock is held. */
1703 static void tg3_stop_fw(struct tg3 *tp)
1704 {
1705 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1706 		/* Wait for RX cpu to ACK the previous event. */
1707 		tg3_wait_for_event_ack(tp);
1708 
1709 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710 
1711 		tg3_generate_fw_event(tp);
1712 
1713 		/* Wait for RX cpu to ACK this event. */
1714 		tg3_wait_for_event_ack(tp);
1715 	}
1716 }
1717 
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 {
1721 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1722 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723 
1724 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725 		switch (kind) {
1726 		case RESET_KIND_INIT:
1727 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 				      DRV_STATE_START);
1729 			break;
1730 
1731 		case RESET_KIND_SHUTDOWN:
1732 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 				      DRV_STATE_UNLOAD);
1734 			break;
1735 
1736 		case RESET_KIND_SUSPEND:
1737 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738 				      DRV_STATE_SUSPEND);
1739 			break;
1740 
1741 		default:
1742 			break;
1743 		}
1744 	}
1745 }
1746 
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 		switch (kind) {
1752 		case RESET_KIND_INIT:
1753 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 				      DRV_STATE_START_DONE);
1755 			break;
1756 
1757 		case RESET_KIND_SHUTDOWN:
1758 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 				      DRV_STATE_UNLOAD_DONE);
1760 			break;
1761 
1762 		default:
1763 			break;
1764 		}
1765 	}
1766 }
1767 
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 {
1771 	if (tg3_flag(tp, ENABLE_ASF)) {
1772 		switch (kind) {
1773 		case RESET_KIND_INIT:
1774 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775 				      DRV_STATE_START);
1776 			break;
1777 
1778 		case RESET_KIND_SHUTDOWN:
1779 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780 				      DRV_STATE_UNLOAD);
1781 			break;
1782 
1783 		case RESET_KIND_SUSPEND:
1784 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 				      DRV_STATE_SUSPEND);
1786 			break;
1787 
1788 		default:
1789 			break;
1790 		}
1791 	}
1792 }
1793 
1794 static int tg3_poll_fw(struct tg3 *tp)
1795 {
1796 	int i;
1797 	u32 val;
1798 
1799 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1800 		return 0;
1801 
1802 	if (tg3_flag(tp, IS_SSB_CORE)) {
1803 		/* We don't use firmware. */
1804 		return 0;
1805 	}
1806 
1807 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 		/* Wait up to 20ms for init done. */
1809 		for (i = 0; i < 200; i++) {
1810 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 				return 0;
1812 			if (pci_channel_offline(tp->pdev))
1813 				return -ENODEV;
1814 
1815 			udelay(100);
1816 		}
1817 		return -ENODEV;
1818 	}
1819 
1820 	/* Wait for firmware initialization to complete. */
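	/* The bootcode signals completion by writing the one's
	 * complement of the MAGIC1 value that tg3_write_sig_pre_reset()
	 * seeded into this mailbox.
	 */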
1821 	for (i = 0; i < 100000; i++) {
1822 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1823 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1824 			break;
1825 		if (pci_channel_offline(tp->pdev)) {
1826 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1827 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1828 				netdev_info(tp->dev, "No firmware running\n");
1829 			}
1830 
1831 			break;
1832 		}
1833 
1834 		udelay(10);
1835 	}
1836 
1837 	/* Chip might not be fitted with firmware.  Some Sun onboard
1838 	 * parts are configured like that.  So don't signal the timeout
1839 	 * of the above loop as an error, but do report the lack of
1840 	 * running firmware once.
1841 	 */
1842 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1843 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1844 
1845 		netdev_info(tp->dev, "No firmware running\n");
1846 	}
1847 
1848 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1849 		/* The 57765 A0 needs a little more
1850 		 * time to do some important work.
1851 		 */
1852 		mdelay(10);
1853 	}
1854 
1855 	return 0;
1856 }
1857 
1858 static void tg3_link_report(struct tg3 *tp)
1859 {
1860 	if (!netif_carrier_ok(tp->dev)) {
1861 		netif_info(tp, link, tp->dev, "Link is down\n");
1862 		tg3_ump_link_report(tp);
1863 	} else if (netif_msg_link(tp)) {
1864 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1865 			    (tp->link_config.active_speed == SPEED_1000 ?
1866 			     1000 :
1867 			     (tp->link_config.active_speed == SPEED_100 ?
1868 			      100 : 10)),
1869 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1870 			     "full" : "half"));
1871 
1872 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1873 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874 			    "on" : "off",
1875 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1876 			    "on" : "off");
1877 
1878 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1879 			netdev_info(tp->dev, "EEE is %s\n",
1880 				    tp->setlpicnt ? "enabled" : "disabled");
1881 
1882 		tg3_ump_link_report(tp);
1883 	}
1884 
1885 	tp->link_up = netif_carrier_ok(tp->dev);
1886 }
1887 
1888 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1889 {
1890 	u32 flowctrl = 0;
1891 
1892 	if (adv & ADVERTISE_PAUSE_CAP) {
1893 		flowctrl |= FLOW_CTRL_RX;
1894 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1895 			flowctrl |= FLOW_CTRL_TX;
1896 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1897 		flowctrl |= FLOW_CTRL_TX;
1898 
1899 	return flowctrl;
1900 }
1901 
1902 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1903 {
1904 	u16 miireg;
1905 
1906 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1907 		miireg = ADVERTISE_1000XPAUSE;
1908 	else if (flow_ctrl & FLOW_CTRL_TX)
1909 		miireg = ADVERTISE_1000XPSE_ASYM;
1910 	else if (flow_ctrl & FLOW_CTRL_RX)
1911 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1912 	else
1913 		miireg = 0;
1914 
1915 	return miireg;
1916 }
1917 
1918 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1919 {
1920 	u32 flowctrl = 0;
1921 
1922 	if (adv & ADVERTISE_1000XPAUSE) {
1923 		flowctrl |= FLOW_CTRL_RX;
1924 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1925 			flowctrl |= FLOW_CTRL_TX;
1926 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1927 		flowctrl |= FLOW_CTRL_TX;
1928 
1929 	return flowctrl;
1930 }
1931 
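/* Resolve locally and remotely advertised 1000BASE-X pause bits into
 * a flow control setting, per the pause resolution table of IEEE
 * 802.3 Annex 28B.  A sketch of the mapping implemented below:
 *
 *   local PAUSE/ASYM    remote PAUSE/ASYM    resolved
 *   1 / x               1 / x                TX + RX (symmetric)
 *   0 / 1               1 / 1                TX only
 *   1 / 1               0 / 1                RX only
 *   anything else                            none
 */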
1932 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1933 {
1934 	u8 cap = 0;
1935 
1936 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1937 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1938 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1939 		if (lcladv & ADVERTISE_1000XPAUSE)
1940 			cap = FLOW_CTRL_RX;
1941 		if (rmtadv & ADVERTISE_1000XPAUSE)
1942 			cap = FLOW_CTRL_TX;
1943 	}
1944 
1945 	return cap;
1946 }
1947 
1948 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1949 {
1950 	u8 autoneg;
1951 	u8 flowctrl = 0;
1952 	u32 old_rx_mode = tp->rx_mode;
1953 	u32 old_tx_mode = tp->tx_mode;
1954 
1955 	if (tg3_flag(tp, USE_PHYLIB))
1956 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1957 	else
1958 		autoneg = tp->link_config.autoneg;
1959 
1960 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1961 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1962 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963 		else
1964 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965 	} else
1966 		flowctrl = tp->link_config.flowctrl;
1967 
1968 	tp->link_config.active_flowctrl = flowctrl;
1969 
1970 	if (flowctrl & FLOW_CTRL_RX)
1971 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972 	else
1973 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1974 
1975 	if (old_rx_mode != tp->rx_mode)
1976 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1977 
1978 	if (flowctrl & FLOW_CTRL_TX)
1979 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980 	else
1981 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982 
1983 	if (old_tx_mode != tp->tx_mode)
1984 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1985 }
1986 
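/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init().  It runs without tp->lock held, so it takes the
 * lock itself before touching MAC_MODE and the link_config state.
 */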
1987 static void tg3_adjust_link(struct net_device *dev)
1988 {
1989 	u8 oldflowctrl, linkmesg = 0;
1990 	u32 mac_mode, lcl_adv, rmt_adv;
1991 	struct tg3 *tp = netdev_priv(dev);
1992 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1993 
1994 	spin_lock_bh(&tp->lock);
1995 
1996 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1997 				    MAC_MODE_HALF_DUPLEX);
1998 
1999 	oldflowctrl = tp->link_config.active_flowctrl;
2000 
2001 	if (phydev->link) {
2002 		lcl_adv = 0;
2003 		rmt_adv = 0;
2004 
2005 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2006 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2007 		else if (phydev->speed == SPEED_1000 ||
2008 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2009 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2010 		else
2011 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2012 
2013 		if (phydev->duplex == DUPLEX_HALF)
2014 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2015 		else {
2016 			lcl_adv = mii_advertise_flowctrl(
2017 				  tp->link_config.flowctrl);
2018 
2019 			if (phydev->pause)
2020 				rmt_adv = LPA_PAUSE_CAP;
2021 			if (phydev->asym_pause)
2022 				rmt_adv |= LPA_PAUSE_ASYM;
2023 		}
2024 
2025 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2026 	} else
2027 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 
2029 	if (mac_mode != tp->mac_mode) {
2030 		tp->mac_mode = mac_mode;
2031 		tw32_f(MAC_MODE, tp->mac_mode);
2032 		udelay(40);
2033 	}
2034 
2035 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2036 		if (phydev->speed == SPEED_10)
2037 			tw32(MAC_MI_STAT,
2038 			     MAC_MI_STAT_10MBPS_MODE |
2039 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2040 		else
2041 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2042 	}
2043 
2044 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2045 		tw32(MAC_TX_LENGTHS,
2046 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2047 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2048 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2049 	else
2050 		tw32(MAC_TX_LENGTHS,
2051 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2052 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2053 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2054 
2055 	if (phydev->link != tp->old_link ||
2056 	    phydev->speed != tp->link_config.active_speed ||
2057 	    phydev->duplex != tp->link_config.active_duplex ||
2058 	    oldflowctrl != tp->link_config.active_flowctrl)
2059 		linkmesg = 1;
2060 
2061 	tp->old_link = phydev->link;
2062 	tp->link_config.active_speed = phydev->speed;
2063 	tp->link_config.active_duplex = phydev->duplex;
2064 
2065 	spin_unlock_bh(&tp->lock);
2066 
2067 	if (linkmesg)
2068 		tg3_link_report(tp);
2069 }
2070 
2071 static int tg3_phy_init(struct tg3 *tp)
2072 {
2073 	struct phy_device *phydev;
2074 
2075 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2076 		return 0;
2077 
2078 	/* Bring the PHY back to a known state. */
2079 	tg3_bmcr_reset(tp);
2080 
2081 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2082 
2083 	/* Attach the MAC to the PHY. */
2084 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2085 			     tg3_adjust_link, phydev->interface);
2086 	if (IS_ERR(phydev)) {
2087 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2088 		return PTR_ERR(phydev);
2089 	}
2090 
2091 	/* Mask with MAC supported features. */
2092 	switch (phydev->interface) {
2093 	case PHY_INTERFACE_MODE_GMII:
2094 	case PHY_INTERFACE_MODE_RGMII:
2095 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2096 			phydev->supported &= (PHY_GBIT_FEATURES |
2097 					      SUPPORTED_Pause |
2098 					      SUPPORTED_Asym_Pause);
2099 			break;
2100 		}
2101 		/* fallthru */
2102 	case PHY_INTERFACE_MODE_MII:
2103 		phydev->supported &= (PHY_BASIC_FEATURES |
2104 				      SUPPORTED_Pause |
2105 				      SUPPORTED_Asym_Pause);
2106 		break;
2107 	default:
2108 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2109 		return -EINVAL;
2110 	}
2111 
2112 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2113 
2114 	phydev->advertising = phydev->supported;
2115 
2116 	return 0;
2117 }
2118 
2119 static void tg3_phy_start(struct tg3 *tp)
2120 {
2121 	struct phy_device *phydev;
2122 
2123 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124 		return;
2125 
2126 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2127 
2128 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2130 		phydev->speed = tp->link_config.speed;
2131 		phydev->duplex = tp->link_config.duplex;
2132 		phydev->autoneg = tp->link_config.autoneg;
2133 		phydev->advertising = tp->link_config.advertising;
2134 	}
2135 
2136 	phy_start(phydev);
2137 
2138 	phy_start_aneg(phydev);
2139 }
2140 
2141 static void tg3_phy_stop(struct tg3 *tp)
2142 {
2143 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 		return;
2145 
2146 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2147 }
2148 
2149 static void tg3_phy_fini(struct tg3 *tp)
2150 {
2151 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2153 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154 	}
2155 }
2156 
2157 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2158 {
2159 	int err;
2160 	u32 val;
2161 
2162 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163 		return 0;
2164 
2165 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2166 		/* Cannot do read-modify-write on 5401 */
2167 		err = tg3_phy_auxctl_write(tp,
2168 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2169 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2170 					   0x4c20);
2171 		goto done;
2172 	}
2173 
2174 	err = tg3_phy_auxctl_read(tp,
2175 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2176 	if (err)
2177 		return err;
2178 
2179 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2180 	err = tg3_phy_auxctl_write(tp,
2181 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2182 
2183 done:
2184 	return err;
2185 }
2186 
2187 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2188 {
2189 	u32 phytest;
2190 
2191 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192 		u32 phy;
2193 
2194 		tg3_writephy(tp, MII_TG3_FET_TEST,
2195 			     phytest | MII_TG3_FET_SHADOW_EN);
2196 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2197 			if (enable)
2198 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199 			else
2200 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2201 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2202 		}
2203 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2204 	}
2205 }
2206 
2207 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2208 {
2209 	u32 reg;
2210 
2211 	if (!tg3_flag(tp, 5705_PLUS) ||
2212 	    (tg3_flag(tp, 5717_PLUS) &&
2213 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 		return;
2215 
2216 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2217 		tg3_phy_fet_toggle_apd(tp, enable);
2218 		return;
2219 	}
2220 
2221 	reg = MII_TG3_MISC_SHDW_WREN |
2222 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2223 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2224 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2225 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2226 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2227 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2228 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2229 
2230 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231 
2233 	reg = MII_TG3_MISC_SHDW_WREN |
2234 	      MII_TG3_MISC_SHDW_APD_SEL |
2235 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2236 	if (enable)
2237 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2238 
2239 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2240 }
2241 
2242 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2243 {
2244 	u32 phy;
2245 
2246 	if (!tg3_flag(tp, 5705_PLUS) ||
2247 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2248 		return;
2249 
2250 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2251 		u32 ephy;
2252 
2253 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2254 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2255 
2256 			tg3_writephy(tp, MII_TG3_FET_TEST,
2257 				     ephy | MII_TG3_FET_SHADOW_EN);
2258 			if (!tg3_readphy(tp, reg, &phy)) {
2259 				if (enable)
2260 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2261 				else
2262 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2263 				tg3_writephy(tp, reg, phy);
2264 			}
2265 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2266 		}
2267 	} else {
2268 		int ret;
2269 
2270 		ret = tg3_phy_auxctl_read(tp,
2271 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2272 		if (!ret) {
2273 			if (enable)
2274 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2275 			else
2276 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2277 			tg3_phy_auxctl_write(tp,
2278 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2279 		}
2280 	}
2281 }
2282 
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285 	int ret;
2286 	u32 val;
2287 
2288 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289 		return;
2290 
2291 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292 	if (!ret)
2293 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296 
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2298 {
2299 	u32 otp, phy;
2300 
2301 	if (!tp->phy_otp)
2302 		return;
2303 
2304 	otp = tp->phy_otp;
2305 
2306 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2307 		return;
2308 
2309 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2312 
2313 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2316 
2317 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2320 
2321 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2323 
2324 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2326 
2327 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2330 
2331 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2332 }
2333 
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2335 {
2336 	u32 val;
2337 	struct ethtool_eee *dest = &tp->eee;
2338 
2339 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2340 		return;
2341 
2342 	if (eee)
2343 		dest = eee;
2344 
2345 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2346 		return;
2347 
2348 	/* Pull eee_active */
2349 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 		dest->eee_active = 1;
2352 	else
2353 		dest->eee_active = 0;
2354 
2355 	/* Pull lp advertised settings */
2356 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2357 		return;
2358 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359 
2360 	/* Pull advertised and eee_enabled settings */
2361 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2362 		return;
2363 	dest->eee_enabled = !!val;
2364 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365 
2366 	/* Pull tx_lpi_enabled */
2367 	val = tr32(TG3_CPMU_EEE_MODE);
2368 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2369 
2370 	/* Pull lpi timer value */
2371 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2372 }
2373 
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2375 {
2376 	u32 val;
2377 
2378 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2379 		return;
2380 
2381 	tp->setlpicnt = 0;
2382 
2383 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2384 	    current_link_up &&
2385 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2386 	    (tp->link_config.active_speed == SPEED_100 ||
2387 	     tp->link_config.active_speed == SPEED_1000)) {
2388 		u32 eeectl;
2389 
2390 		if (tp->link_config.active_speed == SPEED_1000)
2391 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2392 		else
2393 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2394 
2395 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2396 
2397 		tg3_eee_pull_config(tp, NULL);
2398 		if (tp->eee.eee_active)
2399 			tp->setlpicnt = 2;
2400 	}
2401 
2402 	if (!tp->setlpicnt) {
2403 		if (current_link_up &&
2404 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2407 		}
2408 
2409 		val = tr32(TG3_CPMU_EEE_MODE);
2410 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2411 	}
2412 }
2413 
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2415 {
2416 	u32 val;
2417 
2418 	if (tp->link_config.active_speed == SPEED_1000 &&
2419 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421 	     tg3_flag(tp, 57765_CLASS)) &&
2422 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 		val = MII_TG3_DSP_TAP26_ALNOKO |
2424 		      MII_TG3_DSP_TAP26_RMRXSTO;
2425 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2427 	}
2428 
2429 	val = tr32(TG3_CPMU_EEE_MODE);
2430 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2431 }
2432 
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2434 {
2435 	int limit = 100;
2436 
2437 	while (limit--) {
2438 		u32 tmp32;
2439 
2440 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441 			if ((tmp32 & 0x1000) == 0)
2442 				break;
2443 		}
2444 	}
2445 	if (limit < 0)
2446 		return -EBUSY;
2447 
2448 	return 0;
2449 }
2450 
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2452 {
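	/* Known DSP test patterns, six words per channel.  Each pattern
	 * is written through the DSP read/write port and then read back
	 * below; a mismatch (after masking what appear to be don't-care
	 * bits) fails the test and the caller retries the sequence.
	 */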
2453 	static const u32 test_pat[4][6] = {
2454 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2458 	};
2459 	int chan;
2460 
2461 	for (chan = 0; chan < 4; chan++) {
2462 		int i;
2463 
2464 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465 			     (chan * 0x2000) | 0x0200);
2466 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2467 
2468 		for (i = 0; i < 6; i++)
2469 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2470 				     test_pat[chan][i]);
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473 		if (tg3_wait_macro_done(tp)) {
2474 			*resetp = 1;
2475 			return -EBUSY;
2476 		}
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 			     (chan * 0x2000) | 0x0200);
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		for (i = 0; i < 6; i += 2) {
2493 			u32 low, high;
2494 
2495 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497 			    tg3_wait_macro_done(tp)) {
2498 				*resetp = 1;
2499 				return -EBUSY;
2500 			}
2501 			low &= 0x7fff;
2502 			high &= 0x000f;
2503 			if (low != test_pat[chan][i] ||
2504 			    high != test_pat[chan][i+1]) {
2505 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2508 
2509 				return -EBUSY;
2510 			}
2511 		}
2512 	}
2513 
2514 	return 0;
2515 }
2516 
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2518 {
2519 	int chan;
2520 
2521 	for (chan = 0; chan < 4; chan++) {
2522 		int i;
2523 
2524 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525 			     (chan * 0x2000) | 0x0200);
2526 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527 		for (i = 0; i < 6; i++)
2528 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530 		if (tg3_wait_macro_done(tp))
2531 			return -EBUSY;
2532 	}
2533 
2534 	return 0;
2535 }
2536 
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539 	u32 reg32, phy9_orig;
2540 	int retries, do_phy_reset, err;
2541 
2542 	retries = 10;
2543 	do_phy_reset = 1;
2544 	do {
2545 		if (do_phy_reset) {
2546 			err = tg3_bmcr_reset(tp);
2547 			if (err)
2548 				return err;
2549 			do_phy_reset = 0;
2550 		}
2551 
2552 		/* Disable transmitter and interrupt.  */
2553 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554 			continue;
2555 
2556 		reg32 |= 0x3000;
2557 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558 
2559 		/* Set full-duplex, 1000 mbps.  */
2560 		tg3_writephy(tp, MII_BMCR,
2561 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2562 
2563 		/* Set to master mode.  */
2564 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565 			continue;
2566 
2567 		tg3_writephy(tp, MII_CTRL1000,
2568 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569 
2570 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571 		if (err)
2572 			return err;
2573 
2574 		/* Block the PHY control access.  */
2575 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2576 
2577 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578 		if (!err)
2579 			break;
2580 	} while (--retries);
2581 
2582 	err = tg3_phy_reset_chanpat(tp);
2583 	if (err)
2584 		return err;
2585 
2586 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2587 
2588 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590 
2591 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2592 
2593 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594 
2595 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2596 		reg32 &= ~0x3000;
2597 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2598 	} else if (!err)
2599 		err = -EBUSY;
2600 
2601 	return err;
2602 }
2603 
2604 static void tg3_carrier_off(struct tg3 *tp)
2605 {
2606 	netif_carrier_off(tp->dev);
2607 	tp->link_up = false;
2608 }
2609 
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2611 {
2612 	if (tg3_flag(tp, ENABLE_ASF))
2613 		netdev_warn(tp->dev,
2614 			    "Management side-band traffic will be interrupted during phy settings change\n");
2615 }
2616 
2617 /* Reset the tigon3 PHY unconditionally and reapply the
2618  * chip-specific PHY workarounds and DSP fixups.
2619  */
2620 static int tg3_phy_reset(struct tg3 *tp)
2621 {
2622 	u32 val, cpmuctrl;
2623 	int err;
2624 
2625 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2626 		val = tr32(GRC_MISC_CFG);
2627 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2628 		udelay(40);
2629 	}
2630 	err  = tg3_readphy(tp, MII_BMSR, &val);
2631 	err |= tg3_readphy(tp, MII_BMSR, &val);
2632 	if (err != 0)
2633 		return -EBUSY;
2634 
2635 	if (netif_running(tp->dev) && tp->link_up) {
2636 		netif_carrier_off(tp->dev);
2637 		tg3_link_report(tp);
2638 	}
2639 
2640 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2641 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2642 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2643 		err = tg3_phy_reset_5703_4_5(tp);
2644 		if (err)
2645 			return err;
2646 		goto out;
2647 	}
2648 
2649 	cpmuctrl = 0;
2650 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2651 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2652 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2653 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2654 			tw32(TG3_CPMU_CTRL,
2655 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2656 	}
2657 
2658 	err = tg3_bmcr_reset(tp);
2659 	if (err)
2660 		return err;
2661 
2662 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2663 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2664 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2665 
2666 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2667 	}
2668 
2669 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2670 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2671 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2672 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2673 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2674 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2675 			udelay(40);
2676 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2677 		}
2678 	}
2679 
2680 	if (tg3_flag(tp, 5717_PLUS) &&
2681 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2682 		return 0;
2683 
2684 	tg3_phy_apply_otp(tp);
2685 
2686 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2687 		tg3_phy_toggle_apd(tp, true);
2688 	else
2689 		tg3_phy_toggle_apd(tp, false);
2690 
2691 out:
2692 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2693 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2694 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2695 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2696 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2697 	}
2698 
2699 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2700 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2701 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2702 	}
2703 
2704 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2705 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2707 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2708 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2709 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2710 		}
2711 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2712 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2714 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2715 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2716 				tg3_writephy(tp, MII_TG3_TEST1,
2717 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2718 			} else
2719 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2720 
2721 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 		}
2723 	}
2724 
2725 	/* Set Extended packet length bit (bit 14) on all chips that
2726 	 * support jumbo frames. */
2727 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2728 		/* Cannot do read-modify-write on 5401 */
2729 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2730 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2731 		/* Set bit 14 with read-modify-write to preserve other bits */
2732 		err = tg3_phy_auxctl_read(tp,
2733 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2734 		if (!err)
2735 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2736 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2737 	}
2738 
2739 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2740 	 * jumbo frames transmission.
2741 	 */
2742 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2744 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2745 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2746 	}
2747 
2748 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2749 		/* adjust output voltage */
2750 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2751 	}
2752 
2753 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2754 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2755 
2756 	tg3_phy_toggle_automdix(tp, true);
2757 	tg3_phy_set_wirespeed(tp);
2758 	return 0;
2759 }
2760 
2761 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2763 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2764 					  TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2770 
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2776 
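/* Each of the four PCI functions owns a 4-bit slice of the shared
 * GPIO message word (hence the << 0/4/8/12 masks above), starting at
 * bit TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn.  This helper updates only
 * the calling function's slice and returns the whole message field so
 * that tg3_frob_aux_power_5717() can inspect its siblings' status.
 */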
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2778 {
2779 	u32 status, shift;
2780 
2781 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2783 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2784 	else
2785 		status = tr32(TG3_CPMU_DRV_STATUS);
2786 
2787 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2789 	status |= (newstat << shift);
2790 
2791 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2793 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2794 	else
2795 		tw32(TG3_CPMU_DRV_STATUS, status);
2796 
2797 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2798 }
2799 
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2801 {
2802 	if (!tg3_flag(tp, IS_NIC))
2803 		return 0;
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2808 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2809 			return -EIO;
2810 
2811 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2812 
2813 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 
2816 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2817 	} else {
2818 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 	}
2821 
2822 	return 0;
2823 }
2824 
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2826 {
2827 	u32 grc_local_ctrl;
2828 
2829 	if (!tg3_flag(tp, IS_NIC) ||
2830 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2832 		return;
2833 
2834 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2835 
2836 	tw32_wait_f(GRC_LOCAL_CTRL,
2837 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 
2840 	tw32_wait_f(GRC_LOCAL_CTRL,
2841 		    grc_local_ctrl,
2842 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 
2844 	tw32_wait_f(GRC_LOCAL_CTRL,
2845 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 }
2848 
2849 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2850 {
2851 	if (!tg3_flag(tp, IS_NIC))
2852 		return;
2853 
2854 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2856 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2857 			    (GRC_LCLCTRL_GPIO_OE0 |
2858 			     GRC_LCLCTRL_GPIO_OE1 |
2859 			     GRC_LCLCTRL_GPIO_OE2 |
2860 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2862 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2864 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2865 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2866 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2867 				     GRC_LCLCTRL_GPIO_OE1 |
2868 				     GRC_LCLCTRL_GPIO_OE2 |
2869 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2871 				     tp->grc_local_ctrl;
2872 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2873 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 
2875 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2876 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 
2879 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2880 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 	} else {
2883 		u32 no_gpio2;
2884 		u32 grc_local_ctrl = 0;
2885 
2886 		/* Workaround to prevent overdrawing Amps. */
2887 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2888 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2889 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2890 				    grc_local_ctrl,
2891 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 		}
2893 
2894 		/* On 5753 and variants, GPIO2 cannot be used. */
2895 		no_gpio2 = tp->nic_sram_data_cfg &
2896 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2897 
2898 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2899 				  GRC_LCLCTRL_GPIO_OE1 |
2900 				  GRC_LCLCTRL_GPIO_OE2 |
2901 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2902 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2903 		if (no_gpio2) {
2904 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2905 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2906 		}
2907 		tw32_wait_f(GRC_LOCAL_CTRL,
2908 			    tp->grc_local_ctrl | grc_local_ctrl,
2909 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2910 
2911 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2912 
2913 		tw32_wait_f(GRC_LOCAL_CTRL,
2914 			    tp->grc_local_ctrl | grc_local_ctrl,
2915 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2916 
2917 		if (!no_gpio2) {
2918 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2919 			tw32_wait_f(GRC_LOCAL_CTRL,
2920 				    tp->grc_local_ctrl | grc_local_ctrl,
2921 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2922 		}
2923 	}
2924 }
2925 
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2927 {
2928 	u32 msg = 0;
2929 
2930 	/* Serialize power state transitions */
2931 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2932 		return;
2933 
2934 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935 		msg = TG3_GPIO_MSG_NEED_VAUX;
2936 
2937 	msg = tg3_set_function_status(tp, msg);
2938 
2939 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2940 		goto done;
2941 
2942 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943 		tg3_pwrsrc_switch_to_vaux(tp);
2944 	else
2945 		tg3_pwrsrc_die_with_vmain(tp);
2946 
2947 done:
2948 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2949 }
2950 
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2952 {
2953 	bool need_vaux = false;
2954 
2955 	/* The GPIOs do something completely different on 57765. */
2956 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2957 		return;
2958 
2959 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2962 		tg3_frob_aux_power_5717(tp, include_wol ?
2963 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2964 		return;
2965 	}
2966 
2967 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968 		struct net_device *dev_peer;
2969 
2970 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2971 
2972 		/* remove_one() may have been run on the peer. */
2973 		if (dev_peer) {
2974 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2975 
2976 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2977 				return;
2978 
2979 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980 			    tg3_flag(tp_peer, ENABLE_ASF))
2981 				need_vaux = true;
2982 		}
2983 	}
2984 
2985 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986 	    tg3_flag(tp, ENABLE_ASF))
2987 		need_vaux = true;
2988 
2989 	if (need_vaux)
2990 		tg3_pwrsrc_switch_to_vaux(tp);
2991 	else
2992 		tg3_pwrsrc_die_with_vmain(tp);
2993 }
2994 
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2996 {
2997 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2998 		return 1;
2999 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000 		if (speed != SPEED_10)
3001 			return 1;
3002 	} else if (speed == SPEED_10)
3003 		return 1;
3004 
3005 	return 0;
3006 }
3007 
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3009 {
3010 	switch (tg3_asic_rev(tp)) {
3011 	case ASIC_REV_5700:
3012 	case ASIC_REV_5704:
3013 		return true;
3014 	case ASIC_REV_5780:
3015 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3016 			return true;
3017 		return false;
3018 	case ASIC_REV_5717:
3019 		if (!tp->pci_fn)
3020 			return true;
3021 		return false;
3022 	case ASIC_REV_5719:
3023 	case ASIC_REV_5720:
3024 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3025 		    !tp->pci_fn)
3026 			return true;
3027 		return false;
3028 	}
3029 
3030 	return false;
3031 }
3032 
3033 static bool tg3_phy_led_bug(struct tg3 *tp)
3034 {
3035 	switch (tg3_asic_rev(tp)) {
3036 	case ASIC_REV_5719:
3037 	case ASIC_REV_5720:
3038 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3039 		    !tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3048 {
3049 	u32 val;
3050 
3051 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3052 		return;
3053 
3054 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3055 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3056 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3057 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3058 
3059 			sg_dig_ctrl |=
3060 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3061 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3062 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3063 		}
3064 		return;
3065 	}
3066 
3067 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3068 		tg3_bmcr_reset(tp);
3069 		val = tr32(GRC_MISC_CFG);
3070 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3071 		udelay(40);
3072 		return;
3073 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3074 		u32 phytest;
3075 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3076 			u32 phy;
3077 
3078 			tg3_writephy(tp, MII_ADVERTISE, 0);
3079 			tg3_writephy(tp, MII_BMCR,
3080 				     BMCR_ANENABLE | BMCR_ANRESTART);
3081 
3082 			tg3_writephy(tp, MII_TG3_FET_TEST,
3083 				     phytest | MII_TG3_FET_SHADOW_EN);
3084 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3085 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3086 				tg3_writephy(tp,
3087 					     MII_TG3_FET_SHDW_AUXMODE4,
3088 					     phy);
3089 			}
3090 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3091 		}
3092 		return;
3093 	} else if (do_low_power) {
3094 		if (!tg3_phy_led_bug(tp))
3095 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3096 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3097 
3098 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3099 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3100 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3101 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3102 	}
3103 
3104 	/* On some chips the PHY must not be powered down, due to
3105 	 * hardware bugs enumerated in tg3_phy_power_bug() above.
3106 	 */
3107 	if (tg3_phy_power_bug(tp))
3108 		return;
3109 
3110 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3111 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3112 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3113 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3114 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3115 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3116 	}
3117 
3118 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3119 }
3120 
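/* NVRAM access is arbitrated between the driver and on-chip firmware
 * through the software arbitration (SWARB) register: REQ_SET1/GNT1
 * form a request/grant pair.  The lock is refcounted via
 * nvram_lock_cnt so nested driver acquisitions collapse into a single
 * hardware request.
 */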
3121 /* tp->lock is held. */
3122 static int tg3_nvram_lock(struct tg3 *tp)
3123 {
3124 	if (tg3_flag(tp, NVRAM)) {
3125 		int i;
3126 
3127 		if (tp->nvram_lock_cnt == 0) {
3128 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3129 			for (i = 0; i < 8000; i++) {
3130 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3131 					break;
3132 				udelay(20);
3133 			}
3134 			if (i == 8000) {
3135 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3136 				return -ENODEV;
3137 			}
3138 		}
3139 		tp->nvram_lock_cnt++;
3140 	}
3141 	return 0;
3142 }
3143 
3144 /* tp->lock is held. */
3145 static void tg3_nvram_unlock(struct tg3 *tp)
3146 {
3147 	if (tg3_flag(tp, NVRAM)) {
3148 		if (tp->nvram_lock_cnt > 0)
3149 			tp->nvram_lock_cnt--;
3150 		if (tp->nvram_lock_cnt == 0)
3151 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3152 	}
3153 }
3154 
3155 /* tp->lock is held. */
3156 static void tg3_enable_nvram_access(struct tg3 *tp)
3157 {
3158 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3159 		u32 nvaccess = tr32(NVRAM_ACCESS);
3160 
3161 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3162 	}
3163 }
3164 
3165 /* tp->lock is held. */
3166 static void tg3_disable_nvram_access(struct tg3 *tp)
3167 {
3168 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3169 		u32 nvaccess = tr32(NVRAM_ACCESS);
3170 
3171 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3172 	}
3173 }
3174 
3175 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3176 					u32 offset, u32 *val)
3177 {
3178 	u32 tmp;
3179 	int i;
3180 
3181 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3182 		return -EINVAL;
3183 
3184 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3185 					EEPROM_ADDR_DEVID_MASK |
3186 					EEPROM_ADDR_READ);
3187 	tw32(GRC_EEPROM_ADDR,
3188 	     tmp |
3189 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3190 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3191 	      EEPROM_ADDR_ADDR_MASK) |
3192 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3193 
3194 	for (i = 0; i < 1000; i++) {
3195 		tmp = tr32(GRC_EEPROM_ADDR);
3196 
3197 		if (tmp & EEPROM_ADDR_COMPLETE)
3198 			break;
3199 		msleep(1);
3200 	}
3201 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3202 		return -EBUSY;
3203 
3204 	tmp = tr32(GRC_EEPROM_DATA);
3205 
3206 	/*
3207 	 * The data will always be opposite the native endian
3208 	 * format.  Perform a blind byteswap to compensate.
3209 	 */
3210 	*val = swab32(tmp);
3211 
3212 	return 0;
3213 }
3214 
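/* Worst-case wait in tg3_nvram_exec_cmd() below:
 * 10000 polls x 10 us = ~100 ms.
 */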
3215 #define NVRAM_CMD_TIMEOUT 10000
3216 
3217 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3218 {
3219 	int i;
3220 
3221 	tw32(NVRAM_CMD, nvram_cmd);
3222 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3223 		udelay(10);
3224 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3225 			udelay(10);
3226 			break;
3227 		}
3228 	}
3229 
3230 	if (i == NVRAM_CMD_TIMEOUT)
3231 		return -EBUSY;
3232 
3233 	return 0;
3234 }
3235 
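/* Translate a linear NVRAM offset into the page/offset form used by
 * Atmel AT45DB0x1B parts, whose pages are not a power of two in size.
 * Worked example, assuming the usual 264-byte page size and an
 * ATMEL_AT45DB0X1B_PAGE_POS of 9:
 *   addr 1000 -> page 3, byte 208 -> (3 << 9) + 208 = 0x6d0.
 */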
3236 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3237 {
3238 	if (tg3_flag(tp, NVRAM) &&
3239 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3240 	    tg3_flag(tp, FLASH) &&
3241 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3242 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3243 
3244 		addr = ((addr / tp->nvram_pagesize) <<
3245 			ATMEL_AT45DB0X1B_PAGE_POS) +
3246 		       (addr % tp->nvram_pagesize);
3247 
3248 	return addr;
3249 }
3250 
3251 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3252 {
3253 	if (tg3_flag(tp, NVRAM) &&
3254 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3255 	    tg3_flag(tp, FLASH) &&
3256 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3257 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3258 
3259 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3260 			tp->nvram_pagesize) +
3261 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3262 
3263 	return addr;
3264 }
3265 
3266 /* NOTE: Data read in from NVRAM is byteswapped according to
3267  * the byteswapping settings for all other register accesses.
3268  * tg3 devices are BE devices, so on a BE machine, the data
3269  * returned will be exactly as it is seen in NVRAM.  On a LE
3270  * machine, the 32-bit value will be byteswapped.
3271  */
3272 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3273 {
3274 	int ret;
3275 
3276 	if (!tg3_flag(tp, NVRAM))
3277 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3278 
3279 	offset = tg3_nvram_phys_addr(tp, offset);
3280 
3281 	if (offset > NVRAM_ADDR_MSK)
3282 		return -EINVAL;
3283 
3284 	ret = tg3_nvram_lock(tp);
3285 	if (ret)
3286 		return ret;
3287 
3288 	tg3_enable_nvram_access(tp);
3289 
3290 	tw32(NVRAM_ADDR, offset);
3291 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3292 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3293 
3294 	if (ret == 0)
3295 		*val = tr32(NVRAM_RDDATA);
3296 
3297 	tg3_disable_nvram_access(tp);
3298 
3299 	tg3_nvram_unlock(tp);
3300 
3301 	return ret;
3302 }
3303 
3304 /* Ensures NVRAM data is in bytestream format. */
3305 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3306 {
3307 	u32 v;
3308 	int res = tg3_nvram_read(tp, offset, &v);
3309 	if (!res)
3310 		*val = cpu_to_be32(v);
3311 	return res;
3312 }
3313 
3314 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3315 				    u32 offset, u32 len, u8 *buf)
3316 {
3317 	int i, j, rc = 0;
3318 	u32 val;
3319 
3320 	for (i = 0; i < len; i += 4) {
3321 		u32 addr;
3322 		__be32 data;
3323 
3324 		addr = offset + i;
3325 
3326 		memcpy(&data, buf + i, 4);
3327 
3328 		/*
3329 		 * The SEEPROM interface expects the data to always be opposite
3330 		 * the native endian format.  We accomplish this by reversing
3331 		 * all the operations that would have been performed on the
3332 		 * data from a call to tg3_nvram_read_be32().
3333 		 */
3334 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3335 
3336 		val = tr32(GRC_EEPROM_ADDR);
3337 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3338 
3339 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3340 			EEPROM_ADDR_READ);
3341 		tw32(GRC_EEPROM_ADDR, val |
3342 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3343 			(addr & EEPROM_ADDR_ADDR_MASK) |
3344 			EEPROM_ADDR_START |
3345 			EEPROM_ADDR_WRITE);
3346 
3347 		for (j = 0; j < 1000; j++) {
3348 			val = tr32(GRC_EEPROM_ADDR);
3349 
3350 			if (val & EEPROM_ADDR_COMPLETE)
3351 				break;
3352 			msleep(1);
3353 		}
3354 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3355 			rc = -EBUSY;
3356 			break;
3357 		}
3358 	}
3359 
3360 	return rc;
3361 }
3362 
3363 /* offset and length are dword aligned */
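/* Unbuffered flash parts must be written a full page at a time: read
 * the enclosing page into a bounce buffer, merge in the caller's
 * bytes, issue a write-enable and page erase, then stream the merged
 * page back with FIRST/LAST framing on the first and final words.
 */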
3364 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3365 		u8 *buf)
3366 {
3367 	int ret = 0;
3368 	u32 pagesize = tp->nvram_pagesize;
3369 	u32 pagemask = pagesize - 1;
3370 	u32 nvram_cmd;
3371 	u8 *tmp;
3372 
3373 	tmp = kmalloc(pagesize, GFP_KERNEL);
3374 	if (tmp == NULL)
3375 		return -ENOMEM;
3376 
3377 	while (len) {
3378 		int j;
3379 		u32 phy_addr, page_off, size;
3380 
3381 		phy_addr = offset & ~pagemask;
3382 
3383 		for (j = 0; j < pagesize; j += 4) {
3384 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3385 						  (__be32 *) (tmp + j));
3386 			if (ret)
3387 				break;
3388 		}
3389 		if (ret)
3390 			break;
3391 
3392 		page_off = offset & pagemask;
3393 		size = pagesize;
3394 		if (len < size)
3395 			size = len;
3396 
3397 		len -= size;
3398 
3399 		memcpy(tmp + page_off, buf, size);
3400 
3401 		offset = offset + (pagesize - page_off);
3402 
3403 		tg3_enable_nvram_access(tp);
3404 
3405 		/*
3406 		 * Before we can erase the flash page, we need
3407 		 * to issue a special "write enable" command.
3408 		 */
3409 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3410 
3411 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3412 			break;
3413 
3414 		/* Erase the target page */
3415 		tw32(NVRAM_ADDR, phy_addr);
3416 
3417 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3418 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3419 
3420 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 			break;
3422 
3423 		/* Issue another write enable to start the write. */
3424 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3425 
3426 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3427 			break;
3428 
3429 		for (j = 0; j < pagesize; j += 4) {
3430 			__be32 data;
3431 
3432 			data = *((__be32 *) (tmp + j));
3433 
3434 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3435 
3436 			tw32(NVRAM_ADDR, phy_addr + j);
3437 
3438 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3439 				NVRAM_CMD_WR;
3440 
3441 			if (j == 0)
3442 				nvram_cmd |= NVRAM_CMD_FIRST;
3443 			else if (j == (pagesize - 4))
3444 				nvram_cmd |= NVRAM_CMD_LAST;
3445 
3446 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3447 			if (ret)
3448 				break;
3449 		}
3450 		if (ret)
3451 			break;
3452 	}
3453 
3454 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3455 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 
3457 	kfree(tmp);
3458 
3459 	return ret;
3460 }
3461 
3462 /* offset and length are dword aligned */
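/* Buffered parts need no explicit erase: each word is streamed with
 * NVRAM_CMD_FIRST asserted at a page boundary (or at the start of the
 * transfer) and NVRAM_CMD_LAST at a page end or at the final word.
 */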
3463 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3464 		u8 *buf)
3465 {
3466 	int i, ret = 0;
3467 
3468 	for (i = 0; i < len; i += 4, offset += 4) {
3469 		u32 page_off, phy_addr, nvram_cmd;
3470 		__be32 data;
3471 
3472 		memcpy(&data, buf + i, 4);
3473 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3474 
3475 		page_off = offset % tp->nvram_pagesize;
3476 
3477 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3478 
3479 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3480 
3481 		if (page_off == 0 || i == 0)
3482 			nvram_cmd |= NVRAM_CMD_FIRST;
3483 		if (page_off == (tp->nvram_pagesize - 4))
3484 			nvram_cmd |= NVRAM_CMD_LAST;
3485 
3486 		if (i == (len - 4))
3487 			nvram_cmd |= NVRAM_CMD_LAST;
3488 
3489 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3490 		    !tg3_flag(tp, FLASH) ||
3491 		    !tg3_flag(tp, 57765_PLUS))
3492 			tw32(NVRAM_ADDR, phy_addr);
3493 
3494 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3495 		    !tg3_flag(tp, 5755_PLUS) &&
3496 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3497 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3498 			u32 cmd;
3499 
3500 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3501 			ret = tg3_nvram_exec_cmd(tp, cmd);
3502 			if (ret)
3503 				break;
3504 		}
3505 		if (!tg3_flag(tp, FLASH)) {
3506 			/* We always do complete word writes to eeprom. */
3507 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3508 		}
3509 
3510 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3511 		if (ret)
3512 			break;
3513 	}
3514 	return ret;
3515 }
3516 
3517 /* offset and length are dword aligned */
3518 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3519 {
3520 	int ret;
3521 
3522 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3523 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3524 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3525 		udelay(40);
3526 	}
3527 
3528 	if (!tg3_flag(tp, NVRAM)) {
3529 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3530 	} else {
3531 		u32 grc_mode;
3532 
3533 		ret = tg3_nvram_lock(tp);
3534 		if (ret)
3535 			return ret;
3536 
3537 		tg3_enable_nvram_access(tp);
3538 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3539 			tw32(NVRAM_WRITE1, 0x406);
3540 
3541 		grc_mode = tr32(GRC_MODE);
3542 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3543 
3544 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3545 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3546 				buf);
3547 		} else {
3548 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3549 				buf);
3550 		}
3551 
3552 		grc_mode = tr32(GRC_MODE);
3553 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3554 
3555 		tg3_disable_nvram_access(tp);
3556 		tg3_nvram_unlock(tp);
3557 	}
3558 
3559 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3560 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3561 		udelay(40);
3562 	}
3563 
3564 	return ret;
3565 }
3566 
3567 #define RX_CPU_SCRATCH_BASE	0x30000
3568 #define RX_CPU_SCRATCH_SIZE	0x04000
3569 #define TX_CPU_SCRATCH_BASE	0x34000
3570 #define TX_CPU_SCRATCH_SIZE	0x04000
3571 
3572 /* tp->lock is held. */
3573 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3574 {
3575 	int i;
3576 	const int iters = 10000;
3577 
3578 	for (i = 0; i < iters; i++) {
3579 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3580 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3581 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3582 			break;
3583 		if (pci_channel_offline(tp->pdev))
3584 			return -EBUSY;
3585 	}
3586 
3587 	return (i == iters) ? -EBUSY : 0;
3588 }
3589 
3590 /* tp->lock is held. */
3591 static int tg3_rxcpu_pause(struct tg3 *tp)
3592 {
3593 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3594 
3595 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3596 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3597 	udelay(10);
3598 
3599 	return rc;
3600 }
3601 
3602 /* tp->lock is held. */
3603 static int tg3_txcpu_pause(struct tg3 *tp)
3604 {
3605 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3606 }
3607 
3608 /* tp->lock is held. */
3609 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3610 {
3611 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3612 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3613 }
3614 
3615 /* tp->lock is held. */
3616 static void tg3_rxcpu_resume(struct tg3 *tp)
3617 {
3618 	tg3_resume_cpu(tp, RX_CPU_BASE);
3619 }
3620 
3621 /* tp->lock is held. */
3622 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624 	int rc;
3625 
3626 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3627 
3628 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3629 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3630 
3631 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3632 		return 0;
3633 	}
3634 	if (cpu_base == RX_CPU_BASE) {
3635 		rc = tg3_rxcpu_pause(tp);
3636 	} else {
3637 		/*
3638 		 * There is only an Rx CPU for the 5750 derivative in the
3639 		 * BCM4785.
3640 		 */
3641 		if (tg3_flag(tp, IS_SSB_CORE))
3642 			return 0;
3643 
3644 		rc = tg3_txcpu_pause(tp);
3645 	}
3646 
3647 	if (rc) {
3648 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3649 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3650 		return -ENODEV;
3651 	}
3652 
3653 	/* Clear firmware's nvram arbitration. */
3654 	if (tg3_flag(tp, NVRAM))
3655 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3656 	return 0;
3657 }
3658 
3659 static int tg3_fw_data_len(struct tg3 *tp,
3660 			   const struct tg3_firmware_hdr *fw_hdr)
3661 {
3662 	int fw_len;
3663 
3664 	/* Non-fragmented firmware has one firmware header followed by a
3665 	 * contiguous chunk of data to be written. The length field in that
3666 	 * header is not the length of the data to be written but the
3667 	 * complete length of the bss. The data length is determined from
3668 	 * tp->fw->size minus headers.
3669 	 *
3670 	 * Fragmented firmware has a main header followed by multiple
3671 	 * fragments. Each fragment is identical to non-fragmented firmware
3672 	 * with a firmware header followed by a contiguous chunk of data. In
3673 	 * the main header, the length field is unused and set to 0xffffffff.
3674 	 * In each fragment header the length is the entire size of that
3675 	 * fragment, i.e. fragment data + header length. The data length is
3676 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3677 	 */
3678 	if (tp->fw_len == 0xffffffff)
3679 		fw_len = be32_to_cpu(fw_hdr->len);
3680 	else
3681 		fw_len = tp->fw->size;
3682 
3683 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3684 }
3685 
3686 /* tp->lock is held. */
3687 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3688 				 u32 cpu_scratch_base, int cpu_scratch_size,
3689 				 const struct tg3_firmware_hdr *fw_hdr)
3690 {
3691 	int err, i;
3692 	void (*write_op)(struct tg3 *, u32, u32);
3693 	int total_len = tp->fw->size;
3694 
3695 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3696 		netdev_err(tp->dev,
3697 			   "%s: Trying to load TX cpu firmware, but 5705-class chips have no TX cpu\n",
3698 			   __func__);
3699 		return -EINVAL;
3700 	}
3701 
3702 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3703 		write_op = tg3_write_mem;
3704 	else
3705 		write_op = tg3_write_indirect_reg32;
3706 
3707 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3708 		/* It is possible that bootcode is still loading at this point.
3709 		 * Get the nvram lock first before halting the cpu.
3710 		 */
3711 		int lock_err = tg3_nvram_lock(tp);
3712 		err = tg3_halt_cpu(tp, cpu_base);
3713 		if (!lock_err)
3714 			tg3_nvram_unlock(tp);
3715 		if (err)
3716 			goto out;
3717 
3718 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3719 			write_op(tp, cpu_scratch_base + i, 0);
3720 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3721 		tw32(cpu_base + CPU_MODE,
3722 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3723 	} else {
3724 		/* Subtract additional main header for fragmented firmware and
3725 		 * advance to the first fragment
3726 		 */
3727 		total_len -= TG3_FW_HDR_LEN;
3728 		fw_hdr++;
3729 	}
3730 
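	/* Copy each header's payload into CPU scratch memory at the
	 * offset given by the low 16 bits of its base_addr, then step to
	 * the next fragment header.  Non-fragmented firmware simply
	 * makes a single pass through this loop.
	 */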
3731 	do {
3732 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3733 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3734 			write_op(tp, cpu_scratch_base +
3735 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3736 				     (i * sizeof(u32)),
3737 				 be32_to_cpu(fw_data[i]));
3738 
3739 		total_len -= be32_to_cpu(fw_hdr->len);
3740 
3741 		/* Advance to next fragment */
3742 		fw_hdr = (struct tg3_firmware_hdr *)
3743 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3744 	} while (total_len > 0);
3745 
3746 	err = 0;
3747 
3748 out:
3749 	return err;
3750 }
3751 
3752 /* tp->lock is held. */
3753 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3754 {
3755 	int i;
3756 	const int iters = 5;
3757 
3758 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3759 	tw32_f(cpu_base + CPU_PC, pc);
3760 
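	/* Retry up to iters times; each failed attempt halts the CPU,
	 * rewrites the PC and waits 1 ms, giving the CPU roughly 5 ms
	 * in total to latch the new PC.
	 */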
3761 	for (i = 0; i < iters; i++) {
3762 		if (tr32(cpu_base + CPU_PC) == pc)
3763 			break;
3764 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3765 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3766 		tw32_f(cpu_base + CPU_PC, pc);
3767 		udelay(1000);
3768 	}
3769 
3770 	return (i == iters) ? -EBUSY : 0;
3771 }
3772 
3773 /* tp->lock is held. */
3774 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3775 {
3776 	const struct tg3_firmware_hdr *fw_hdr;
3777 	int err;
3778 
3779 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3780 
3781 	/* The firmware blob starts with version numbers, followed by
3782 	 * start address and length. The length field holds the complete
3783 	 * length: length = end_address_of_bss - start_address_of_text.
3784 	 * The remainder is the blob to be loaded contiguously
3785 	 * from the start address. */
3786 
3787 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3788 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3789 				    fw_hdr);
3790 	if (err)
3791 		return err;
3792 
3793 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3794 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3795 				    fw_hdr);
3796 	if (err)
3797 		return err;
3798 
3799 	/* Now startup only the RX cpu. */
3800 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3801 				       be32_to_cpu(fw_hdr->base_addr));
3802 	if (err) {
3803 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3804 			   "should be %08x\n", __func__,
3805 			   tr32(RX_CPU_BASE + CPU_PC),
3806 				be32_to_cpu(fw_hdr->base_addr));
3807 		return -ENODEV;
3808 	}
3809 
3810 	tg3_rxcpu_resume(tp);
3811 
3812 	return 0;
3813 }
3814 
3815 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3816 {
3817 	const int iters = 1000;
3818 	int i;
3819 	u32 val;
3820 
3821 	/* Wait for boot code to complete initialization and enter service
3822 	 * loop. It is then safe to download service patches.
3823 	 */
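	/* Poll budget: 1000 iterations x 10 us = roughly 10 ms. */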
3824 	for (i = 0; i < iters; i++) {
3825 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3826 			break;
3827 
3828 		udelay(10);
3829 	}
3830 
3831 	if (i == iters) {
3832 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3833 		return -EBUSY;
3834 	}
3835 
3836 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3837 	if (val & 0xff) {
3838 		netdev_warn(tp->dev,
3839 			    "Other patches exist. Not downloading EEE patch\n");
3840 		return -EEXIST;
3841 	}
3842 
3843 	return 0;
3844 }
3845 
3846 /* tp->lock is held. */
3847 static void tg3_load_57766_firmware(struct tg3 *tp)
3848 {
3849 	struct tg3_firmware_hdr *fw_hdr;
3850 
3851 	if (!tg3_flag(tp, NO_NVRAM))
3852 		return;
3853 
3854 	if (tg3_validate_rxcpu_state(tp))
3855 		return;
3856 
3857 	if (!tp->fw)
3858 		return;
3859 
3860 	/* This firmware blob has a different format from older firmware
3861 	 * releases, as described below. The main difference is that we have
3862 	 * fragmented data to be written to non-contiguous locations.
3863 	 *
3864 	 * At the beginning there is a firmware header identical to other
3865 	 * firmware, consisting of version, base addr and length. The length
3866 	 * here is unused and set to 0xffffffff.
3867 	 *
3868 	 * This is followed by a series of firmware fragments, each of which
3869 	 * is individually identical to older firmware, i.e. a firmware
3870 	 * header followed by the data for that fragment. The version
3871 	 * field of the individual fragment headers is unused.
3872 	 */
3873 
3874 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3875 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3876 		return;
3877 
3878 	if (tg3_rxcpu_pause(tp))
3879 		return;
3880 
3881 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3882 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3883 
3884 	tg3_rxcpu_resume(tp);
3885 }
3886 
3887 /* tp->lock is held. */
3888 static int tg3_load_tso_firmware(struct tg3 *tp)
3889 {
3890 	const struct tg3_firmware_hdr *fw_hdr;
3891 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3892 	int err;
3893 
3894 	if (!tg3_flag(tp, FW_TSO))
3895 		return 0;
3896 
3897 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3898 
3899 	/* The firmware blob starts with version numbers, followed by
3900 	 * start address and length. The length field holds the complete
3901 	 * length: length = end_address_of_bss - start_address_of_text.
3902 	 * The remainder is the blob to be loaded contiguously
3903 	 * from the start address. */
3904 
3905 	cpu_scratch_size = tp->fw_len;
3906 
3907 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3908 		cpu_base = RX_CPU_BASE;
3909 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3910 	} else {
3911 		cpu_base = TX_CPU_BASE;
3912 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3913 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3914 	}
3915 
3916 	err = tg3_load_firmware_cpu(tp, cpu_base,
3917 				    cpu_scratch_base, cpu_scratch_size,
3918 				    fw_hdr);
3919 	if (err)
3920 		return err;
3921 
3922 	/* Now startup the cpu. */
3923 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3924 				       be32_to_cpu(fw_hdr->base_addr));
3925 	if (err) {
3926 		netdev_err(tp->dev,
3927 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3928 			   __func__, tr32(cpu_base + CPU_PC),
3929 			   be32_to_cpu(fw_hdr->base_addr));
3930 		return -ENODEV;
3931 	}
3932 
3933 	tg3_resume_cpu(tp, cpu_base);
3934 	return 0;
3935 }
3936 
3937 
3938 /* tp->lock is held. */
3939 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3940 {
3941 	u32 addr_high, addr_low;
3942 	int i;
3943 
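	/* Packing example (illustrative): a dev_addr of 00:10:18:aa:bb:cc
	 * yields addr_high = 0x00000010 and addr_low = 0x18aabbcc.
	 */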
3944 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3945 		     tp->dev->dev_addr[1]);
3946 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3947 		    (tp->dev->dev_addr[3] << 16) |
3948 		    (tp->dev->dev_addr[4] <<  8) |
3949 		    (tp->dev->dev_addr[5] <<  0));
3950 	for (i = 0; i < 4; i++) {
3951 		if (i == 1 && skip_mac_1)
3952 			continue;
3953 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3954 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3955 	}
3956 
3957 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3958 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3959 		for (i = 0; i < 12; i++) {
3960 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3961 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3962 		}
3963 	}
3964 
3965 	addr_high = (tp->dev->dev_addr[0] +
3966 		     tp->dev->dev_addr[1] +
3967 		     tp->dev->dev_addr[2] +
3968 		     tp->dev->dev_addr[3] +
3969 		     tp->dev->dev_addr[4] +
3970 		     tp->dev->dev_addr[5]) &
3971 		TX_BACKOFF_SEED_MASK;
3972 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3973 }
3974 
3975 static void tg3_enable_register_access(struct tg3 *tp)
3976 {
3977 	/*
3978 	 * Make sure register accesses (indirect or otherwise) will function
3979 	 * correctly.
3980 	 */
3981 	pci_write_config_dword(tp->pdev,
3982 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3983 }
3984 
3985 static int tg3_power_up(struct tg3 *tp)
3986 {
3987 	int err;
3988 
3989 	tg3_enable_register_access(tp);
3990 
3991 	err = pci_set_power_state(tp->pdev, PCI_D0);
3992 	if (!err) {
3993 		/* Switch out of Vaux if it is a NIC */
3994 		tg3_pwrsrc_switch_to_vmain(tp);
3995 	} else {
3996 		netdev_err(tp->dev, "Transition to D0 failed\n");
3997 	}
3998 
3999 	return err;
4000 }
4001 
4002 static int tg3_setup_phy(struct tg3 *, bool);
4003 
4004 static int tg3_power_down_prepare(struct tg3 *tp)
4005 {
4006 	u32 misc_host_ctrl;
4007 	bool device_should_wake, do_low_power;
4008 
4009 	tg3_enable_register_access(tp);
4010 
4011 	/* Restore the CLKREQ setting. */
4012 	if (tg3_flag(tp, CLKREQ_BUG))
4013 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4014 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4015 
4016 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4017 	tw32(TG3PCI_MISC_HOST_CTRL,
4018 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4019 
4020 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4021 			     tg3_flag(tp, WOL_ENABLE);
4022 
4023 	if (tg3_flag(tp, USE_PHYLIB)) {
4024 		do_low_power = false;
4025 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4026 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4027 			struct phy_device *phydev;
4028 			u32 phyid, advertising;
4029 
4030 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4031 
4032 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4033 
4034 			tp->link_config.speed = phydev->speed;
4035 			tp->link_config.duplex = phydev->duplex;
4036 			tp->link_config.autoneg = phydev->autoneg;
4037 			tp->link_config.advertising = phydev->advertising;
4038 
4039 			advertising = ADVERTISED_TP |
4040 				      ADVERTISED_Pause |
4041 				      ADVERTISED_Autoneg |
4042 				      ADVERTISED_10baseT_Half;
4043 
4044 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4045 				if (tg3_flag(tp, WOL_SPEED_100MB))
4046 					advertising |=
4047 						ADVERTISED_100baseT_Half |
4048 						ADVERTISED_100baseT_Full |
4049 						ADVERTISED_10baseT_Full;
4050 				else
4051 					advertising |= ADVERTISED_10baseT_Full;
4052 			}
4053 
4054 			phydev->advertising = advertising;
4055 
4056 			phy_start_aneg(phydev);
4057 
4058 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4059 			if (phyid != PHY_ID_BCMAC131) {
4060 				phyid &= PHY_BCM_OUI_MASK;
4061 				if (phyid == PHY_BCM_OUI_1 ||
4062 				    phyid == PHY_BCM_OUI_2 ||
4063 				    phyid == PHY_BCM_OUI_3)
4064 					do_low_power = true;
4065 			}
4066 		}
4067 	} else {
4068 		do_low_power = true;
4069 
4070 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4071 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4072 
4073 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4074 			tg3_setup_phy(tp, false);
4075 	}
4076 
4077 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4078 		u32 val;
4079 
4080 		val = tr32(GRC_VCPU_EXT_CTRL);
4081 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4082 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4083 		int i;
4084 		u32 val;
4085 
4086 		for (i = 0; i < 200; i++) {
4087 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4088 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4089 				break;
4090 			msleep(1);
4091 		}
4092 	}
4093 	if (tg3_flag(tp, WOL_CAP))
4094 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4095 						     WOL_DRV_STATE_SHUTDOWN |
4096 						     WOL_DRV_WOL |
4097 						     WOL_SET_MAGIC_PKT);
4098 
4099 	if (device_should_wake) {
4100 		u32 mac_mode;
4101 
4102 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4103 			if (do_low_power &&
4104 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4105 				tg3_phy_auxctl_write(tp,
4106 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4107 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4108 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4109 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4110 				udelay(40);
4111 			}
4112 
4113 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4114 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4115 			else if (tp->phy_flags &
4116 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4117 				if (tp->link_config.active_speed == SPEED_1000)
4118 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4119 				else
4120 					mac_mode = MAC_MODE_PORT_MODE_MII;
4121 			} else
4122 				mac_mode = MAC_MODE_PORT_MODE_MII;
4123 
4124 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4125 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4126 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4127 					     SPEED_100 : SPEED_10;
4128 				if (tg3_5700_link_polarity(tp, speed))
4129 					mac_mode |= MAC_MODE_LINK_POLARITY;
4130 				else
4131 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4132 			}
4133 		} else {
4134 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4135 		}
4136 
4137 		if (!tg3_flag(tp, 5750_PLUS))
4138 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4139 
4140 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4141 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4142 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4143 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4144 
4145 		if (tg3_flag(tp, ENABLE_APE))
4146 			mac_mode |= MAC_MODE_APE_TX_EN |
4147 				    MAC_MODE_APE_RX_EN |
4148 				    MAC_MODE_TDE_ENABLE;
4149 
4150 		tw32_f(MAC_MODE, mac_mode);
4151 		udelay(100);
4152 
4153 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4154 		udelay(10);
4155 	}
4156 
4157 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4158 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4159 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4160 		u32 base_val;
4161 
4162 		base_val = tp->pci_clock_ctrl;
4163 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4164 			     CLOCK_CTRL_TXCLK_DISABLE);
4165 
4166 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4167 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4168 	} else if (tg3_flag(tp, 5780_CLASS) ||
4169 		   tg3_flag(tp, CPMU_PRESENT) ||
4170 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4171 		/* do nothing */
4172 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4173 		u32 newbits1, newbits2;
4174 
4175 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4176 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4177 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4178 				    CLOCK_CTRL_TXCLK_DISABLE |
4179 				    CLOCK_CTRL_ALTCLK);
4180 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4181 		} else if (tg3_flag(tp, 5705_PLUS)) {
4182 			newbits1 = CLOCK_CTRL_625_CORE;
4183 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4184 		} else {
4185 			newbits1 = CLOCK_CTRL_ALTCLK;
4186 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4187 		}
4188 
4189 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4190 			    40);
4191 
4192 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4193 			    40);
4194 
4195 		if (!tg3_flag(tp, 5705_PLUS)) {
4196 			u32 newbits3;
4197 
4198 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4199 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4200 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4201 					    CLOCK_CTRL_TXCLK_DISABLE |
4202 					    CLOCK_CTRL_44MHZ_CORE);
4203 			} else {
4204 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4205 			}
4206 
4207 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4208 				    tp->pci_clock_ctrl | newbits3, 40);
4209 		}
4210 	}
4211 
4212 	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4213 		tg3_power_down_phy(tp, do_low_power);
4214 
4215 	tg3_frob_aux_power(tp, true);
4216 
4217 	/* Workaround for unstable PLL clock */
4218 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4219 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4220 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4221 		u32 val = tr32(0x7d00);
4222 
4223 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4224 		tw32(0x7d00, val);
4225 		if (!tg3_flag(tp, ENABLE_ASF)) {
4226 			int err;
4227 
4228 			err = tg3_nvram_lock(tp);
4229 			tg3_halt_cpu(tp, RX_CPU_BASE);
4230 			if (!err)
4231 				tg3_nvram_unlock(tp);
4232 		}
4233 	}
4234 
4235 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4236 
4237 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4238 
4239 	return 0;
4240 }
4241 
4242 static void tg3_power_down(struct tg3 *tp)
4243 {
4244 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4245 	pci_set_power_state(tp->pdev, PCI_D3hot);
4246 }
4247 
4248 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4249 {
4250 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4251 	case MII_TG3_AUX_STAT_10HALF:
4252 		*speed = SPEED_10;
4253 		*duplex = DUPLEX_HALF;
4254 		break;
4255 
4256 	case MII_TG3_AUX_STAT_10FULL:
4257 		*speed = SPEED_10;
4258 		*duplex = DUPLEX_FULL;
4259 		break;
4260 
4261 	case MII_TG3_AUX_STAT_100HALF:
4262 		*speed = SPEED_100;
4263 		*duplex = DUPLEX_HALF;
4264 		break;
4265 
4266 	case MII_TG3_AUX_STAT_100FULL:
4267 		*speed = SPEED_100;
4268 		*duplex = DUPLEX_FULL;
4269 		break;
4270 
4271 	case MII_TG3_AUX_STAT_1000HALF:
4272 		*speed = SPEED_1000;
4273 		*duplex = DUPLEX_HALF;
4274 		break;
4275 
4276 	case MII_TG3_AUX_STAT_1000FULL:
4277 		*speed = SPEED_1000;
4278 		*duplex = DUPLEX_FULL;
4279 		break;
4280 
4281 	default:
4282 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4283 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4284 				 SPEED_10;
4285 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4286 				  DUPLEX_HALF;
4287 			break;
4288 		}
4289 		*speed = SPEED_UNKNOWN;
4290 		*duplex = DUPLEX_UNKNOWN;
4291 		break;
4292 	}
4293 }
4294 
4295 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4296 {
4297 	int err = 0;
4298 	u32 val, new_adv;
4299 
4300 	new_adv = ADVERTISE_CSMA;
4301 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4302 	new_adv |= mii_advertise_flowctrl(flowctrl);
4303 
4304 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4305 	if (err)
4306 		goto done;
4307 
4308 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4309 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4310 
4311 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4312 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4313 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4314 
4315 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4316 		if (err)
4317 			goto done;
4318 	}
4319 
4320 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4321 		goto done;
4322 
4323 	tw32(TG3_CPMU_EEE_MODE,
4324 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4325 
4326 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4327 	if (!err) {
4328 		int err2;
4329 
4330 		val = 0;
4331 		/* Advertise 100-BaseTX EEE ability */
4332 		if (advertise & ADVERTISED_100baseT_Full)
4333 			val |= MDIO_AN_EEE_ADV_100TX;
4334 		/* Advertise 1000-BaseT EEE ability */
4335 		if (advertise & ADVERTISED_1000baseT_Full)
4336 			val |= MDIO_AN_EEE_ADV_1000T;
4337 
4338 		if (!tp->eee.eee_enabled) {
4339 			val = 0;
4340 			tp->eee.advertised = 0;
4341 		} else {
4342 			tp->eee.advertised = advertise &
4343 					     (ADVERTISED_100baseT_Full |
4344 					      ADVERTISED_1000baseT_Full);
4345 		}
4346 
4347 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4348 		if (err)
4349 			val = 0;
4350 
4351 		switch (tg3_asic_rev(tp)) {
4352 		case ASIC_REV_5717:
4353 		case ASIC_REV_57765:
4354 		case ASIC_REV_57766:
4355 		case ASIC_REV_5719:
4356 			/* If we advertised any EEE abilities above... */
4357 			if (val)
4358 				val = MII_TG3_DSP_TAP26_ALNOKO |
4359 				      MII_TG3_DSP_TAP26_RMRXSTO |
4360 				      MII_TG3_DSP_TAP26_OPCSINPT;
4361 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4362 			/* Fall through */
4363 		case ASIC_REV_5720:
4364 		case ASIC_REV_5762:
4365 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4366 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4367 						 MII_TG3_DSP_CH34TP2_HIBW01);
4368 		}
4369 
4370 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4371 		if (!err)
4372 			err = err2;
4373 	}
4374 
4375 done:
4376 	return err;
4377 }
4378 
4379 static void tg3_phy_copper_begin(struct tg3 *tp)
4380 {
4381 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4382 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4383 		u32 adv, fc;
4384 
4385 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4386 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4387 			adv = ADVERTISED_10baseT_Half |
4388 			      ADVERTISED_10baseT_Full;
4389 			if (tg3_flag(tp, WOL_SPEED_100MB))
4390 				adv |= ADVERTISED_100baseT_Half |
4391 				       ADVERTISED_100baseT_Full;
4392 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4393 				adv |= ADVERTISED_1000baseT_Half |
4394 				       ADVERTISED_1000baseT_Full;
4395 
4396 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4397 		} else {
4398 			adv = tp->link_config.advertising;
4399 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4400 				adv &= ~(ADVERTISED_1000baseT_Half |
4401 					 ADVERTISED_1000baseT_Full);
4402 
4403 			fc = tp->link_config.flowctrl;
4404 		}
4405 
4406 		tg3_phy_autoneg_cfg(tp, adv, fc);
4407 
4408 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4409 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4410 			/* Normally during power down we want to autonegotiate
4411 			 * the lowest possible speed for WOL. However, to avoid
4412 			 * link flap, we leave it untouched.
4413 			 */
4414 			return;
4415 		}
4416 
4417 		tg3_writephy(tp, MII_BMCR,
4418 			     BMCR_ANENABLE | BMCR_ANRESTART);
4419 	} else {
4420 		int i;
4421 		u32 bmcr, orig_bmcr;
4422 
4423 		tp->link_config.active_speed = tp->link_config.speed;
4424 		tp->link_config.active_duplex = tp->link_config.duplex;
4425 
4426 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4427 			/* With autoneg disabled, the 5715 (which shares the
4428 			 * 5714 ASIC rev) only links up when the advertisement
4429 			 * register has the configured speed enabled.
4430 			 */
4431 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4432 		}
4433 
4434 		bmcr = 0;
4435 		switch (tp->link_config.speed) {
4436 		default:
4437 		case SPEED_10:
4438 			break;
4439 
4440 		case SPEED_100:
4441 			bmcr |= BMCR_SPEED100;
4442 			break;
4443 
4444 		case SPEED_1000:
4445 			bmcr |= BMCR_SPEED1000;
4446 			break;
4447 		}
4448 
4449 		if (tp->link_config.duplex == DUPLEX_FULL)
4450 			bmcr |= BMCR_FULLDPLX;
4451 
4452 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4453 		    (bmcr != orig_bmcr)) {
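			/* Force the link down by putting the PHY in loopback,
			 * wait for it to report link loss, then program the
			 * new speed/duplex settings.
			 */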
4454 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4455 			for (i = 0; i < 1500; i++) {
4456 				u32 tmp;
4457 
4458 				udelay(10);
4459 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4460 				    tg3_readphy(tp, MII_BMSR, &tmp))
4461 					continue;
4462 				if (!(tmp & BMSR_LSTATUS)) {
4463 					udelay(40);
4464 					break;
4465 				}
4466 			}
4467 			tg3_writephy(tp, MII_BMCR, bmcr);
4468 			udelay(40);
4469 		}
4470 	}
4471 }
4472 
4473 static int tg3_phy_pull_config(struct tg3 *tp)
4474 {
4475 	int err;
4476 	u32 val;
4477 
4478 	err = tg3_readphy(tp, MII_BMCR, &val);
4479 	if (err)
4480 		goto done;
4481 
4482 	if (!(val & BMCR_ANENABLE)) {
4483 		tp->link_config.autoneg = AUTONEG_DISABLE;
4484 		tp->link_config.advertising = 0;
4485 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4486 
4487 		err = -EIO;
4488 
4489 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4490 		case 0:
4491 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4492 				goto done;
4493 
4494 			tp->link_config.speed = SPEED_10;
4495 			break;
4496 		case BMCR_SPEED100:
4497 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4498 				goto done;
4499 
4500 			tp->link_config.speed = SPEED_100;
4501 			break;
4502 		case BMCR_SPEED1000:
4503 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4504 				tp->link_config.speed = SPEED_1000;
4505 				break;
4506 			}
4507 			/* Fall through */
4508 		default:
4509 			goto done;
4510 		}
4511 
4512 		if (val & BMCR_FULLDPLX)
4513 			tp->link_config.duplex = DUPLEX_FULL;
4514 		else
4515 			tp->link_config.duplex = DUPLEX_HALF;
4516 
4517 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4518 
4519 		err = 0;
4520 		goto done;
4521 	}
4522 
4523 	tp->link_config.autoneg = AUTONEG_ENABLE;
4524 	tp->link_config.advertising = ADVERTISED_Autoneg;
4525 	tg3_flag_set(tp, PAUSE_AUTONEG);
4526 
4527 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4528 		u32 adv;
4529 
4530 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4531 		if (err)
4532 			goto done;
4533 
4534 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4535 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4536 
4537 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4538 	} else {
4539 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4540 	}
4541 
4542 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4543 		u32 adv;
4544 
4545 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4546 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4547 			if (err)
4548 				goto done;
4549 
4550 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4551 		} else {
4552 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4553 			if (err)
4554 				goto done;
4555 
4556 			adv = tg3_decode_flowctrl_1000X(val);
4557 			tp->link_config.flowctrl = adv;
4558 
4559 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4560 			adv = mii_adv_to_ethtool_adv_x(val);
4561 		}
4562 
4563 		tp->link_config.advertising |= adv;
4564 	}
4565 
4566 done:
4567 	return err;
4568 }
4569 
4570 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4571 {
4572 	int err;
4573 
4574 	/* Turn off tap power management. */
4575 	/* Set Extended packet length bit */
4576 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4577 
4578 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4579 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4580 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4581 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4582 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4583 
4584 	udelay(40);
4585 
4586 	return err;
4587 }
4588 
4589 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4590 {
4591 	struct ethtool_eee eee;
4592 
4593 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4594 		return true;
4595 
4596 	tg3_eee_pull_config(tp, &eee);
4597 
4598 	if (tp->eee.eee_enabled) {
4599 		if (tp->eee.advertised != eee.advertised ||
4600 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4601 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4602 			return false;
4603 	} else {
4604 		/* EEE is disabled but we're advertising */
4605 		if (eee.advertised)
4606 			return false;
4607 	}
4608 
4609 	return true;
4610 }
4611 
4612 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4613 {
4614 	u32 advmsk, tgtadv, advertising;
4615 
4616 	advertising = tp->link_config.advertising;
4617 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4618 
4619 	advmsk = ADVERTISE_ALL;
4620 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4621 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4622 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4623 	}
4624 
4625 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4626 		return false;
4627 
4628 	if ((*lcladv & advmsk) != tgtadv)
4629 		return false;
4630 
4631 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4632 		u32 tg3_ctrl;
4633 
4634 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4635 
4636 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4637 			return false;
4638 
4639 		if (tgtadv &&
4640 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4641 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4642 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4643 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4644 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4645 		} else {
4646 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4647 		}
4648 
4649 		if (tg3_ctrl != tgtadv)
4650 			return false;
4651 	}
4652 
4653 	return true;
4654 }
4655 
4656 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4657 {
4658 	u32 lpeth = 0;
4659 
4660 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4661 		u32 val;
4662 
4663 		if (tg3_readphy(tp, MII_STAT1000, &val))
4664 			return false;
4665 
4666 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4667 	}
4668 
4669 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4670 		return false;
4671 
4672 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4673 	tp->link_config.rmt_adv = lpeth;
4674 
4675 	return true;
4676 }
4677 
4678 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4679 {
4680 	if (curr_link_up != tp->link_up) {
4681 		if (curr_link_up) {
4682 			netif_carrier_on(tp->dev);
4683 		} else {
4684 			netif_carrier_off(tp->dev);
4685 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4686 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687 		}
4688 
4689 		tg3_link_report(tp);
4690 		return true;
4691 	}
4692 
4693 	return false;
4694 }
4695 
4696 static void tg3_clear_mac_status(struct tg3 *tp)
4697 {
4698 	tw32(MAC_EVENT, 0);
4699 
4700 	tw32_f(MAC_STATUS,
4701 	       MAC_STATUS_SYNC_CHANGED |
4702 	       MAC_STATUS_CFG_CHANGED |
4703 	       MAC_STATUS_MI_COMPLETION |
4704 	       MAC_STATUS_LNKSTATE_CHANGED);
4705 	udelay(40);
4706 }
4707 
4708 static void tg3_setup_eee(struct tg3 *tp)
4709 {
4710 	u32 val;
4711 
4712 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4713 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4714 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4715 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4716 
4717 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4718 
4719 	tw32_f(TG3_CPMU_EEE_CTRL,
4720 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4721 
4722 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4723 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4724 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4725 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4726 
4727 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4728 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4729 
4730 	if (tg3_flag(tp, ENABLE_APE))
4731 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4732 
4733 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4734 
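	/* Note: only the low 16 bits of the LPI timer are written below. */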
4735 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4736 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4737 	       (tp->eee.tx_lpi_timer & 0xffff));
4738 
4739 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4740 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4741 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4742 }
4743 
4744 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4745 {
4746 	bool current_link_up;
4747 	u32 bmsr, val;
4748 	u32 lcl_adv, rmt_adv;
4749 	u16 current_speed;
4750 	u8 current_duplex;
4751 	int i, err;
4752 
4753 	tg3_clear_mac_status(tp);
4754 
4755 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4756 		tw32_f(MAC_MI_MODE,
4757 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4758 		udelay(80);
4759 	}
4760 
4761 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4762 
4763 	/* Some third-party PHYs need to be reset on link going
4764 	 * down.
4765 	 */
4766 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4767 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4768 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4769 	    tp->link_up) {
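		/* BMSR latches link-down events, so it is read twice here;
		 * the second read reflects the current link state.
		 */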
4770 		tg3_readphy(tp, MII_BMSR, &bmsr);
4771 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4772 		    !(bmsr & BMSR_LSTATUS))
4773 			force_reset = true;
4774 	}
4775 	if (force_reset)
4776 		tg3_phy_reset(tp);
4777 
4778 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4779 		tg3_readphy(tp, MII_BMSR, &bmsr);
4780 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4781 		    !tg3_flag(tp, INIT_COMPLETE))
4782 			bmsr = 0;
4783 
4784 		if (!(bmsr & BMSR_LSTATUS)) {
4785 			err = tg3_init_5401phy_dsp(tp);
4786 			if (err)
4787 				return err;
4788 
4789 			tg3_readphy(tp, MII_BMSR, &bmsr);
4790 			for (i = 0; i < 1000; i++) {
4791 				udelay(10);
4792 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4793 				    (bmsr & BMSR_LSTATUS)) {
4794 					udelay(40);
4795 					break;
4796 				}
4797 			}
4798 
4799 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4800 			    TG3_PHY_REV_BCM5401_B0 &&
4801 			    !(bmsr & BMSR_LSTATUS) &&
4802 			    tp->link_config.active_speed == SPEED_1000) {
4803 				err = tg3_phy_reset(tp);
4804 				if (!err)
4805 					err = tg3_init_5401phy_dsp(tp);
4806 				if (err)
4807 					return err;
4808 			}
4809 		}
4810 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4811 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4812 		/* 5701 {A0,B0} CRC bug workaround */
4813 		tg3_writephy(tp, 0x15, 0x0a75);
4814 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4815 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4816 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4817 	}
4818 
4819 	/* Clear pending interrupts... */
4820 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4821 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4822 
4823 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4824 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4825 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4826 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4827 
4828 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4829 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4830 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4831 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4832 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4833 		else
4834 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4835 	}
4836 
4837 	current_link_up = false;
4838 	current_speed = SPEED_UNKNOWN;
4839 	current_duplex = DUPLEX_UNKNOWN;
4840 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4841 	tp->link_config.rmt_adv = 0;
4842 
4843 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4844 		err = tg3_phy_auxctl_read(tp,
4845 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4846 					  &val);
4847 		if (!err && !(val & (1 << 10))) {
4848 			tg3_phy_auxctl_write(tp,
4849 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4850 					     val | (1 << 10));
4851 			goto relink;
4852 		}
4853 	}
4854 
4855 	bmsr = 0;
4856 	for (i = 0; i < 100; i++) {
4857 		tg3_readphy(tp, MII_BMSR, &bmsr);
4858 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4859 		    (bmsr & BMSR_LSTATUS))
4860 			break;
4861 		udelay(40);
4862 	}
4863 
4864 	if (bmsr & BMSR_LSTATUS) {
4865 		u32 aux_stat, bmcr;
4866 
4867 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4868 		for (i = 0; i < 2000; i++) {
4869 			udelay(10);
4870 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4871 			    aux_stat)
4872 				break;
4873 		}
4874 
4875 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4876 					     &current_speed,
4877 					     &current_duplex);
4878 
4879 		bmcr = 0;
4880 		for (i = 0; i < 200; i++) {
4881 			tg3_readphy(tp, MII_BMCR, &bmcr);
4882 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4883 				continue;
4884 			if (bmcr && bmcr != 0x7fff)
4885 				break;
4886 			udelay(10);
4887 		}
4888 
4889 		lcl_adv = 0;
4890 		rmt_adv = 0;
4891 
4892 		tp->link_config.active_speed = current_speed;
4893 		tp->link_config.active_duplex = current_duplex;
4894 
4895 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4896 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4897 
4898 			if ((bmcr & BMCR_ANENABLE) &&
4899 			    eee_config_ok &&
4900 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4901 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4902 				current_link_up = true;
4903 
4904 			/* Changes to EEE settings take effect only after a
4905 			 * phy reset.  If we have skipped a reset due to Link
4906 			 * Flap Avoidance being enabled, do it now.
4907 			 */
4908 			if (!eee_config_ok &&
4909 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4910 			    !force_reset) {
4911 				tg3_setup_eee(tp);
4912 				tg3_phy_reset(tp);
4913 			}
4914 		} else {
4915 			if (!(bmcr & BMCR_ANENABLE) &&
4916 			    tp->link_config.speed == current_speed &&
4917 			    tp->link_config.duplex == current_duplex) {
4918 				current_link_up = true;
4919 			}
4920 		}
4921 
4922 		if (current_link_up &&
4923 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4924 			u32 reg, bit;
4925 
4926 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4927 				reg = MII_TG3_FET_GEN_STAT;
4928 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4929 			} else {
4930 				reg = MII_TG3_EXT_STAT;
4931 				bit = MII_TG3_EXT_STAT_MDIX;
4932 			}
4933 
4934 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4935 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4936 
4937 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4938 		}
4939 	}
4940 
4941 relink:
4942 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4943 		tg3_phy_copper_begin(tp);
4944 
4945 		if (tg3_flag(tp, ROBOSWITCH)) {
4946 			current_link_up = true;
4947 			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4948 			current_speed = SPEED_1000;
4949 			current_duplex = DUPLEX_FULL;
4950 			tp->link_config.active_speed = current_speed;
4951 			tp->link_config.active_duplex = current_duplex;
4952 		}
4953 
4954 		tg3_readphy(tp, MII_BMSR, &bmsr);
4955 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4956 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4957 			current_link_up = true;
4958 	}
4959 
4960 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4961 	if (current_link_up) {
4962 		if (tp->link_config.active_speed == SPEED_100 ||
4963 		    tp->link_config.active_speed == SPEED_10)
4964 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4965 		else
4966 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4967 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4968 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4969 	else
4970 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4971 
4972 	/* In order for the 5750 core in the BCM4785 chip to work properly
4973 	 * in RGMII mode, the LED Control Register must be set up.
4974 	 */
4975 	if (tg3_flag(tp, RGMII_MODE)) {
4976 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4977 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4978 
4979 		if (tp->link_config.active_speed == SPEED_10)
4980 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4981 		else if (tp->link_config.active_speed == SPEED_100)
4982 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4983 				     LED_CTRL_100MBPS_ON);
4984 		else if (tp->link_config.active_speed == SPEED_1000)
4985 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4986 				     LED_CTRL_1000MBPS_ON);
4987 
4988 		tw32(MAC_LED_CTRL, led_ctrl);
4989 		udelay(40);
4990 	}
4991 
4992 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4993 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4994 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4995 
4996 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4997 		if (current_link_up &&
4998 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4999 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5000 		else
5001 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5002 	}
5003 
5004 	/* ??? Without this setting Netgear GA302T PHY does not
5005 	 * ??? send/receive packets...
5006 	 */
5007 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5008 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5009 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5010 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5011 		udelay(80);
5012 	}
5013 
5014 	tw32_f(MAC_MODE, tp->mac_mode);
5015 	udelay(40);
5016 
5017 	tg3_phy_eee_adjust(tp, current_link_up);
5018 
5019 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5020 		/* Polled via timer. */
5021 		tw32_f(MAC_EVENT, 0);
5022 	} else {
5023 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5024 	}
5025 	udelay(40);
5026 
5027 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5028 	    current_link_up &&
5029 	    tp->link_config.active_speed == SPEED_1000 &&
5030 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5031 		udelay(120);
5032 		tw32_f(MAC_STATUS,
5033 		     (MAC_STATUS_SYNC_CHANGED |
5034 		      MAC_STATUS_CFG_CHANGED));
5035 		udelay(40);
5036 		tg3_write_mem(tp,
5037 			      NIC_SRAM_FIRMWARE_MBOX,
5038 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5039 	}
5040 
5041 	/* Prevent send BD corruption. */
5042 	if (tg3_flag(tp, CLKREQ_BUG)) {
5043 		if (tp->link_config.active_speed == SPEED_100 ||
5044 		    tp->link_config.active_speed == SPEED_10)
5045 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5046 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5047 		else
5048 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5049 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5050 	}
5051 
5052 	tg3_test_and_report_link_chg(tp, current_link_up);
5053 
5054 	return 0;
5055 }
5056 
5057 struct tg3_fiber_aneginfo {
5058 	int state;
5059 #define ANEG_STATE_UNKNOWN		0
5060 #define ANEG_STATE_AN_ENABLE		1
5061 #define ANEG_STATE_RESTART_INIT		2
5062 #define ANEG_STATE_RESTART		3
5063 #define ANEG_STATE_DISABLE_LINK_OK	4
5064 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5065 #define ANEG_STATE_ABILITY_DETECT	6
5066 #define ANEG_STATE_ACK_DETECT_INIT	7
5067 #define ANEG_STATE_ACK_DETECT		8
5068 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5069 #define ANEG_STATE_COMPLETE_ACK		10
5070 #define ANEG_STATE_IDLE_DETECT_INIT	11
5071 #define ANEG_STATE_IDLE_DETECT		12
5072 #define ANEG_STATE_LINK_OK		13
5073 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5074 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5075 
5076 	u32 flags;
5077 #define MR_AN_ENABLE		0x00000001
5078 #define MR_RESTART_AN		0x00000002
5079 #define MR_AN_COMPLETE		0x00000004
5080 #define MR_PAGE_RX		0x00000008
5081 #define MR_NP_LOADED		0x00000010
5082 #define MR_TOGGLE_TX		0x00000020
5083 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5084 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5085 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5086 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5087 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5088 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5089 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5090 #define MR_TOGGLE_RX		0x00002000
5091 #define MR_NP_RX		0x00004000
5092 
5093 #define MR_LINK_OK		0x80000000
5094 
5095 	unsigned long link_time, cur_time;
5096 
5097 	u32 ability_match_cfg;
5098 	int ability_match_count;
5099 
5100 	char ability_match, idle_match, ack_match;
5101 
5102 	u32 txconfig, rxconfig;
5103 #define ANEG_CFG_NP		0x00000080
5104 #define ANEG_CFG_ACK		0x00000040
5105 #define ANEG_CFG_RF2		0x00000020
5106 #define ANEG_CFG_RF1		0x00000010
5107 #define ANEG_CFG_PS2		0x00000001
5108 #define ANEG_CFG_PS1		0x00008000
5109 #define ANEG_CFG_HD		0x00004000
5110 #define ANEG_CFG_FD		0x00002000
5111 #define ANEG_CFG_INVAL		0x00001f06
5112 
5113 };
5114 #define ANEG_OK		0
5115 #define ANEG_DONE	1
5116 #define ANEG_TIMER_ENAB	2
5117 #define ANEG_FAILED	-1
5118 
5119 #define ANEG_STATE_SETTLE_TIME	10000
5120 
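/* A sketch of the typical happy-path progression of the state machine
 * below, derived from the transitions in tg3_fiber_aneg_smachine():
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Each cur_time tick is one pass from fiber_autoneg(), i.e. roughly
 * 1 us, so ANEG_STATE_SETTLE_TIME corresponds to about 10 ms.
 */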
5121 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5122 				   struct tg3_fiber_aneginfo *ap)
5123 {
5124 	u16 flowctrl;
5125 	unsigned long delta;
5126 	u32 rx_cfg_reg;
5127 	int ret;
5128 
5129 	if (ap->state == ANEG_STATE_UNKNOWN) {
5130 		ap->rxconfig = 0;
5131 		ap->link_time = 0;
5132 		ap->cur_time = 0;
5133 		ap->ability_match_cfg = 0;
5134 		ap->ability_match_count = 0;
5135 		ap->ability_match = 0;
5136 		ap->idle_match = 0;
5137 		ap->ack_match = 0;
5138 	}
5139 	ap->cur_time++;
5140 
5141 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5142 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5143 
5144 		if (rx_cfg_reg != ap->ability_match_cfg) {
5145 			ap->ability_match_cfg = rx_cfg_reg;
5146 			ap->ability_match = 0;
5147 			ap->ability_match_count = 0;
5148 		} else {
5149 			if (++ap->ability_match_count > 1) {
5150 				ap->ability_match = 1;
5151 				ap->ability_match_cfg = rx_cfg_reg;
5152 			}
5153 		}
5154 		if (rx_cfg_reg & ANEG_CFG_ACK)
5155 			ap->ack_match = 1;
5156 		else
5157 			ap->ack_match = 0;
5158 
5159 		ap->idle_match = 0;
5160 	} else {
5161 		ap->idle_match = 1;
5162 		ap->ability_match_cfg = 0;
5163 		ap->ability_match_count = 0;
5164 		ap->ability_match = 0;
5165 		ap->ack_match = 0;
5166 
5167 		rx_cfg_reg = 0;
5168 	}
5169 
5170 	ap->rxconfig = rx_cfg_reg;
5171 	ret = ANEG_OK;
5172 
5173 	switch (ap->state) {
5174 	case ANEG_STATE_UNKNOWN:
5175 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5176 			ap->state = ANEG_STATE_AN_ENABLE;
5177 
5178 		/* Fall through */
5179 	case ANEG_STATE_AN_ENABLE:
5180 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5181 		if (ap->flags & MR_AN_ENABLE) {
5182 			ap->link_time = 0;
5183 			ap->cur_time = 0;
5184 			ap->ability_match_cfg = 0;
5185 			ap->ability_match_count = 0;
5186 			ap->ability_match = 0;
5187 			ap->idle_match = 0;
5188 			ap->ack_match = 0;
5189 
5190 			ap->state = ANEG_STATE_RESTART_INIT;
5191 		} else {
5192 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5193 		}
5194 		break;
5195 
5196 	case ANEG_STATE_RESTART_INIT:
5197 		ap->link_time = ap->cur_time;
5198 		ap->flags &= ~(MR_NP_LOADED);
5199 		ap->txconfig = 0;
5200 		tw32(MAC_TX_AUTO_NEG, 0);
5201 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5202 		tw32_f(MAC_MODE, tp->mac_mode);
5203 		udelay(40);
5204 
5205 		ret = ANEG_TIMER_ENAB;
5206 		ap->state = ANEG_STATE_RESTART;
5207 
5208 		/* Fall through */
5209 	case ANEG_STATE_RESTART:
5210 		delta = ap->cur_time - ap->link_time;
5211 		if (delta > ANEG_STATE_SETTLE_TIME)
5212 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5213 		else
5214 			ret = ANEG_TIMER_ENAB;
5215 		break;
5216 
5217 	case ANEG_STATE_DISABLE_LINK_OK:
5218 		ret = ANEG_DONE;
5219 		break;
5220 
5221 	case ANEG_STATE_ABILITY_DETECT_INIT:
5222 		ap->flags &= ~(MR_TOGGLE_TX);
5223 		ap->txconfig = ANEG_CFG_FD;
5224 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5225 		if (flowctrl & ADVERTISE_1000XPAUSE)
5226 			ap->txconfig |= ANEG_CFG_PS1;
5227 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5228 			ap->txconfig |= ANEG_CFG_PS2;
5229 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5230 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 		tw32_f(MAC_MODE, tp->mac_mode);
5232 		udelay(40);
5233 
5234 		ap->state = ANEG_STATE_ABILITY_DETECT;
5235 		break;
5236 
5237 	case ANEG_STATE_ABILITY_DETECT:
5238 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5239 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5240 		break;
5241 
5242 	case ANEG_STATE_ACK_DETECT_INIT:
5243 		ap->txconfig |= ANEG_CFG_ACK;
5244 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5245 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5246 		tw32_f(MAC_MODE, tp->mac_mode);
5247 		udelay(40);
5248 
5249 		ap->state = ANEG_STATE_ACK_DETECT;
5250 
5251 		/* Fall through */
5252 	case ANEG_STATE_ACK_DETECT:
5253 		if (ap->ack_match != 0) {
5254 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5255 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5256 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5257 			} else {
5258 				ap->state = ANEG_STATE_AN_ENABLE;
5259 			}
5260 		} else if (ap->ability_match != 0 &&
5261 			   ap->rxconfig == 0) {
5262 			ap->state = ANEG_STATE_AN_ENABLE;
5263 		}
5264 		break;
5265 
5266 	case ANEG_STATE_COMPLETE_ACK_INIT:
5267 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5268 			ret = ANEG_FAILED;
5269 			break;
5270 		}
5271 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5272 			       MR_LP_ADV_HALF_DUPLEX |
5273 			       MR_LP_ADV_SYM_PAUSE |
5274 			       MR_LP_ADV_ASYM_PAUSE |
5275 			       MR_LP_ADV_REMOTE_FAULT1 |
5276 			       MR_LP_ADV_REMOTE_FAULT2 |
5277 			       MR_LP_ADV_NEXT_PAGE |
5278 			       MR_TOGGLE_RX |
5279 			       MR_NP_RX);
5280 		if (ap->rxconfig & ANEG_CFG_FD)
5281 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5282 		if (ap->rxconfig & ANEG_CFG_HD)
5283 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5284 		if (ap->rxconfig & ANEG_CFG_PS1)
5285 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5286 		if (ap->rxconfig & ANEG_CFG_PS2)
5287 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5288 		if (ap->rxconfig & ANEG_CFG_RF1)
5289 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5290 		if (ap->rxconfig & ANEG_CFG_RF2)
5291 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5292 		if (ap->rxconfig & ANEG_CFG_NP)
5293 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5294 
5295 		ap->link_time = ap->cur_time;
5296 
5297 		ap->flags ^= (MR_TOGGLE_TX);
5298 		if (ap->rxconfig & 0x0008)
5299 			ap->flags |= MR_TOGGLE_RX;
5300 		if (ap->rxconfig & ANEG_CFG_NP)
5301 			ap->flags |= MR_NP_RX;
5302 		ap->flags |= MR_PAGE_RX;
5303 
5304 		ap->state = ANEG_STATE_COMPLETE_ACK;
5305 		ret = ANEG_TIMER_ENAB;
5306 		break;
5307 
5308 	case ANEG_STATE_COMPLETE_ACK:
5309 		if (ap->ability_match != 0 &&
5310 		    ap->rxconfig == 0) {
5311 			ap->state = ANEG_STATE_AN_ENABLE;
5312 			break;
5313 		}
5314 		delta = ap->cur_time - ap->link_time;
5315 		if (delta > ANEG_STATE_SETTLE_TIME) {
5316 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5317 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5318 			} else {
5319 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5320 				    !(ap->flags & MR_NP_RX)) {
5321 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5322 				} else {
5323 					ret = ANEG_FAILED;
5324 				}
5325 			}
5326 		}
5327 		break;
5328 
5329 	case ANEG_STATE_IDLE_DETECT_INIT:
5330 		ap->link_time = ap->cur_time;
5331 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5332 		tw32_f(MAC_MODE, tp->mac_mode);
5333 		udelay(40);
5334 
5335 		ap->state = ANEG_STATE_IDLE_DETECT;
5336 		ret = ANEG_TIMER_ENAB;
5337 		break;
5338 
5339 	case ANEG_STATE_IDLE_DETECT:
5340 		if (ap->ability_match != 0 &&
5341 		    ap->rxconfig == 0) {
5342 			ap->state = ANEG_STATE_AN_ENABLE;
5343 			break;
5344 		}
5345 		delta = ap->cur_time - ap->link_time;
5346 		if (delta > ANEG_STATE_SETTLE_TIME) {
5347 			/* XXX another gem from the Broadcom driver :( */
5348 			ap->state = ANEG_STATE_LINK_OK;
5349 		}
5350 		break;
5351 
5352 	case ANEG_STATE_LINK_OK:
5353 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5354 		ret = ANEG_DONE;
5355 		break;
5356 
5357 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5358 		/* ??? unimplemented */
5359 		break;
5360 
5361 	case ANEG_STATE_NEXT_PAGE_WAIT:
5362 		/* ??? unimplemented */
5363 		break;
5364 
5365 	default:
5366 		ret = ANEG_FAILED;
5367 		break;
5368 	}
5369 
5370 	return ret;
5371 }
5372 
5373 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5374 {
5375 	int res = 0;
5376 	struct tg3_fiber_aneginfo aninfo;
5377 	int status = ANEG_FAILED;
5378 	unsigned int tick;
5379 	u32 tmp;
5380 
5381 	tw32_f(MAC_TX_AUTO_NEG, 0);
5382 
5383 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5384 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5385 	udelay(40);
5386 
5387 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5388 	udelay(40);
5389 
5390 	memset(&aninfo, 0, sizeof(aninfo));
5391 	aninfo.flags |= MR_AN_ENABLE;
5392 	aninfo.state = ANEG_STATE_UNKNOWN;
5393 	aninfo.cur_time = 0;
5394 	tick = 0;
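	/* Crank the state machine for at most ~195 ms (195000 ticks of
	 * roughly 1 us each).
	 */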
5395 	while (++tick < 195000) {
5396 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5397 		if (status == ANEG_DONE || status == ANEG_FAILED)
5398 			break;
5399 
5400 		udelay(1);
5401 	}
5402 
5403 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5404 	tw32_f(MAC_MODE, tp->mac_mode);
5405 	udelay(40);
5406 
5407 	*txflags = aninfo.txconfig;
5408 	*rxflags = aninfo.flags;
5409 
5410 	if (status == ANEG_DONE &&
5411 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5412 			     MR_LP_ADV_FULL_DUPLEX)))
5413 		res = 1;
5414 
5415 	return res;
5416 }
5417 
5418 static void tg3_init_bcm8002(struct tg3 *tp)
5419 {
5420 	u32 mac_status = tr32(MAC_STATUS);
5421 	int i;
5422 
5423 	/* Reset when initializing for the first time, or when we have a link. */
5424 	if (tg3_flag(tp, INIT_COMPLETE) &&
5425 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5426 		return;
5427 
5428 	/* Set PLL lock range. */
5429 	tg3_writephy(tp, 0x16, 0x8007);
5430 
5431 	/* SW reset */
5432 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5433 
5434 	/* Wait for reset to complete. */
5435 	/* XXX schedule_timeout() ... */
5436 	for (i = 0; i < 500; i++)
5437 		udelay(10);
5438 
5439 	/* Config mode; select PMA/Ch 1 regs. */
5440 	tg3_writephy(tp, 0x10, 0x8411);
5441 
5442 	/* Enable auto-lock and comdet, select txclk for tx. */
5443 	tg3_writephy(tp, 0x11, 0x0a10);
5444 
5445 	tg3_writephy(tp, 0x18, 0x00a0);
5446 	tg3_writephy(tp, 0x16, 0x41ff);
5447 
5448 	/* Assert and deassert POR. */
5449 	tg3_writephy(tp, 0x13, 0x0400);
5450 	udelay(40);
5451 	tg3_writephy(tp, 0x13, 0x0000);
5452 
5453 	tg3_writephy(tp, 0x11, 0x0a50);
5454 	udelay(40);
5455 	tg3_writephy(tp, 0x11, 0x0a10);
5456 
5457 	/* Wait for signal to stabilize */
5458 	/* XXX schedule_timeout() ... */
5459 	for (i = 0; i < 15000; i++)
5460 		udelay(10);
5461 
5462 	/* Deselect the channel register so we can read the PHYID
5463 	 * later.
5464 	 */
5465 	tg3_writephy(tp, 0x10, 0x8011);
5466 }
5467 
5468 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5469 {
5470 	u16 flowctrl;
5471 	bool current_link_up;
5472 	u32 sg_dig_ctrl, sg_dig_status;
5473 	u32 serdes_cfg, expected_sg_dig_ctrl;
5474 	int workaround, port_a;
5475 
5476 	serdes_cfg = 0;
5477 	expected_sg_dig_ctrl = 0;
5478 	workaround = 0;
5479 	port_a = 1;
5480 	current_link_up = false;
5481 
5482 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5483 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5484 		workaround = 1;
5485 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5486 			port_a = 0;
5487 
5488 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5489 		/* preserve bits 20-23 for voltage regulator */
5490 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5491 	}
5492 
5493 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5494 
5495 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5496 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5497 			if (workaround) {
5498 				u32 val = serdes_cfg;
5499 
5500 				if (port_a)
5501 					val |= 0xc010000;
5502 				else
5503 					val |= 0x4010000;
5504 				tw32_f(MAC_SERDES_CFG, val);
5505 			}
5506 
5507 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5508 		}
5509 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5510 			tg3_setup_flow_control(tp, 0, 0);
5511 			current_link_up = true;
5512 		}
5513 		goto out;
5514 	}
5515 
5516 	/* Want auto-negotiation. */
5517 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5518 
5519 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5520 	if (flowctrl & ADVERTISE_1000XPAUSE)
5521 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5522 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5523 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5524 
5525 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5526 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5527 		    tp->serdes_counter &&
5528 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5529 				    MAC_STATUS_RCVD_CFG)) ==
5530 		     MAC_STATUS_PCS_SYNCED)) {
5531 			tp->serdes_counter--;
5532 			current_link_up = true;
5533 			goto out;
5534 		}
5535 restart_autoneg:
5536 		if (workaround)
5537 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5538 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5539 		udelay(5);
5540 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5541 
5542 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5543 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5544 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5545 				 MAC_STATUS_SIGNAL_DET)) {
5546 		sg_dig_status = tr32(SG_DIG_STATUS);
5547 		mac_status = tr32(MAC_STATUS);
5548 
5549 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5550 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5551 			u32 local_adv = 0, remote_adv = 0;
5552 
5553 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5554 				local_adv |= ADVERTISE_1000XPAUSE;
5555 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5556 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5557 
5558 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5559 				remote_adv |= LPA_1000XPAUSE;
5560 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5561 				remote_adv |= LPA_1000XPAUSE_ASYM;
5562 
5563 			tp->link_config.rmt_adv =
5564 					   mii_adv_to_ethtool_adv_x(remote_adv);
5565 
5566 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5567 			current_link_up = true;
5568 			tp->serdes_counter = 0;
5569 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5570 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5571 			if (tp->serdes_counter)
5572 				tp->serdes_counter--;
5573 			else {
5574 				if (workaround) {
5575 					u32 val = serdes_cfg;
5576 
5577 					if (port_a)
5578 						val |= 0xc010000;
5579 					else
5580 						val |= 0x4010000;
5581 
5582 					tw32_f(MAC_SERDES_CFG, val);
5583 				}
5584 
5585 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5586 				udelay(40);
5587 
5588 				/* Link parallel detection: the link is up
5589 				 * only if we have PCS_SYNC and are not
5590 				 * receiving config code words. */
5591 				mac_status = tr32(MAC_STATUS);
5592 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5593 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5594 					tg3_setup_flow_control(tp, 0, 0);
5595 					current_link_up = true;
5596 					tp->phy_flags |=
5597 						TG3_PHYFLG_PARALLEL_DETECT;
5598 					tp->serdes_counter =
5599 						SERDES_PARALLEL_DET_TIMEOUT;
5600 				} else
5601 					goto restart_autoneg;
5602 			}
5603 		}
5604 	} else {
5605 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5606 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5607 	}
5608 
5609 out:
5610 	return current_link_up;
5611 }
5612 
5613 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5614 {
5615 	bool current_link_up = false;
5616 
5617 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5618 		goto out;
5619 
5620 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5621 		u32 txflags, rxflags;
5622 		int i;
5623 
5624 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5625 			u32 local_adv = 0, remote_adv = 0;
5626 
5627 			if (txflags & ANEG_CFG_PS1)
5628 				local_adv |= ADVERTISE_1000XPAUSE;
5629 			if (txflags & ANEG_CFG_PS2)
5630 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5631 
5632 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5633 				remote_adv |= LPA_1000XPAUSE;
5634 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5635 				remote_adv |= LPA_1000XPAUSE_ASYM;
5636 
5637 			tp->link_config.rmt_adv =
5638 					   mii_adv_to_ethtool_adv_x(remote_adv);
5639 
5640 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5641 
5642 			current_link_up = true;
5643 		}
5644 		for (i = 0; i < 30; i++) {
5645 			udelay(20);
5646 			tw32_f(MAC_STATUS,
5647 			       (MAC_STATUS_SYNC_CHANGED |
5648 				MAC_STATUS_CFG_CHANGED));
5649 			udelay(40);
5650 			if ((tr32(MAC_STATUS) &
5651 			     (MAC_STATUS_SYNC_CHANGED |
5652 			      MAC_STATUS_CFG_CHANGED)) == 0)
5653 				break;
5654 		}
5655 
5656 		mac_status = tr32(MAC_STATUS);
5657 		if (!current_link_up &&
5658 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5659 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5660 			current_link_up = true;
5661 	} else {
5662 		tg3_setup_flow_control(tp, 0, 0);
5663 
5664 		/* Forcing 1000FD link up. */
5665 		current_link_up = true;
5666 
5667 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5668 		udelay(40);
5669 
5670 		tw32_f(MAC_MODE, tp->mac_mode);
5671 		udelay(40);
5672 	}
5673 
5674 out:
5675 	return current_link_up;
5676 }
5677 
5678 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5679 {
5680 	u32 orig_pause_cfg;
5681 	u16 orig_active_speed;
5682 	u8 orig_active_duplex;
5683 	u32 mac_status;
5684 	bool current_link_up;
5685 	int i;
5686 
5687 	orig_pause_cfg = tp->link_config.active_flowctrl;
5688 	orig_active_speed = tp->link_config.active_speed;
5689 	orig_active_duplex = tp->link_config.active_duplex;
5690 
5691 	if (!tg3_flag(tp, HW_AUTONEG) &&
5692 	    tp->link_up &&
5693 	    tg3_flag(tp, INIT_COMPLETE)) {
5694 		mac_status = tr32(MAC_STATUS);
5695 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5696 			       MAC_STATUS_SIGNAL_DET |
5697 			       MAC_STATUS_CFG_CHANGED |
5698 			       MAC_STATUS_RCVD_CFG);
5699 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5700 				   MAC_STATUS_SIGNAL_DET)) {
5701 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5702 					    MAC_STATUS_CFG_CHANGED));
5703 			return 0;
5704 		}
5705 	}
5706 
5707 	tw32_f(MAC_TX_AUTO_NEG, 0);
5708 
5709 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5710 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5711 	tw32_f(MAC_MODE, tp->mac_mode);
5712 	udelay(40);
5713 
5714 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5715 		tg3_init_bcm8002(tp);
5716 
5717 	/* Enable link change events even while polling the serdes. */
5718 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5719 	udelay(40);
5720 
5721 	current_link_up = false;
5722 	tp->link_config.rmt_adv = 0;
5723 	mac_status = tr32(MAC_STATUS);
5724 
5725 	if (tg3_flag(tp, HW_AUTONEG))
5726 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5727 	else
5728 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5729 
5730 	tp->napi[0].hw_status->status =
5731 		(SD_STATUS_UPDATED |
5732 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5733 
5734 	for (i = 0; i < 100; i++) {
5735 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5736 				    MAC_STATUS_CFG_CHANGED));
5737 		udelay(5);
5738 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5739 					 MAC_STATUS_CFG_CHANGED |
5740 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5741 			break;
5742 	}
5743 
5744 	mac_status = tr32(MAC_STATUS);
5745 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5746 		current_link_up = false;
5747 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5748 		    tp->serdes_counter == 0) {
5749 			tw32_f(MAC_MODE, (tp->mac_mode |
5750 					  MAC_MODE_SEND_CONFIGS));
5751 			udelay(1);
5752 			tw32_f(MAC_MODE, tp->mac_mode);
5753 		}
5754 	}
5755 
5756 	if (current_link_up) {
5757 		tp->link_config.active_speed = SPEED_1000;
5758 		tp->link_config.active_duplex = DUPLEX_FULL;
5759 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5760 				    LED_CTRL_LNKLED_OVERRIDE |
5761 				    LED_CTRL_1000MBPS_ON));
5762 	} else {
5763 		tp->link_config.active_speed = SPEED_UNKNOWN;
5764 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5765 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5766 				    LED_CTRL_LNKLED_OVERRIDE |
5767 				    LED_CTRL_TRAFFIC_OVERRIDE));
5768 	}
5769 
5770 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5771 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5772 		if (orig_pause_cfg != now_pause_cfg ||
5773 		    orig_active_speed != tp->link_config.active_speed ||
5774 		    orig_active_duplex != tp->link_config.active_duplex)
5775 			tg3_link_report(tp);
5776 	}
5777 
5778 	return 0;
5779 }
5780 
5781 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5782 {
5783 	int err = 0;
5784 	u32 bmsr, bmcr;
5785 	u16 current_speed = SPEED_UNKNOWN;
5786 	u8 current_duplex = DUPLEX_UNKNOWN;
5787 	bool current_link_up = false;
5788 	u32 local_adv, remote_adv, sgsr;
5789 
5790 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5791 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5792 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5793 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5794 
5795 		if (force_reset)
5796 			tg3_phy_reset(tp);
5797 
5798 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5799 
5800 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5801 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5802 		} else {
5803 			current_link_up = true;
5804 			if (sgsr & SERDES_TG3_SPEED_1000) {
5805 				current_speed = SPEED_1000;
5806 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5807 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5808 				current_speed = SPEED_100;
5809 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5810 			} else {
5811 				current_speed = SPEED_10;
5812 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5813 			}
5814 
5815 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5816 				current_duplex = DUPLEX_FULL;
5817 			else
5818 				current_duplex = DUPLEX_HALF;
5819 		}
5820 
5821 		tw32_f(MAC_MODE, tp->mac_mode);
5822 		udelay(40);
5823 
5824 		tg3_clear_mac_status(tp);
5825 
5826 		goto fiber_setup_done;
5827 	}
5828 
5829 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 	tw32_f(MAC_MODE, tp->mac_mode);
5831 	udelay(40);
5832 
5833 	tg3_clear_mac_status(tp);
5834 
5835 	if (force_reset)
5836 		tg3_phy_reset(tp);
5837 
5838 	tp->link_config.rmt_adv = 0;
5839 
5840 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5841 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5842 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5843 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5844 			bmsr |= BMSR_LSTATUS;
5845 		else
5846 			bmsr &= ~BMSR_LSTATUS;
5847 	}
5848 
5849 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5850 
5851 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5852 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5853 		/* do nothing, just check for link up at the end */
5854 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5855 		u32 adv, newadv;
5856 
5857 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5858 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5859 				 ADVERTISE_1000XPAUSE |
5860 				 ADVERTISE_1000XPSE_ASYM |
5861 				 ADVERTISE_SLCT);
5862 
5863 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5864 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5865 
5866 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5867 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5868 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5869 			tg3_writephy(tp, MII_BMCR, bmcr);
5870 
5871 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5872 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5873 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5874 
5875 			return err;
5876 		}
5877 	} else {
5878 		u32 new_bmcr;
5879 
5880 		bmcr &= ~BMCR_SPEED1000;
5881 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5882 
5883 		if (tp->link_config.duplex == DUPLEX_FULL)
5884 			new_bmcr |= BMCR_FULLDPLX;
5885 
5886 		if (new_bmcr != bmcr) {
5887 			/* BMCR_SPEED1000 is a reserved bit that needs
5888 			 * to be set on write.
5889 			 */
5890 			new_bmcr |= BMCR_SPEED1000;
5891 
5892 			/* Force a linkdown */
5893 			if (tp->link_up) {
5894 				u32 adv;
5895 
5896 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5897 				adv &= ~(ADVERTISE_1000XFULL |
5898 					 ADVERTISE_1000XHALF |
5899 					 ADVERTISE_SLCT);
5900 				tg3_writephy(tp, MII_ADVERTISE, adv);
5901 				tg3_writephy(tp, MII_BMCR, bmcr |
5902 							   BMCR_ANRESTART |
5903 							   BMCR_ANENABLE);
5904 				udelay(10);
5905 				tg3_carrier_off(tp);
5906 			}
5907 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5908 			bmcr = new_bmcr;
5909 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5910 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5911 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5912 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5913 					bmsr |= BMSR_LSTATUS;
5914 				else
5915 					bmsr &= ~BMSR_LSTATUS;
5916 			}
5917 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5918 		}
5919 	}
5920 
5921 	if (bmsr & BMSR_LSTATUS) {
5922 		current_speed = SPEED_1000;
5923 		current_link_up = true;
5924 		if (bmcr & BMCR_FULLDPLX)
5925 			current_duplex = DUPLEX_FULL;
5926 		else
5927 			current_duplex = DUPLEX_HALF;
5928 
5929 		local_adv = 0;
5930 		remote_adv = 0;
5931 
5932 		if (bmcr & BMCR_ANENABLE) {
5933 			u32 common;
5934 
5935 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5936 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5937 			common = local_adv & remote_adv;
5938 			if (common & (ADVERTISE_1000XHALF |
5939 				      ADVERTISE_1000XFULL)) {
5940 				if (common & ADVERTISE_1000XFULL)
5941 					current_duplex = DUPLEX_FULL;
5942 				else
5943 					current_duplex = DUPLEX_HALF;
5944 
5945 				tp->link_config.rmt_adv =
5946 					   mii_adv_to_ethtool_adv_x(remote_adv);
5947 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5948 				/* Link is up via parallel detect */
5949 			} else {
5950 				current_link_up = false;
5951 			}
5952 		}
5953 	}
5954 
5955 fiber_setup_done:
5956 	if (current_link_up && current_duplex == DUPLEX_FULL)
5957 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5958 
5959 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5960 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5961 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5962 
5963 	tw32_f(MAC_MODE, tp->mac_mode);
5964 	udelay(40);
5965 
5966 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5967 
5968 	tp->link_config.active_speed = current_speed;
5969 	tp->link_config.active_duplex = current_duplex;
5970 
5971 	tg3_test_and_report_link_chg(tp, current_link_up);
5972 	return err;
5973 }
5974 
5975 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5976 {
5977 	if (tp->serdes_counter) {
5978 		/* Give autoneg time to complete. */
5979 		tp->serdes_counter--;
5980 		return;
5981 	}
5982 
5983 	if (!tp->link_up &&
5984 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5985 		u32 bmcr;
5986 
5987 		tg3_readphy(tp, MII_BMCR, &bmcr);
5988 		if (bmcr & BMCR_ANENABLE) {
5989 			u32 phy1, phy2;
5990 
5991 			/* Select shadow register 0x1f */
5992 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5993 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5994 
5995 			/* Select expansion interrupt status register */
5996 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5997 					 MII_TG3_DSP_EXP1_INT_STAT);
5998 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5999 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6000 
6001 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6002 				/* We have signal detect and are not receiving
6003 				 * config code words, so the link is up by
6004 				 * parallel detection.
6005 				 */
6006 
6007 				bmcr &= ~BMCR_ANENABLE;
6008 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6009 				tg3_writephy(tp, MII_BMCR, bmcr);
6010 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6011 			}
6012 		}
6013 	} else if (tp->link_up &&
6014 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6015 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6016 		u32 phy2;
6017 
6018 		/* Select expansion interrupt status register */
6019 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6020 				 MII_TG3_DSP_EXP1_INT_STAT);
6021 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6022 		if (phy2 & 0x20) {
6023 			u32 bmcr;
6024 
6025 			/* Config code words received, turn on autoneg. */
6026 			tg3_readphy(tp, MII_BMCR, &bmcr);
6027 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6028 
6029 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6030 
6031 		}
6032 	}
6033 }
6034 
6035 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6036 {
6037 	u32 val;
6038 	int err;
6039 
6040 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6041 		err = tg3_setup_fiber_phy(tp, force_reset);
6042 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6043 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6044 	else
6045 		err = tg3_setup_copper_phy(tp, force_reset);
6046 
6047 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6048 		u32 scale;
6049 
6050 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6051 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6052 			scale = 65;
6053 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6054 			scale = 6;
6055 		else
6056 			scale = 12;
6057 
6058 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6059 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6060 		tw32(GRC_MISC_CFG, val);
6061 	}
6062 
6063 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6064 	      (6 << TX_LENGTHS_IPG_SHIFT);
6065 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6066 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6067 		val |= tr32(MAC_TX_LENGTHS) &
6068 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6069 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6070 
6071 	if (tp->link_config.active_speed == SPEED_1000 &&
6072 	    tp->link_config.active_duplex == DUPLEX_HALF)
6073 		tw32(MAC_TX_LENGTHS, val |
6074 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6075 	else
6076 		tw32(MAC_TX_LENGTHS, val |
6077 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6078 
6079 	if (!tg3_flag(tp, 5705_PLUS)) {
6080 		if (tp->link_up) {
6081 			tw32(HOSTCC_STAT_COAL_TICKS,
6082 			     tp->coal.stats_block_coalesce_usecs);
6083 		} else {
6084 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6085 		}
6086 	}
6087 
6088 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6089 		val = tr32(PCIE_PWR_MGMT_THRESH);
6090 		if (!tp->link_up)
6091 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6092 			      tp->pwrmgmt_thresh;
6093 		else
6094 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6095 		tw32(PCIE_PWR_MGMT_THRESH, val);
6096 	}
6097 
6098 	return err;
6099 }
6100 
6101 /* tp->lock must be held */
6102 static u64 tg3_refclk_read(struct tg3 *tp)
6103 {
6104 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6105 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6106 }
6107 
6108 /* tp->lock must be held */
6109 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6110 {
6111 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6112 
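	/* Stop the reference clock across the two 32-bit half-writes,
	 * then resume, so the counter cannot advance mid-update.
	 */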
6113 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6114 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6115 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6116 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6117 }
6118 
6119 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6120 static inline void tg3_full_unlock(struct tg3 *tp);
6121 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6122 {
6123 	struct tg3 *tp = netdev_priv(dev);
6124 
6125 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6126 				SOF_TIMESTAMPING_RX_SOFTWARE |
6127 				SOF_TIMESTAMPING_SOFTWARE;
6128 
6129 	if (tg3_flag(tp, PTP_CAPABLE)) {
6130 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6131 					SOF_TIMESTAMPING_RX_HARDWARE |
6132 					SOF_TIMESTAMPING_RAW_HARDWARE;
6133 	}
6134 
6135 	if (tp->ptp_clock)
6136 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6137 	else
6138 		info->phc_index = -1;
6139 
6140 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6141 
6142 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6143 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6144 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6145 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6146 	return 0;
6147 }
6148 
6149 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6150 {
6151 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6152 	bool neg_adj = false;
6153 	u32 correction = 0;
6154 
6155 	if (ppb < 0) {
6156 		neg_adj = true;
6157 		ppb = -ppb;
6158 	}
6159 
6160 	/* Frequency adjustment is performed using hardware with a 24-bit
6161 	 * accumulator and a programmable correction value. On each clock
6162 	 * cycle, the correction value gets added to the accumulator and
6163 	 * when it overflows, the time counter is incremented/decremented.
6164 	 *
6165 	 * So the conversion from ppb to the correction value is
6166 	 *		ppb * (1 << 24) / 1000000000
6167 	 */
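	/* Worked example (illustrative): ppb = 1000000 (i.e. 1000 ppm)
	 * gives correction = 1000000 * (1 << 24) / 1000000000 = 16777,
	 * so the accumulator wraps roughly once every 1000 clocks and
	 * the time counter gains one extra tick per wrap: +1000 ppm.
	 */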
6168 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6169 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6170 
6171 	tg3_full_lock(tp, 0);
6172 
6173 	if (correction)
6174 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6175 		     TG3_EAV_REF_CLK_CORRECT_EN |
6176 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6177 	else
6178 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6179 
6180 	tg3_full_unlock(tp);
6181 
6182 	return 0;
6183 }
6184 
6185 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6186 {
6187 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6188 
6189 	tg3_full_lock(tp, 0);
6190 	tp->ptp_adjust += delta;
6191 	tg3_full_unlock(tp);
6192 
6193 	return 0;
6194 }
6195 
6196 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6197 {
6198 	u64 ns;
6199 	u32 remainder;
6200 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6201 
6202 	tg3_full_lock(tp, 0);
6203 	ns = tg3_refclk_read(tp);
6204 	ns += tp->ptp_adjust;
6205 	tg3_full_unlock(tp);
6206 
6207 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6208 	ts->tv_nsec = remainder;
6209 
6210 	return 0;
6211 }
6212 
6213 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6214 			   const struct timespec *ts)
6215 {
6216 	u64 ns;
6217 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6218 
6219 	ns = timespec_to_ns(ts);
6220 
6221 	tg3_full_lock(tp, 0);
6222 	tg3_refclk_write(tp, ns);
6223 	tp->ptp_adjust = 0;
6224 	tg3_full_unlock(tp);
6225 
6226 	return 0;
6227 }
6228 
6229 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6230 			  struct ptp_clock_request *rq, int on)
6231 {
6232 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6233 	u32 clock_ctl;
6234 	int rval = 0;
6235 
6236 	switch (rq->type) {
6237 	case PTP_CLK_REQ_PEROUT:
6238 		if (rq->perout.index != 0)
6239 			return -EINVAL;
6240 
6241 		tg3_full_lock(tp, 0);
6242 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6243 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6244 
6245 		if (on) {
6246 			u64 nsec;
6247 
6248 			nsec = rq->perout.start.sec * 1000000000ULL +
6249 			       rq->perout.start.nsec;
6250 
6251 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6252 				netdev_warn(tp->dev,
6253 					    "Device supports only a one-shot timesync output; period must be 0\n");
6254 				rval = -EINVAL;
6255 				goto err_out;
6256 			}
6257 
6258 			if (nsec & (1ULL << 63)) {
6259 				netdev_warn(tp->dev,
6260 					    "Start value (nsec) exceeds the 63-bit limit\n");
6261 				rval = -EINVAL;
6262 				goto err_out;
6263 			}
6264 
6265 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6266 			tw32(TG3_EAV_WATCHDOG0_MSB,
6267 			     TG3_EAV_WATCHDOG0_EN |
6268 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6269 
6270 			tw32(TG3_EAV_REF_CLCK_CTL,
6271 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6272 		} else {
6273 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6274 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6275 		}
6276 
6277 err_out:
6278 		tg3_full_unlock(tp);
6279 		return rval;
6280 
6281 	default:
6282 		break;
6283 	}
6284 
6285 	return -EOPNOTSUPP;
6286 }
6287 
6288 static const struct ptp_clock_info tg3_ptp_caps = {
6289 	.owner		= THIS_MODULE,
6290 	.name		= "tg3 clock",
6291 	.max_adj	= 250000000,
6292 	.n_alarm	= 0,
6293 	.n_ext_ts	= 0,
6294 	.n_per_out	= 1,
6295 	.pps		= 0,
6296 	.adjfreq	= tg3_ptp_adjfreq,
6297 	.adjtime	= tg3_ptp_adjtime,
6298 	.gettime	= tg3_ptp_gettime,
6299 	.settime	= tg3_ptp_settime,
6300 	.enable		= tg3_ptp_enable,
6301 };
6302 
6303 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6304 				     struct skb_shared_hwtstamps *timestamp)
6305 {
6306 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6307 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6308 					   tp->ptp_adjust);
6309 }
6310 
6311 /* tp->lock must be held */
6312 static void tg3_ptp_init(struct tg3 *tp)
6313 {
6314 	if (!tg3_flag(tp, PTP_CAPABLE))
6315 		return;
6316 
6317 	/* Initialize the hardware clock to the system time. */
6318 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6319 	tp->ptp_adjust = 0;
6320 	tp->ptp_info = tg3_ptp_caps;
6321 }
6322 
6323 /* tp->lock must be held */
6324 static void tg3_ptp_resume(struct tg3 *tp)
6325 {
6326 	if (!tg3_flag(tp, PTP_CAPABLE))
6327 		return;
6328 
6329 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6330 	tp->ptp_adjust = 0;
6331 }
6332 
6333 static void tg3_ptp_fini(struct tg3 *tp)
6334 {
6335 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6336 		return;
6337 
6338 	ptp_clock_unregister(tp->ptp_clock);
6339 	tp->ptp_clock = NULL;
6340 	tp->ptp_adjust = 0;
6341 }
6342 
6343 static inline int tg3_irq_sync(struct tg3 *tp)
6344 {
6345 	return tp->irq_sync;
6346 }
6347 
6348 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6349 {
6350 	int i;
6351 
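	/* Offset dst by off so that the register at absolute offset
	 * (off + i) is stored at the same offset within the dump
	 * buffer, preserving the register layout.
	 */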
6352 	dst = (u32 *)((u8 *)dst + off);
6353 	for (i = 0; i < len; i += sizeof(u32))
6354 		*dst++ = tr32(off + i);
6355 }
6356 
6357 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6358 {
6359 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6360 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6361 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6362 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6363 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6364 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6365 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6366 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6367 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6368 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6369 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6370 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6371 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6372 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6373 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6374 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6375 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6376 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6377 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6378 
6379 	if (tg3_flag(tp, SUPPORT_MSIX))
6380 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6381 
6382 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6383 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6384 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6385 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6386 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6387 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6388 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6389 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6390 
6391 	if (!tg3_flag(tp, 5705_PLUS)) {
6392 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6393 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6394 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6395 	}
6396 
6397 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6398 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6399 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6400 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6401 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6402 
6403 	if (tg3_flag(tp, NVRAM))
6404 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6405 }
6406 
6407 static void tg3_dump_state(struct tg3 *tp)
6408 {
6409 	int i;
6410 	u32 *regs;
6411 
6412 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6413 	if (!regs)
6414 		return;
6415 
6416 	if (tg3_flag(tp, PCI_EXPRESS)) {
6417 		/* Read up to but not including private PCI registers */
6418 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6419 			regs[i / sizeof(u32)] = tr32(i);
6420 	} else
6421 		tg3_dump_legacy_regs(tp, regs);
6422 
6423 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6424 		if (!regs[i + 0] && !regs[i + 1] &&
6425 		    !regs[i + 2] && !regs[i + 3])
6426 			continue;
6427 
6428 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6429 			   i * 4,
6430 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6431 	}
6432 
6433 	kfree(regs);
6434 
6435 	for (i = 0; i < tp->irq_cnt; i++) {
6436 		struct tg3_napi *tnapi = &tp->napi[i];
6437 
6438 		/* SW status block */
6439 		netdev_err(tp->dev,
6440 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6441 			   i,
6442 			   tnapi->hw_status->status,
6443 			   tnapi->hw_status->status_tag,
6444 			   tnapi->hw_status->rx_jumbo_consumer,
6445 			   tnapi->hw_status->rx_consumer,
6446 			   tnapi->hw_status->rx_mini_consumer,
6447 			   tnapi->hw_status->idx[0].rx_producer,
6448 			   tnapi->hw_status->idx[0].tx_consumer);
6449 
6450 		netdev_err(tp->dev,
6451 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6452 			   i,
6453 			   tnapi->last_tag, tnapi->last_irq_tag,
6454 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6455 			   tnapi->rx_rcb_ptr,
6456 			   tnapi->prodring.rx_std_prod_idx,
6457 			   tnapi->prodring.rx_std_cons_idx,
6458 			   tnapi->prodring.rx_jmb_prod_idx,
6459 			   tnapi->prodring.rx_jmb_cons_idx);
6460 	}
6461 }
6462 
6463 /* This is called whenever we suspect that the system chipset is re-
6464  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6465  * is bogus tx completions. We try to recover by setting the
6466  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6467  * in the workqueue.
6468  */
6469 static void tg3_tx_recover(struct tg3 *tp)
6470 {
6471 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6472 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6473 
6474 	netdev_warn(tp->dev,
6475 		    "The system may be re-ordering memory-mapped I/O "
6476 		    "cycles to the network device, attempting to recover. "
6477 		    "Please report the problem to the driver maintainer "
6478 		    "and include system chipset information.\n");
6479 
6480 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6481 }
6482 
6483 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6484 {
6485 	/* Tell compiler to fetch tx indices from memory. */
6486 	barrier();
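	/* Free slots = tx_pending minus descriptors still in flight;
	 * the AND with (TG3_TX_RING_SIZE - 1) handles index wraparound
	 * on the power-of-2 ring.
	 */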
6487 	return tnapi->tx_pending -
6488 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6489 }
6490 
6491 /* Tigon3 never reports partial packet sends.  So we do not
6492  * need special logic to handle SKBs that have not had all
6493  * of their frags sent yet, like SunGEM does.
6494  */
6495 static void tg3_tx(struct tg3_napi *tnapi)
6496 {
6497 	struct tg3 *tp = tnapi->tp;
6498 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6499 	u32 sw_idx = tnapi->tx_cons;
6500 	struct netdev_queue *txq;
6501 	int index = tnapi - tp->napi;
6502 	unsigned int pkts_compl = 0, bytes_compl = 0;
6503 
6504 	if (tg3_flag(tp, ENABLE_TSS))
6505 		index--;
6506 
6507 	txq = netdev_get_tx_queue(tp->dev, index);
6508 
6509 	while (sw_idx != hw_idx) {
6510 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6511 		struct sk_buff *skb = ri->skb;
6512 		int i, tx_bug = 0;
6513 
6514 		if (unlikely(skb == NULL)) {
6515 			tg3_tx_recover(tp);
6516 			return;
6517 		}
6518 
6519 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6520 			struct skb_shared_hwtstamps timestamp;
6521 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6522 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6523 
6524 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6525 
6526 			skb_tstamp_tx(skb, &timestamp);
6527 		}
6528 
6529 		pci_unmap_single(tp->pdev,
6530 				 dma_unmap_addr(ri, mapping),
6531 				 skb_headlen(skb),
6532 				 PCI_DMA_TODEVICE);
6533 
6534 		ri->skb = NULL;
6535 
6536 		while (ri->fragmented) {
6537 			ri->fragmented = false;
6538 			sw_idx = NEXT_TX(sw_idx);
6539 			ri = &tnapi->tx_buffers[sw_idx];
6540 		}
6541 
6542 		sw_idx = NEXT_TX(sw_idx);
6543 
6544 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6545 			ri = &tnapi->tx_buffers[sw_idx];
6546 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6547 				tx_bug = 1;
6548 
6549 			pci_unmap_page(tp->pdev,
6550 				       dma_unmap_addr(ri, mapping),
6551 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6552 				       PCI_DMA_TODEVICE);
6553 
6554 			while (ri->fragmented) {
6555 				ri->fragmented = false;
6556 				sw_idx = NEXT_TX(sw_idx);
6557 				ri = &tnapi->tx_buffers[sw_idx];
6558 			}
6559 
6560 			sw_idx = NEXT_TX(sw_idx);
6561 		}
6562 
6563 		pkts_compl++;
6564 		bytes_compl += skb->len;
6565 
6566 		dev_kfree_skb(skb);
6567 
6568 		if (unlikely(tx_bug)) {
6569 			tg3_tx_recover(tp);
6570 			return;
6571 		}
6572 	}
6573 
6574 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6575 
6576 	tnapi->tx_cons = sw_idx;
6577 
6578 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6579 	 * before checking for netif_queue_stopped().  Without the
6580 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6581 	 * will miss it and cause the queue to be stopped forever.
6582 	 */
6583 	smp_mb();
6584 
6585 	if (unlikely(netif_tx_queue_stopped(txq) &&
6586 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6587 		__netif_tx_lock(txq, smp_processor_id());
6588 		if (netif_tx_queue_stopped(txq) &&
6589 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6590 			netif_tx_wake_queue(txq);
6591 		__netif_tx_unlock(txq);
6592 	}
6593 }
6594 
6595 static void tg3_frag_free(bool is_frag, void *data)
6596 {
6597 	if (is_frag)
6598 		put_page(virt_to_head_page(data));
6599 	else
6600 		kfree(data);
6601 }
6602 
6603 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6604 {
6605 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6606 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
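	/* Recompute the allocation size the same way tg3_alloc_rx_data()
	 * did: buffers that fit in a page came from netdev_alloc_frag(),
	 * larger ones from kmalloc(), so the free path must match.
	 */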
6607 
6608 	if (!ri->data)
6609 		return;
6610 
6611 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6612 			 map_sz, PCI_DMA_FROMDEVICE);
6613 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6614 	ri->data = NULL;
6615 }
6616 
6617 
6618 /* Returns the size of the skb allocated, or < 0 on error.
6619  *
6620  * We only need to fill in the address because the other members
6621  * of the RX descriptor are invariant; see tg3_init_rings.
6622  *
6623  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6624  * posting buffers we only dirty the first cache line of the RX
6625  * descriptor (containing the address).  Whereas for the RX status
6626  * buffers the cpu only reads the last cacheline of the RX descriptor
6627  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6628  */
6629 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6630 			     u32 opaque_key, u32 dest_idx_unmasked,
6631 			     unsigned int *frag_size)
6632 {
6633 	struct tg3_rx_buffer_desc *desc;
6634 	struct ring_info *map;
6635 	u8 *data;
6636 	dma_addr_t mapping;
6637 	int skb_size, data_size, dest_idx;
6638 
6639 	switch (opaque_key) {
6640 	case RXD_OPAQUE_RING_STD:
6641 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6642 		desc = &tpr->rx_std[dest_idx];
6643 		map = &tpr->rx_std_buffers[dest_idx];
6644 		data_size = tp->rx_pkt_map_sz;
6645 		break;
6646 
6647 	case RXD_OPAQUE_RING_JUMBO:
6648 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6649 		desc = &tpr->rx_jmb[dest_idx].std;
6650 		map = &tpr->rx_jmb_buffers[dest_idx];
6651 		data_size = TG3_RX_JMB_MAP_SZ;
6652 		break;
6653 
6654 	default:
6655 		return -EINVAL;
6656 	}
6657 
6658 	/* Do not overwrite any of the map or rp information
6659 	 * until we are sure we can commit to a new buffer.
6660 	 *
6661 	 * Callers depend upon this behavior and assume that
6662 	 * we leave everything unchanged if we fail.
6663 	 */
6664 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6665 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6666 	if (skb_size <= PAGE_SIZE) {
6667 		data = netdev_alloc_frag(skb_size);
6668 		*frag_size = skb_size;
6669 	} else {
6670 		data = kmalloc(skb_size, GFP_ATOMIC);
6671 		*frag_size = 0;
6672 	}
6673 	if (!data)
6674 		return -ENOMEM;
6675 
6676 	mapping = pci_map_single(tp->pdev,
6677 				 data + TG3_RX_OFFSET(tp),
6678 				 data_size,
6679 				 PCI_DMA_FROMDEVICE);
6680 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6681 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6682 		return -EIO;
6683 	}
6684 
6685 	map->data = data;
6686 	dma_unmap_addr_set(map, mapping, mapping);
6687 
6688 	desc->addr_hi = ((u64)mapping >> 32);
6689 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6690 
6691 	return data_size;
6692 }
6693 
6694 /* We only need to move over the address because the other
6695  * members of the RX descriptor are invariant.  See notes above
6696  * tg3_alloc_rx_data for full details.
6697  */
6698 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6699 			   struct tg3_rx_prodring_set *dpr,
6700 			   u32 opaque_key, int src_idx,
6701 			   u32 dest_idx_unmasked)
6702 {
6703 	struct tg3 *tp = tnapi->tp;
6704 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6705 	struct ring_info *src_map, *dest_map;
6706 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6707 	int dest_idx;
6708 
6709 	switch (opaque_key) {
6710 	case RXD_OPAQUE_RING_STD:
6711 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6712 		dest_desc = &dpr->rx_std[dest_idx];
6713 		dest_map = &dpr->rx_std_buffers[dest_idx];
6714 		src_desc = &spr->rx_std[src_idx];
6715 		src_map = &spr->rx_std_buffers[src_idx];
6716 		break;
6717 
6718 	case RXD_OPAQUE_RING_JUMBO:
6719 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6720 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6721 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6722 		src_desc = &spr->rx_jmb[src_idx].std;
6723 		src_map = &spr->rx_jmb_buffers[src_idx];
6724 		break;
6725 
6726 	default:
6727 		return;
6728 	}
6729 
6730 	dest_map->data = src_map->data;
6731 	dma_unmap_addr_set(dest_map, mapping,
6732 			   dma_unmap_addr(src_map, mapping));
6733 	dest_desc->addr_hi = src_desc->addr_hi;
6734 	dest_desc->addr_lo = src_desc->addr_lo;
6735 
6736 	/* Ensure that the update to the skb happens after the physical
6737 	 * addresses have been transferred to the new BD location.
6738 	 */
6739 	smp_wmb();
6740 
6741 	src_map->data = NULL;
6742 }
6743 
6744 /* The RX ring scheme is composed of multiple rings which post fresh
6745  * buffers to the chip, and one special ring the chip uses to report
6746  * status back to the host.
6747  *
6748  * The special ring reports the status of received packets to the
6749  * host.  The chip does not write into the original descriptor the
6750  * RX buffer was obtained from.  The chip simply takes the original
6751  * descriptor as provided by the host, updates the status and length
6752  * field, then writes this into the next status ring entry.
6753  *
6754  * Each ring the host uses to post buffers to the chip is described
6755  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
6756  * arrives, it is first placed into on-chip RAM.  Once the packet's
6757  * length is known, the chip walks down the TG3_BDINFO entries to
6758  * select the ring: each TG3_BDINFO specifies a MAXLEN field, and the
6759  * first one whose MAXLEN covers the new packet's length is chosen.
6760  *
6761  * The "separate ring for rx status" scheme may sound queer, but it makes
6762  * sense from a cache coherency perspective.  If only the host writes
6763  * to the buffer post rings, and only the chip writes to the rx status
6764  * rings, then cache lines never move beyond shared-modified state.
6765  * If both the host and chip were to write into the same ring, cache line
6766  * eviction could occur since both entities want it in an exclusive state.
6767  */
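/* A simplified sketch of the flow described above:
 *
 *   host --posts buffers--> std/jumbo producer rings --> chip
 *   chip --status+length--> rx return (status) ring  --> host
 *
 * tg3_rx() below consumes the return ring and reposts buffers.
 */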
6768 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6769 {
6770 	struct tg3 *tp = tnapi->tp;
6771 	u32 work_mask, rx_std_posted = 0;
6772 	u32 std_prod_idx, jmb_prod_idx;
6773 	u32 sw_idx = tnapi->rx_rcb_ptr;
6774 	u16 hw_idx;
6775 	int received;
6776 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6777 
6778 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6779 	/*
6780 	 * We need to order the read of hw_idx and the read of
6781 	 * the opaque cookie.
6782 	 */
6783 	rmb();
6784 	work_mask = 0;
6785 	received = 0;
6786 	std_prod_idx = tpr->rx_std_prod_idx;
6787 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6788 	while (sw_idx != hw_idx && budget > 0) {
6789 		struct ring_info *ri;
6790 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6791 		unsigned int len;
6792 		struct sk_buff *skb;
6793 		dma_addr_t dma_addr;
6794 		u32 opaque_key, desc_idx, *post_ptr;
6795 		u8 *data;
6796 		u64 tstamp = 0;
6797 
6798 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6799 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6800 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6801 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6802 			dma_addr = dma_unmap_addr(ri, mapping);
6803 			data = ri->data;
6804 			post_ptr = &std_prod_idx;
6805 			rx_std_posted++;
6806 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6807 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6808 			dma_addr = dma_unmap_addr(ri, mapping);
6809 			data = ri->data;
6810 			post_ptr = &jmb_prod_idx;
6811 		} else
6812 			goto next_pkt_nopost;
6813 
6814 		work_mask |= opaque_key;
6815 
6816 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6817 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6818 		drop_it:
6819 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6820 				       desc_idx, *post_ptr);
6821 		drop_it_no_recycle:
6822 			/* Other statistics are kept track of by the card. */
6823 			tp->rx_dropped++;
6824 			goto next_pkt;
6825 		}
6826 
6827 		prefetch(data + TG3_RX_OFFSET(tp));
6828 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6829 		      ETH_FCS_LEN;
6830 
6831 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6832 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6833 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6834 		     RXD_FLAG_PTPSTAT_PTPV2) {
6835 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6836 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6837 		}
6838 
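		/* Above the copy threshold, hand the DMA buffer itself to
		 * the stack via build_skb() and post a fresh buffer; at or
		 * below it, copy into a small skb and recycle the buffer.
		 */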
6839 		if (len > TG3_RX_COPY_THRESH(tp)) {
6840 			int skb_size;
6841 			unsigned int frag_size;
6842 
6843 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6844 						    *post_ptr, &frag_size);
6845 			if (skb_size < 0)
6846 				goto drop_it;
6847 
6848 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6849 					 PCI_DMA_FROMDEVICE);
6850 
6851 			skb = build_skb(data, frag_size);
6852 			if (!skb) {
6853 				tg3_frag_free(frag_size != 0, data);
6854 				goto drop_it_no_recycle;
6855 			}
6856 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6857 			/* Ensure that the update to the data happens
6858 			 * after the usage of the old DMA mapping.
6859 			 */
6860 			smp_wmb();
6861 
6862 			ri->data = NULL;
6863 
6864 		} else {
6865 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6866 				       desc_idx, *post_ptr);
6867 
6868 			skb = netdev_alloc_skb(tp->dev,
6869 					       len + TG3_RAW_IP_ALIGN);
6870 			if (skb == NULL)
6871 				goto drop_it_no_recycle;
6872 
6873 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6874 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6875 			memcpy(skb->data,
6876 			       data + TG3_RX_OFFSET(tp),
6877 			       len);
6878 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6879 		}
6880 
6881 		skb_put(skb, len);
6882 		if (tstamp)
6883 			tg3_hwclock_to_timestamp(tp, tstamp,
6884 						 skb_hwtstamps(skb));
6885 
6886 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6887 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6888 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6889 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6890 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6891 		else
6892 			skb_checksum_none_assert(skb);
6893 
6894 		skb->protocol = eth_type_trans(skb, tp->dev);
6895 
6896 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6897 		    skb->protocol != htons(ETH_P_8021Q)) {
6898 			dev_kfree_skb(skb);
6899 			goto drop_it_no_recycle;
6900 		}
6901 
6902 		if (desc->type_flags & RXD_FLAG_VLAN &&
6903 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6904 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6905 					       desc->err_vlan & RXD_VLAN_MASK);
6906 
6907 		napi_gro_receive(&tnapi->napi, skb);
6908 
6909 		received++;
6910 		budget--;
6911 
6912 next_pkt:
6913 		(*post_ptr)++;
6914 
6915 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6916 			tpr->rx_std_prod_idx = std_prod_idx &
6917 					       tp->rx_std_ring_mask;
6918 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6919 				     tpr->rx_std_prod_idx);
6920 			work_mask &= ~RXD_OPAQUE_RING_STD;
6921 			rx_std_posted = 0;
6922 		}
6923 next_pkt_nopost:
6924 		sw_idx++;
6925 		sw_idx &= tp->rx_ret_ring_mask;
6926 
6927 		/* Refresh hw_idx to see if there is new work */
6928 		if (sw_idx == hw_idx) {
6929 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6930 			rmb();
6931 		}
6932 	}
6933 
6934 	/* ACK the status ring. */
6935 	tnapi->rx_rcb_ptr = sw_idx;
6936 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6937 
6938 	/* Refill RX ring(s). */
6939 	if (!tg3_flag(tp, ENABLE_RSS)) {
6940 		/* Sync BD data before updating mailbox */
6941 		wmb();
6942 
6943 		if (work_mask & RXD_OPAQUE_RING_STD) {
6944 			tpr->rx_std_prod_idx = std_prod_idx &
6945 					       tp->rx_std_ring_mask;
6946 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6947 				     tpr->rx_std_prod_idx);
6948 		}
6949 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6950 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6951 					       tp->rx_jmb_ring_mask;
6952 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6953 				     tpr->rx_jmb_prod_idx);
6954 		}
6955 		mmiowb();
6956 	} else if (work_mask) {
6957 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6958 		 * updated before the producer indices can be updated.
6959 		 */
6960 		smp_wmb();
6961 
6962 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6963 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6964 
6965 		if (tnapi != &tp->napi[1]) {
6966 			tp->rx_refill = true;
6967 			napi_schedule(&tp->napi[1].napi);
6968 		}
6969 	}
6970 
6971 	return received;
6972 }
6973 
6974 static void tg3_poll_link(struct tg3 *tp)
6975 {
6976 	/* handle link change and other phy events */
6977 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6978 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6979 
6980 		if (sblk->status & SD_STATUS_LINK_CHG) {
6981 			sblk->status = SD_STATUS_UPDATED |
6982 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6983 			spin_lock(&tp->lock);
6984 			if (tg3_flag(tp, USE_PHYLIB)) {
6985 				tw32_f(MAC_STATUS,
6986 				     (MAC_STATUS_SYNC_CHANGED |
6987 				      MAC_STATUS_CFG_CHANGED |
6988 				      MAC_STATUS_MI_COMPLETION |
6989 				      MAC_STATUS_LNKSTATE_CHANGED));
6990 				udelay(40);
6991 			} else
6992 				tg3_setup_phy(tp, false);
6993 			spin_unlock(&tp->lock);
6994 		}
6995 	}
6996 }
6997 
6998 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6999 				struct tg3_rx_prodring_set *dpr,
7000 				struct tg3_rx_prodring_set *spr)
7001 {
7002 	u32 si, di, cpycnt, src_prod_idx;
7003 	int i, err = 0;
7004 
7005 	while (1) {
7006 		src_prod_idx = spr->rx_std_prod_idx;
7007 
7008 		/* Make sure updates to the rx_std_buffers[] entries and the
7009 		 * standard producer index are seen in the correct order.
7010 		 */
7011 		smp_rmb();
7012 
7013 		if (spr->rx_std_cons_idx == src_prod_idx)
7014 			break;
7015 
7016 		if (spr->rx_std_cons_idx < src_prod_idx)
7017 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7018 		else
7019 			cpycnt = tp->rx_std_ring_mask + 1 -
7020 				 spr->rx_std_cons_idx;
7021 
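		/* Clamp the copy to the contiguous run available in the
		 * source ring and the contiguous space left in the
		 * destination ring, so one memcpy suffices before a wrap.
		 */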
7022 		cpycnt = min(cpycnt,
7023 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7024 
7025 		si = spr->rx_std_cons_idx;
7026 		di = dpr->rx_std_prod_idx;
7027 
7028 		for (i = di; i < di + cpycnt; i++) {
7029 			if (dpr->rx_std_buffers[i].data) {
7030 				cpycnt = i - di;
7031 				err = -ENOSPC;
7032 				break;
7033 			}
7034 		}
7035 
7036 		if (!cpycnt)
7037 			break;
7038 
7039 		/* Ensure that updates to the rx_std_buffers ring and the
7040 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7041 		 * ordered correctly WRT the skb check above.
7042 		 */
7043 		smp_rmb();
7044 
7045 		memcpy(&dpr->rx_std_buffers[di],
7046 		       &spr->rx_std_buffers[si],
7047 		       cpycnt * sizeof(struct ring_info));
7048 
7049 		for (i = 0; i < cpycnt; i++, di++, si++) {
7050 			struct tg3_rx_buffer_desc *sbd, *dbd;
7051 			sbd = &spr->rx_std[si];
7052 			dbd = &dpr->rx_std[di];
7053 			dbd->addr_hi = sbd->addr_hi;
7054 			dbd->addr_lo = sbd->addr_lo;
7055 		}
7056 
7057 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7058 				       tp->rx_std_ring_mask;
7059 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7060 				       tp->rx_std_ring_mask;
7061 	}
7062 
7063 	while (1) {
7064 		src_prod_idx = spr->rx_jmb_prod_idx;
7065 
7066 		/* Make sure updates to the rx_jmb_buffers[] entries and
7067 		 * the jumbo producer index are seen in the correct order.
7068 		 */
7069 		smp_rmb();
7070 
7071 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7072 			break;
7073 
7074 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7075 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7076 		else
7077 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7078 				 spr->rx_jmb_cons_idx;
7079 
7080 		cpycnt = min(cpycnt,
7081 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7082 
7083 		si = spr->rx_jmb_cons_idx;
7084 		di = dpr->rx_jmb_prod_idx;
7085 
7086 		for (i = di; i < di + cpycnt; i++) {
7087 			if (dpr->rx_jmb_buffers[i].data) {
7088 				cpycnt = i - di;
7089 				err = -ENOSPC;
7090 				break;
7091 			}
7092 		}
7093 
7094 		if (!cpycnt)
7095 			break;
7096 
7097 		/* Ensure that updates to the rx_jmb_buffers ring and the
7098 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7099 		 * ordered correctly WRT the skb check above.
7100 		 */
7101 		smp_rmb();
7102 
7103 		memcpy(&dpr->rx_jmb_buffers[di],
7104 		       &spr->rx_jmb_buffers[si],
7105 		       cpycnt * sizeof(struct ring_info));
7106 
7107 		for (i = 0; i < cpycnt; i++, di++, si++) {
7108 			struct tg3_rx_buffer_desc *sbd, *dbd;
7109 			sbd = &spr->rx_jmb[si].std;
7110 			dbd = &dpr->rx_jmb[di].std;
7111 			dbd->addr_hi = sbd->addr_hi;
7112 			dbd->addr_lo = sbd->addr_lo;
7113 		}
7114 
7115 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7116 				       tp->rx_jmb_ring_mask;
7117 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7118 				       tp->rx_jmb_ring_mask;
7119 	}
7120 
7121 	return err;
7122 }
7123 
7124 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7125 {
7126 	struct tg3 *tp = tnapi->tp;
7127 
7128 	/* run TX completion thread */
7129 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7130 		tg3_tx(tnapi);
7131 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7132 			return work_done;
7133 	}
7134 
7135 	if (!tnapi->rx_rcb_prod_idx)
7136 		return work_done;
7137 
7138 	/* run RX thread, within the bounds set by NAPI.
7139 	 * All RX "locking" is done by ensuring outside
7140 	 * code synchronizes with tg3->napi.poll()
7141 	 */
7142 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7143 		work_done += tg3_rx(tnapi, budget - work_done);
7144 
7145 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7146 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7147 		int i, err = 0;
7148 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7149 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7150 
7151 		tp->rx_refill = false;
7152 		for (i = 1; i <= tp->rxq_cnt; i++)
7153 			err |= tg3_rx_prodring_xfer(tp, dpr,
7154 						    &tp->napi[i].prodring);
7155 
7156 		wmb();
7157 
7158 		if (std_prod_idx != dpr->rx_std_prod_idx)
7159 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7160 				     dpr->rx_std_prod_idx);
7161 
7162 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7163 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7164 				     dpr->rx_jmb_prod_idx);
7165 
7166 		mmiowb();
7167 
7168 		if (err)
7169 			tw32_f(HOSTCC_MODE, tp->coal_now);
7170 	}
7171 
7172 	return work_done;
7173 }
7174 
7175 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7176 {
7177 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7178 		schedule_work(&tp->reset_task);
7179 }
7180 
7181 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7182 {
7183 	cancel_work_sync(&tp->reset_task);
7184 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7185 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7186 }
7187 
7188 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7189 {
7190 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7191 	struct tg3 *tp = tnapi->tp;
7192 	int work_done = 0;
7193 	struct tg3_hw_status *sblk = tnapi->hw_status;
7194 
7195 	while (1) {
7196 		work_done = tg3_poll_work(tnapi, work_done, budget);
7197 
7198 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7199 			goto tx_recovery;
7200 
7201 		if (unlikely(work_done >= budget))
7202 			break;
7203 
7204 		/* tp->last_tag is used in tg3_int_reenable() below
7205 		 * to tell the hw how much work has been processed,
7206 		 * so we must read it before checking for more work.
7207 		 */
7208 		tnapi->last_tag = sblk->status_tag;
7209 		tnapi->last_irq_tag = tnapi->last_tag;
7210 		rmb();
7211 
7212 		/* check for RX/TX work to do */
7213 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7214 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7215 
7216 			/* This test is not race-free, but looping again
7217 			 * reduces the number of interrupts taken.
7218 			 */
7219 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7220 				continue;
7221 
7222 			napi_complete(napi);
7223 			/* Reenable interrupts. */
7224 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7225 
7226 			/* This test is synchronized by napi_schedule()
7227 			 * and napi_complete() to close the race condition.
7228 			 */
7229 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7230 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7231 						  HOSTCC_MODE_ENABLE |
7232 						  tnapi->coal_now);
7233 			}
7234 			mmiowb();
7235 			break;
7236 		}
7237 	}
7238 
7239 	return work_done;
7240 
7241 tx_recovery:
7242 	/* work_done is guaranteed to be less than budget. */
7243 	napi_complete(napi);
7244 	tg3_reset_task_schedule(tp);
7245 	return work_done;
7246 }
7247 
7248 static void tg3_process_error(struct tg3 *tp)
7249 {
7250 	u32 val;
7251 	bool real_error = false;
7252 
7253 	if (tg3_flag(tp, ERROR_PROCESSED))
7254 		return;
7255 
7256 	/* Check Flow Attention register */
7257 	val = tr32(HOSTCC_FLOW_ATTN);
7258 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7259 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7260 		real_error = true;
7261 	}
7262 
7263 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7264 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7265 		real_error = true;
7266 	}
7267 
7268 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7269 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7270 		real_error = true;
7271 	}
7272 
7273 	if (!real_error)
7274 		return;
7275 
7276 	tg3_dump_state(tp);
7277 
7278 	tg3_flag_set(tp, ERROR_PROCESSED);
7279 	tg3_reset_task_schedule(tp);
7280 }
7281 
7282 static int tg3_poll(struct napi_struct *napi, int budget)
7283 {
7284 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7285 	struct tg3 *tp = tnapi->tp;
7286 	int work_done = 0;
7287 	struct tg3_hw_status *sblk = tnapi->hw_status;
7288 
7289 	while (1) {
7290 		if (sblk->status & SD_STATUS_ERROR)
7291 			tg3_process_error(tp);
7292 
7293 		tg3_poll_link(tp);
7294 
7295 		work_done = tg3_poll_work(tnapi, work_done, budget);
7296 
7297 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7298 			goto tx_recovery;
7299 
7300 		if (unlikely(work_done >= budget))
7301 			break;
7302 
7303 		if (tg3_flag(tp, TAGGED_STATUS)) {
7304 			/* tp->last_tag is used in tg3_int_reenable() below
7305 			 * to tell the hw how much work has been processed,
7306 			 * so we must read it before checking for more work.
7307 			 */
7308 			tnapi->last_tag = sblk->status_tag;
7309 			tnapi->last_irq_tag = tnapi->last_tag;
7310 			rmb();
7311 		} else
7312 			sblk->status &= ~SD_STATUS_UPDATED;
7313 
7314 		if (likely(!tg3_has_work(tnapi))) {
7315 			napi_complete(napi);
7316 			tg3_int_reenable(tnapi);
7317 			break;
7318 		}
7319 	}
7320 
7321 	return work_done;
7322 
7323 tx_recovery:
7324 	/* work_done is guaranteed to be less than budget. */
7325 	napi_complete(napi);
7326 	tg3_reset_task_schedule(tp);
7327 	return work_done;
7328 }
7329 
7330 static void tg3_napi_disable(struct tg3 *tp)
7331 {
7332 	int i;
7333 
7334 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7335 		napi_disable(&tp->napi[i].napi);
7336 }
7337 
7338 static void tg3_napi_enable(struct tg3 *tp)
7339 {
7340 	int i;
7341 
7342 	for (i = 0; i < tp->irq_cnt; i++)
7343 		napi_enable(&tp->napi[i].napi);
7344 }
7345 
7346 static void tg3_napi_init(struct tg3 *tp)
7347 {
7348 	int i;
7349 
7350 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7351 	for (i = 1; i < tp->irq_cnt; i++)
7352 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7353 }
7354 
7355 static void tg3_napi_fini(struct tg3 *tp)
7356 {
7357 	int i;
7358 
7359 	for (i = 0; i < tp->irq_cnt; i++)
7360 		netif_napi_del(&tp->napi[i].napi);
7361 }
7362 
7363 static inline void tg3_netif_stop(struct tg3 *tp)
7364 {
7365 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7366 	tg3_napi_disable(tp);
7367 	netif_carrier_off(tp->dev);
7368 	netif_tx_disable(tp->dev);
7369 }
7370 
7371 /* tp->lock must be held */
7372 static inline void tg3_netif_start(struct tg3 *tp)
7373 {
7374 	tg3_ptp_resume(tp);
7375 
7376 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7377 	 * appropriate so long as all callers are assured to
7378 	 * have free tx slots (such as after tg3_init_hw)
7379 	 */
7380 	netif_tx_wake_all_queues(tp->dev);
7381 
7382 	if (tp->link_up)
7383 		netif_carrier_on(tp->dev);
7384 
7385 	tg3_napi_enable(tp);
7386 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7387 	tg3_enable_ints(tp);
7388 }
7389 
7390 static void tg3_irq_quiesce(struct tg3 *tp)
7391 {
7392 	int i;
7393 
7394 	BUG_ON(tp->irq_sync);
7395 
7396 	tp->irq_sync = 1;
7397 	smp_mb();
7398 
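	/* With irq_sync now visible to the handlers, wait for any
	 * handler still running on another CPU to finish.
	 */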
7399 	for (i = 0; i < tp->irq_cnt; i++)
7400 		synchronize_irq(tp->napi[i].irq_vec);
7401 }
7402 
7403 /* Fully shut down all tg3 driver activity elsewhere in the system.
7404  * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7405  * well.  Most of the time this is only necessary when shutting down
7406  * the device.
7407  */
7408 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7409 {
7410 	spin_lock_bh(&tp->lock);
7411 	if (irq_sync)
7412 		tg3_irq_quiesce(tp);
7413 }
7414 
7415 static inline void tg3_full_unlock(struct tg3 *tp)
7416 {
7417 	spin_unlock_bh(&tp->lock);
7418 }
7419 
7420 /* One-shot MSI handler - the chip automatically disables the interrupt
7421  * after sending the MSI, so the driver doesn't have to do it.
7422  */
7423 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7424 {
7425 	struct tg3_napi *tnapi = dev_id;
7426 	struct tg3 *tp = tnapi->tp;
7427 
7428 	prefetch(tnapi->hw_status);
7429 	if (tnapi->rx_rcb)
7430 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7431 
7432 	if (likely(!tg3_irq_sync(tp)))
7433 		napi_schedule(&tnapi->napi);
7434 
7435 	return IRQ_HANDLED;
7436 }
7437 
7438 /* MSI ISR - No need to check for interrupt sharing and no need to
7439  * flush status block and interrupt mailbox. PCI ordering rules
7440  * guarantee that MSI will arrive after the status block.
7441  */
7442 static irqreturn_t tg3_msi(int irq, void *dev_id)
7443 {
7444 	struct tg3_napi *tnapi = dev_id;
7445 	struct tg3 *tp = tnapi->tp;
7446 
7447 	prefetch(tnapi->hw_status);
7448 	if (tnapi->rx_rcb)
7449 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7450 	/*
7451 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7452 	 * chip-internal interrupt pending events.
7453 	 * Writing non-zero to intr-mbox-0 additionally tells the
7454 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7455 	 * event coalescing.
7456 	 */
7457 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7458 	if (likely(!tg3_irq_sync(tp)))
7459 		napi_schedule(&tnapi->napi);
7460 
7461 	return IRQ_RETVAL(1);
7462 }
7463 
7464 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7465 {
7466 	struct tg3_napi *tnapi = dev_id;
7467 	struct tg3 *tp = tnapi->tp;
7468 	struct tg3_hw_status *sblk = tnapi->hw_status;
7469 	unsigned int handled = 1;
7470 
7471 	/* In INTx mode, it is possible for the interrupt to arrive at
7472 	 * the CPU before the status block write that preceded the interrupt.
7473 	 * Reading the PCI State register will confirm whether the
7474 	 * interrupt is ours and will flush the status block.
7475 	 */
7476 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7477 		if (tg3_flag(tp, CHIP_RESETTING) ||
7478 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7479 			handled = 0;
7480 			goto out;
7481 		}
7482 	}
7483 
7484 	/*
7485 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7486 	 * chip-internal interrupt pending events.
7487 	 * Writing non-zero to intr-mbox-0 additionally tells the
7488 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7489 	 * event coalescing.
7490 	 *
7491 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7492 	 * spurious interrupts.  The flush impacts performance but
7493 	 * excessive spurious interrupts can be worse in some cases.
7494 	 */
7495 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7496 	if (tg3_irq_sync(tp))
7497 		goto out;
7498 	sblk->status &= ~SD_STATUS_UPDATED;
7499 	if (likely(tg3_has_work(tnapi))) {
7500 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7501 		napi_schedule(&tnapi->napi);
7502 	} else {
7503 		/* No work, shared interrupt perhaps?  Re-enable
7504 		 * interrupts, and flush that PCI write.
7505 		 */
7506 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7507 			       0x00000000);
7508 	}
7509 out:
7510 	return IRQ_RETVAL(handled);
7511 }
7512 
7513 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7514 {
7515 	struct tg3_napi *tnapi = dev_id;
7516 	struct tg3 *tp = tnapi->tp;
7517 	struct tg3_hw_status *sblk = tnapi->hw_status;
7518 	unsigned int handled = 1;
7519 
7520 	/* In INTx mode, it is possible for the interrupt to arrive at
7521 	 * the CPU before the status block write that preceded the interrupt.
7522 	 * Reading the PCI State register will confirm whether the
7523 	 * interrupt is ours and will flush the status block.
7524 	 */
7525 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7526 		if (tg3_flag(tp, CHIP_RESETTING) ||
7527 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7528 			handled = 0;
7529 			goto out;
7530 		}
7531 	}
7532 
7533 	/*
7534 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7535 	 * chip-internal interrupt pending events.
7536 	 * Writing non-zero to intr-mbox-0 additionally tells the
7537 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7538 	 * event coalescing.
7539 	 *
7540 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7541 	 * spurious interrupts.  The flush impacts performance but
7542 	 * excessive spurious interrupts can be worse in some cases.
7543 	 */
7544 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7545 
7546 	/*
7547 	 * In a shared interrupt configuration, sometimes other devices'
7548 	 * interrupts will scream.  We record the current status tag here
7549 	 * so that the above check can report that the screaming interrupts
7550 	 * are unhandled.  Eventually they will be silenced.
7551 	 */
7552 	tnapi->last_irq_tag = sblk->status_tag;
7553 
7554 	if (tg3_irq_sync(tp))
7555 		goto out;
7556 
7557 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7558 
7559 	napi_schedule(&tnapi->napi);
7560 
7561 out:
7562 	return IRQ_RETVAL(handled);
7563 }
7564 
7565 /* ISR for interrupt test */
7566 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7567 {
7568 	struct tg3_napi *tnapi = dev_id;
7569 	struct tg3 *tp = tnapi->tp;
7570 	struct tg3_hw_status *sblk = tnapi->hw_status;
7571 
7572 	if ((sblk->status & SD_STATUS_UPDATED) ||
7573 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7574 		tg3_disable_ints(tp);
7575 		return IRQ_RETVAL(1);
7576 	}
7577 	return IRQ_RETVAL(0);
7578 }
7579 
7580 #ifdef CONFIG_NET_POLL_CONTROLLER
7581 static void tg3_poll_controller(struct net_device *dev)
7582 {
7583 	int i;
7584 	struct tg3 *tp = netdev_priv(dev);
7585 
7586 	if (tg3_irq_sync(tp))
7587 		return;
7588 
7589 	for (i = 0; i < tp->irq_cnt; i++)
7590 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7591 }
7592 #endif
7593 
7594 static void tg3_tx_timeout(struct net_device *dev)
7595 {
7596 	struct tg3 *tp = netdev_priv(dev);
7597 
7598 	if (netif_msg_tx_err(tp)) {
7599 		netdev_err(dev, "transmit timed out, resetting\n");
7600 		tg3_dump_state(tp);
7601 	}
7602 
7603 	tg3_reset_task_schedule(tp);
7604 }
7605 
7606 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7607 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7608 {
7609 	u32 base = (u32) mapping & 0xffffffff;
7610 
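	/* The second term detects a 32-bit wrap of base + len + 8,
	 * i.e. a buffer straddling a 4GB boundary.  The first term is
	 * a cheap pre-check: 0xffffdcc0 is 4GB - 9024, presumably just
	 * above the largest chunk that gets mapped into a single BD.
	 */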
7611 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7612 }
7613 
7614 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7615  * of any 4GB boundaries: 4G, 8G, etc
7616  */
7617 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7618 					   u32 len, u32 mss)
7619 {
7620 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7621 		u32 base = (u32) mapping & 0xffffffff;
7622 
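		/* The low 14 bits hold the true MSS; HW_TSO_3 parts
		 * such as the 5762 pack header-length bits above them
		 * (see tg3_start_xmit), so mask those off first.
		 */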
7623 		return ((base + len + (mss & 0x3fff)) < base);
7624 	}
7625 	return 0;
7626 }
7627 
7628 /* Test for DMA addresses > 40-bit */
7629 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7630 					  int len)
7631 {
7632 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7633 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7634 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7635 	return 0;
7636 #else
7637 	return 0;
7638 #endif
7639 }
7640 
7641 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7642 				 dma_addr_t mapping, u32 len, u32 flags,
7643 				 u32 mss, u32 vlan)
7644 {
7645 	txbd->addr_hi = ((u64) mapping >> 32);
7646 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7647 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7648 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7649 }
7650 
7651 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7652 			    dma_addr_t map, u32 len, u32 flags,
7653 			    u32 mss, u32 vlan)
7654 {
7655 	struct tg3 *tp = tnapi->tp;
7656 	bool hwbug = false;
7657 
7658 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7659 		hwbug = true;
7660 
7661 	if (tg3_4g_overflow_test(map, len))
7662 		hwbug = true;
7663 
7664 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7665 		hwbug = true;
7666 
7667 	if (tg3_40bit_overflow_test(tp, map, len))
7668 		hwbug = true;
7669 
7670 	if (tp->dma_limit) {
7671 		u32 prvidx = *entry;
7672 		u32 tmp_flag = flags & ~TXD_FLAG_END;
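
		/* Split the mapping into dma_limit-sized chunks, each
		 * carried by its own BD; only the final piece keeps
		 * the caller's TXD_FLAG_END.
		 */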
7673 		while (len > tp->dma_limit && *budget) {
7674 			u32 frag_len = tp->dma_limit;
7675 			len -= tp->dma_limit;
7676 
7677 			/* Avoid the 8-byte DMA bug: keep the residue > 8 bytes */
7678 			if (len <= 8) {
7679 				len += tp->dma_limit / 2;
7680 				frag_len = tp->dma_limit / 2;
7681 			}
7682 
7683 			tnapi->tx_buffers[*entry].fragmented = true;
7684 
7685 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7686 				      frag_len, tmp_flag, mss, vlan);
7687 			*budget -= 1;
7688 			prvidx = *entry;
7689 			*entry = NEXT_TX(*entry);
7690 
7691 			map += frag_len;
7692 		}
7693 
7694 		if (len) {
7695 			if (*budget) {
7696 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7697 					      len, flags, mss, vlan);
7698 				*budget -= 1;
7699 				*entry = NEXT_TX(*entry);
7700 			} else {
7701 				hwbug = true;
7702 				tnapi->tx_buffers[prvidx].fragmented = false;
7703 			}
7704 		}
7705 	} else {
7706 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7707 			      len, flags, mss, vlan);
7708 		*entry = NEXT_TX(*entry);
7709 	}
7710 
7711 	return hwbug;
7712 }
7713 
7714 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7715 {
7716 	int i;
7717 	struct sk_buff *skb;
7718 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7719 
7720 	skb = txb->skb;
7721 	txb->skb = NULL;
7722 
7723 	pci_unmap_single(tnapi->tp->pdev,
7724 			 dma_unmap_addr(txb, mapping),
7725 			 skb_headlen(skb),
7726 			 PCI_DMA_TODEVICE);
7727 
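	/* Step over any extra BDs that tg3_tx_frag_set() appended
	 * when it split this mapping to honor tp->dma_limit.
	 */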
7728 	while (txb->fragmented) {
7729 		txb->fragmented = false;
7730 		entry = NEXT_TX(entry);
7731 		txb = &tnapi->tx_buffers[entry];
7732 	}
7733 
7734 	for (i = 0; i <= last; i++) {
7735 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7736 
7737 		entry = NEXT_TX(entry);
7738 		txb = &tnapi->tx_buffers[entry];
7739 
7740 		pci_unmap_page(tnapi->tp->pdev,
7741 			       dma_unmap_addr(txb, mapping),
7742 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7743 
7744 		while (txb->fragmented) {
7745 			txb->fragmented = false;
7746 			entry = NEXT_TX(entry);
7747 			txb = &tnapi->tx_buffers[entry];
7748 		}
7749 	}
7750 }
7751 
7752 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7753 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7754 				       struct sk_buff **pskb,
7755 				       u32 *entry, u32 *budget,
7756 				       u32 base_flags, u32 mss, u32 vlan)
7757 {
7758 	struct tg3 *tp = tnapi->tp;
7759 	struct sk_buff *new_skb, *skb = *pskb;
7760 	dma_addr_t new_addr = 0;
7761 	int ret = 0;
7762 
7763 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7764 		new_skb = skb_copy(skb, GFP_ATOMIC);
7765 	else {
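		/* The 5701 needs the tx buffer 4-byte aligned, so give
		 * the copy enough extra headroom to realign the data.
		 */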
7766 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7767 
7768 		new_skb = skb_copy_expand(skb,
7769 					  skb_headroom(skb) + more_headroom,
7770 					  skb_tailroom(skb), GFP_ATOMIC);
7771 	}
7772 
7773 	if (!new_skb) {
7774 		ret = -1;
7775 	} else {
7776 		/* New SKB is guaranteed to be linear. */
7777 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7778 					  PCI_DMA_TODEVICE);
7779 		/* Make sure the mapping succeeded */
7780 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7781 			dev_kfree_skb(new_skb);
7782 			ret = -1;
7783 		} else {
7784 			u32 save_entry = *entry;
7785 
7786 			base_flags |= TXD_FLAG_END;
7787 
7788 			tnapi->tx_buffers[*entry].skb = new_skb;
7789 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7790 					   mapping, new_addr);
7791 
7792 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7793 					    new_skb->len, base_flags,
7794 					    mss, vlan)) {
7795 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7796 				dev_kfree_skb(new_skb);
7797 				ret = -1;
7798 			}
7799 		}
7800 	}
7801 
7802 	dev_kfree_skb(skb);
7803 	*pskb = new_skb;
7804 	return ret;
7805 }
7806 
7807 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7808 
7809 /* Use GSO to work around a rare TSO bug that may be triggered when the
7810  * TSO header is greater than 80 bytes.
7811  */
7812 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7813 {
7814 	struct sk_buff *segs, *nskb;
7815 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7816 
7817 	/* Estimate the number of fragments in the worst case */
7818 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7819 		netif_stop_queue(tp->dev);
7820 
7821 		/* netif_tx_stop_queue() must be done before checking
7822 		 * the tx index in tg3_tx_avail() below, because in
7823 		 * tg3_tx(), we update tx index before checking for
7824 		 * netif_tx_queue_stopped().
7825 		 */
7826 		smp_mb();
7827 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7828 			return NETDEV_TX_BUSY;
7829 
7830 		netif_wake_queue(tp->dev);
7831 	}
7832 
7833 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7834 	if (IS_ERR(segs))
7835 		goto tg3_tso_bug_end;
7836 
7837 	do {
7838 		nskb = segs;
7839 		segs = segs->next;
7840 		nskb->next = NULL;
7841 		tg3_start_xmit(nskb, tp->dev);
7842 	} while (segs);
7843 
7844 tg3_tso_bug_end:
7845 	dev_kfree_skb(skb);
7846 
7847 	return NETDEV_TX_OK;
7848 }
7849 
7850 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7851  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7852  */
7853 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7854 {
7855 	struct tg3 *tp = netdev_priv(dev);
7856 	u32 len, entry, base_flags, mss, vlan = 0;
7857 	u32 budget;
7858 	int i = -1, would_hit_hwbug;
7859 	dma_addr_t mapping;
7860 	struct tg3_napi *tnapi;
7861 	struct netdev_queue *txq;
7862 	unsigned int last;
7863 
7864 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7865 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
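	/* With multivector TSS, vector 0 handles no tx work, so queue
	 * N is serviced by tp->napi[N + 1].
	 */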
7866 	if (tg3_flag(tp, ENABLE_TSS))
7867 		tnapi++;
7868 
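	/* budget is the number of free tx BDs left in the ring; every
	 * BD consumed by tg3_tx_frag_set() below decrements it.
	 */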
7869 	budget = tg3_tx_avail(tnapi);
7870 
7871 	/* We are running in BH disabled context with netif_tx_lock
7872 	 * and TX reclaim runs via tp->napi.poll inside of a software
7873 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7874 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7875 	 */
7876 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7877 		if (!netif_tx_queue_stopped(txq)) {
7878 			netif_tx_stop_queue(txq);
7879 
7880 			/* This is a hard error, log it. */
7881 			netdev_err(dev,
7882 				   "BUG! Tx Ring full when queue awake!\n");
7883 		}
7884 		return NETDEV_TX_BUSY;
7885 	}
7886 
7887 	entry = tnapi->tx_prod;
7888 	base_flags = 0;
7889 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7890 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7891 
7892 	mss = skb_shinfo(skb)->gso_size;
7893 	if (mss) {
7894 		struct iphdr *iph;
7895 		u32 tcp_opt_len, hdr_len;
7896 
7897 		if (skb_header_cloned(skb) &&
7898 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7899 			goto drop;
7900 
7901 		iph = ip_hdr(skb);
7902 		tcp_opt_len = tcp_optlen(skb);
7903 
7904 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7905 
7906 		if (!skb_is_gso_v6(skb)) {
7907 			iph->check = 0;
7908 			iph->tot_len = htons(mss + hdr_len);
7909 		}
7910 
7911 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7912 		    tg3_flag(tp, TSO_BUG))
7913 			return tg3_tso_bug(tp, skb);
7914 
7915 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7916 			       TXD_FLAG_CPU_POST_DMA);
7917 
7918 		if (tg3_flag(tp, HW_TSO_1) ||
7919 		    tg3_flag(tp, HW_TSO_2) ||
7920 		    tg3_flag(tp, HW_TSO_3)) {
7921 			tcp_hdr(skb)->check = 0;
7922 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7923 		} else
7924 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7925 								 iph->daddr, 0,
7926 								 IPPROTO_TCP,
7927 								 0);
7928 
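		/* HW_TSO_3 parts recover the header length from bits
		 * scattered across the mss and flags words; pack
		 * hdr_len into the gaps the descriptor format leaves
		 * free.
		 */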
7929 		if (tg3_flag(tp, HW_TSO_3)) {
7930 			mss |= (hdr_len & 0xc) << 12;
7931 			if (hdr_len & 0x10)
7932 				base_flags |= 0x00000010;
7933 			base_flags |= (hdr_len & 0x3e0) << 5;
7934 		} else if (tg3_flag(tp, HW_TSO_2))
7935 			mss |= hdr_len << 9;
7936 		else if (tg3_flag(tp, HW_TSO_1) ||
7937 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7938 			if (tcp_opt_len || iph->ihl > 5) {
7939 				int tsflags;
7940 
7941 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7942 				mss |= (tsflags << 11);
7943 			}
7944 		} else {
7945 			if (tcp_opt_len || iph->ihl > 5) {
7946 				int tsflags;
7947 
7948 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7949 				base_flags |= tsflags << 12;
7950 			}
7951 		}
7952 	}
7953 
7954 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7955 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7956 		base_flags |= TXD_FLAG_JMB_PKT;
7957 
7958 	if (vlan_tx_tag_present(skb)) {
7959 		base_flags |= TXD_FLAG_VLAN;
7960 		vlan = vlan_tx_tag_get(skb);
7961 	}
7962 
7963 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7964 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7965 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7966 		base_flags |= TXD_FLAG_HWTSTAMP;
7967 	}
7968 
7969 	len = skb_headlen(skb);
7970 
7971 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7972 	if (pci_dma_mapping_error(tp->pdev, mapping))
7973 		goto drop;
7974 
7976 	tnapi->tx_buffers[entry].skb = skb;
7977 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7978 
7979 	would_hit_hwbug = 0;
7980 
7981 	if (tg3_flag(tp, 5701_DMA_BUG))
7982 		would_hit_hwbug = 1;
7983 
7984 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7985 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7986 			    mss, vlan)) {
7987 		would_hit_hwbug = 1;
7988 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7989 		u32 tmp_mss = mss;
7990 
7991 		if (!tg3_flag(tp, HW_TSO_1) &&
7992 		    !tg3_flag(tp, HW_TSO_2) &&
7993 		    !tg3_flag(tp, HW_TSO_3))
7994 			tmp_mss = 0;
7995 
7996 		/* Now loop through additional data
7997 		 * fragments, and queue them.
7998 		 */
7999 		last = skb_shinfo(skb)->nr_frags - 1;
8000 		for (i = 0; i <= last; i++) {
8001 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8002 
8003 			len = skb_frag_size(frag);
8004 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8005 						   len, DMA_TO_DEVICE);
8006 
8007 			tnapi->tx_buffers[entry].skb = NULL;
8008 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8009 					   mapping);
8010 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8011 				goto dma_error;
8012 
8013 			if (!budget ||
8014 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8015 					    len, base_flags |
8016 					    ((i == last) ? TXD_FLAG_END : 0),
8017 					    tmp_mss, vlan)) {
8018 				would_hit_hwbug = 1;
8019 				break;
8020 			}
8021 		}
8022 	}
8023 
8024 	if (would_hit_hwbug) {
8025 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8026 
8027 		/* If the workaround fails due to memory/mapping
8028 		 * failure, silently drop this packet.
8029 		 */
8030 		entry = tnapi->tx_prod;
8031 		budget = tg3_tx_avail(tnapi);
8032 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8033 						base_flags, mss, vlan))
8034 			goto drop_nofree;
8035 	}
8036 
8037 	skb_tx_timestamp(skb);
8038 	netdev_tx_sent_queue(txq, skb->len);
8039 
8040 	/* Sync BD data before updating mailbox */
8041 	wmb();
8042 
8043 	/* Packets are ready, update Tx producer idx local and on card. */
8044 	tw32_tx_mbox(tnapi->prodmbox, entry);
8045 
8046 	tnapi->tx_prod = entry;
8047 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8048 		netif_tx_stop_queue(txq);
8049 
8050 		/* netif_tx_stop_queue() must be done before checking
8051 		 * the tx index in tg3_tx_avail() below, because in
8052 		 * tg3_tx(), we update tx index before checking for
8053 		 * netif_tx_queue_stopped().
8054 		 */
8055 		smp_mb();
8056 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8057 			netif_tx_wake_queue(txq);
8058 	}
8059 
8060 	mmiowb();
8061 	return NETDEV_TX_OK;
8062 
8063 dma_error:
8064 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8065 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8066 drop:
8067 	dev_kfree_skb(skb);
8068 drop_nofree:
8069 	tp->tx_dropped++;
8070 	return NETDEV_TX_OK;
8071 }
8072 
8073 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8074 {
8075 	if (enable) {
8076 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8077 				  MAC_MODE_PORT_MODE_MASK);
8078 
8079 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8080 
8081 		if (!tg3_flag(tp, 5705_PLUS))
8082 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8083 
8084 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8085 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8086 		else
8087 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8088 	} else {
8089 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8090 
8091 		if (tg3_flag(tp, 5705_PLUS) ||
8092 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8093 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8094 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8095 	}
8096 
8097 	tw32(MAC_MODE, tp->mac_mode);
8098 	udelay(40);
8099 }
8100 
8101 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8102 {
8103 	u32 val, bmcr, mac_mode, ptest = 0;
8104 
8105 	tg3_phy_toggle_apd(tp, false);
8106 	tg3_phy_toggle_automdix(tp, false);
8107 
8108 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8109 		return -EIO;
8110 
8111 	bmcr = BMCR_FULLDPLX;
8112 	switch (speed) {
8113 	case SPEED_10:
8114 		break;
8115 	case SPEED_100:
8116 		bmcr |= BMCR_SPEED100;
8117 		break;
8118 	case SPEED_1000:
8119 	default:
8120 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8121 			speed = SPEED_100;
8122 			bmcr |= BMCR_SPEED100;
8123 		} else {
8124 			speed = SPEED_1000;
8125 			bmcr |= BMCR_SPEED1000;
8126 		}
8127 	}
8128 
8129 	if (extlpbk) {
8130 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8131 			tg3_readphy(tp, MII_CTRL1000, &val);
8132 			val |= CTL1000_AS_MASTER |
8133 			       CTL1000_ENABLE_MASTER;
8134 			tg3_writephy(tp, MII_CTRL1000, val);
8135 		} else {
8136 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8137 				MII_TG3_FET_PTEST_TRIM_2;
8138 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8139 		}
8140 	} else
8141 		bmcr |= BMCR_LOOPBACK;
8142 
8143 	tg3_writephy(tp, MII_BMCR, bmcr);
8144 
8145 	/* The write needs to be flushed for the FETs */
8146 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8147 		tg3_readphy(tp, MII_BMCR, &bmcr);
8148 
8149 	udelay(40);
8150 
8151 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8152 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8153 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8154 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8155 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8156 
8157 		/* The write needs to be flushed for the AC131 */
8158 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8159 	}
8160 
8161 	/* Reset to prevent losing 1st rx packet intermittently */
8162 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8163 	    tg3_flag(tp, 5780_CLASS)) {
8164 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8165 		udelay(10);
8166 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8167 	}
8168 
8169 	mac_mode = tp->mac_mode &
8170 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8171 	if (speed == SPEED_1000)
8172 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8173 	else
8174 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8175 
8176 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8177 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8178 
8179 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8180 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8181 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8182 			mac_mode |= MAC_MODE_LINK_POLARITY;
8183 
8184 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8185 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8186 	}
8187 
8188 	tw32(MAC_MODE, mac_mode);
8189 	udelay(40);
8190 
8191 	return 0;
8192 }
8193 
8194 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8195 {
8196 	struct tg3 *tp = netdev_priv(dev);
8197 
8198 	if (features & NETIF_F_LOOPBACK) {
8199 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8200 			return;
8201 
8202 		spin_lock_bh(&tp->lock);
8203 		tg3_mac_loopback(tp, true);
8204 		netif_carrier_on(tp->dev);
8205 		spin_unlock_bh(&tp->lock);
8206 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8207 	} else {
8208 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8209 			return;
8210 
8211 		spin_lock_bh(&tp->lock);
8212 		tg3_mac_loopback(tp, false);
8213 		/* Force link status check */
8214 		tg3_setup_phy(tp, true);
8215 		spin_unlock_bh(&tp->lock);
8216 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8217 	}
8218 }
8219 
8220 static netdev_features_t tg3_fix_features(struct net_device *dev,
8221 	netdev_features_t features)
8222 {
8223 	struct tg3 *tp = netdev_priv(dev);
8224 
8225 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8226 		features &= ~NETIF_F_ALL_TSO;
8227 
8228 	return features;
8229 }
8230 
8231 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8232 {
8233 	netdev_features_t changed = dev->features ^ features;
8234 
8235 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8236 		tg3_set_loopback(dev, features);
8237 
8238 	return 0;
8239 }
8240 
8241 static void tg3_rx_prodring_free(struct tg3 *tp,
8242 				 struct tg3_rx_prodring_set *tpr)
8243 {
8244 	int i;
8245 
8246 	if (tpr != &tp->napi[0].prodring) {
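		/* Per-vector staging rings only hold live buffers in
		 * the window between the consumer and producer
		 * indexes, so free just that range.  The vector 0 ring
		 * below is freed in full.
		 */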
8247 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8248 		     i = (i + 1) & tp->rx_std_ring_mask)
8249 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8250 					tp->rx_pkt_map_sz);
8251 
8252 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8253 			for (i = tpr->rx_jmb_cons_idx;
8254 			     i != tpr->rx_jmb_prod_idx;
8255 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8256 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8257 						TG3_RX_JMB_MAP_SZ);
8258 			}
8259 		}
8260 
8261 		return;
8262 	}
8263 
8264 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8265 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8266 				tp->rx_pkt_map_sz);
8267 
8268 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8269 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8270 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8271 					TG3_RX_JMB_MAP_SZ);
8272 	}
8273 }
8274 
8275 /* Initialize rx rings for packet processing.
8276  *
8277  * The chip has been shut down and the driver detached from
8278  * the networking, so no interrupts or new tx packets will
8279  * end up in the driver.  tp->{tx,}lock are held and thus
8280  * we may not sleep.
8281  */
8282 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8283 				 struct tg3_rx_prodring_set *tpr)
8284 {
8285 	u32 i, rx_pkt_dma_sz;
8286 
8287 	tpr->rx_std_cons_idx = 0;
8288 	tpr->rx_std_prod_idx = 0;
8289 	tpr->rx_jmb_cons_idx = 0;
8290 	tpr->rx_jmb_prod_idx = 0;
8291 
8292 	if (tpr != &tp->napi[0].prodring) {
8293 		memset(&tpr->rx_std_buffers[0], 0,
8294 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8295 		if (tpr->rx_jmb_buffers)
8296 			memset(&tpr->rx_jmb_buffers[0], 0,
8297 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8298 		goto done;
8299 	}
8300 
8301 	/* Zero out all descriptors. */
8302 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8303 
8304 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8305 	if (tg3_flag(tp, 5780_CLASS) &&
8306 	    tp->dev->mtu > ETH_DATA_LEN)
8307 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8308 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8309 
8310 	/* Initialize invariants of the rings, we only set this
8311 	 * stuff once.  This works because the card does not
8312 	 * write into the rx buffer posting rings.
8313 	 */
8314 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8315 		struct tg3_rx_buffer_desc *rxd;
8316 
8317 		rxd = &tpr->rx_std[i];
8318 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8319 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8320 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8321 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8322 	}
8323 
8324 	/* Now allocate fresh SKBs for each rx ring. */
8325 	for (i = 0; i < tp->rx_pending; i++) {
8326 		unsigned int frag_size;
8327 
8328 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8329 				      &frag_size) < 0) {
8330 			netdev_warn(tp->dev,
8331 				    "Using a smaller RX standard ring. Only "
8332 				    "%d out of %d buffers were allocated "
8333 				    "successfully\n", i, tp->rx_pending);
8334 			if (i == 0)
8335 				goto initfail;
8336 			tp->rx_pending = i;
8337 			break;
8338 		}
8339 	}
8340 
8341 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8342 		goto done;
8343 
8344 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8345 
8346 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8347 		goto done;
8348 
8349 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8350 		struct tg3_rx_buffer_desc *rxd;
8351 
8352 		rxd = &tpr->rx_jmb[i].std;
8353 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8354 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8355 				  RXD_FLAG_JUMBO;
8356 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8357 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8358 	}
8359 
8360 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8361 		unsigned int frag_size;
8362 
8363 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8364 				      &frag_size) < 0) {
8365 			netdev_warn(tp->dev,
8366 				    "Using a smaller RX jumbo ring. Only %d "
8367 				    "out of %d buffers were allocated "
8368 				    "successfully\n", i, tp->rx_jumbo_pending);
8369 			if (i == 0)
8370 				goto initfail;
8371 			tp->rx_jumbo_pending = i;
8372 			break;
8373 		}
8374 	}
8375 
8376 done:
8377 	return 0;
8378 
8379 initfail:
8380 	tg3_rx_prodring_free(tp, tpr);
8381 	return -ENOMEM;
8382 }
8383 
8384 static void tg3_rx_prodring_fini(struct tg3 *tp,
8385 				 struct tg3_rx_prodring_set *tpr)
8386 {
8387 	kfree(tpr->rx_std_buffers);
8388 	tpr->rx_std_buffers = NULL;
8389 	kfree(tpr->rx_jmb_buffers);
8390 	tpr->rx_jmb_buffers = NULL;
8391 	if (tpr->rx_std) {
8392 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8393 				  tpr->rx_std, tpr->rx_std_mapping);
8394 		tpr->rx_std = NULL;
8395 	}
8396 	if (tpr->rx_jmb) {
8397 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8398 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8399 		tpr->rx_jmb = NULL;
8400 	}
8401 }
8402 
8403 static int tg3_rx_prodring_init(struct tg3 *tp,
8404 				struct tg3_rx_prodring_set *tpr)
8405 {
8406 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8407 				      GFP_KERNEL);
8408 	if (!tpr->rx_std_buffers)
8409 		return -ENOMEM;
8410 
8411 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8412 					 TG3_RX_STD_RING_BYTES(tp),
8413 					 &tpr->rx_std_mapping,
8414 					 GFP_KERNEL);
8415 	if (!tpr->rx_std)
8416 		goto err_out;
8417 
8418 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8419 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8420 					      GFP_KERNEL);
8421 		if (!tpr->rx_jmb_buffers)
8422 			goto err_out;
8423 
8424 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8425 						 TG3_RX_JMB_RING_BYTES(tp),
8426 						 &tpr->rx_jmb_mapping,
8427 						 GFP_KERNEL);
8428 		if (!tpr->rx_jmb)
8429 			goto err_out;
8430 	}
8431 
8432 	return 0;
8433 
8434 err_out:
8435 	tg3_rx_prodring_fini(tp, tpr);
8436 	return -ENOMEM;
8437 }
8438 
8439 /* Free up pending packets in all rx/tx rings.
8440  *
8441  * The chip has been shut down and the driver detached from
8442  * the networking, so no interrupts or new tx packets will
8443  * end up in the driver.  tp->{tx,}lock is not held and we are not
8444  * in an interrupt context and thus may sleep.
8445  */
8446 static void tg3_free_rings(struct tg3 *tp)
8447 {
8448 	int i, j;
8449 
8450 	for (j = 0; j < tp->irq_cnt; j++) {
8451 		struct tg3_napi *tnapi = &tp->napi[j];
8452 
8453 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8454 
8455 		if (!tnapi->tx_buffers)
8456 			continue;
8457 
8458 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8459 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8460 
8461 			if (!skb)
8462 				continue;
8463 
8464 			tg3_tx_skb_unmap(tnapi, i,
8465 					 skb_shinfo(skb)->nr_frags - 1);
8466 
8467 			dev_kfree_skb_any(skb);
8468 		}
8469 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8470 	}
8471 }
8472 
8473 /* Initialize tx/rx rings for packet processing.
8474  *
8475  * The chip has been shut down and the driver detached from
8476  * the networking, so no interrupts or new tx packets will
8477  * end up in the driver.  tp->{tx,}lock are held and thus
8478  * we may not sleep.
8479  */
8480 static int tg3_init_rings(struct tg3 *tp)
8481 {
8482 	int i;
8483 
8484 	/* Free up all the SKBs. */
8485 	tg3_free_rings(tp);
8486 
8487 	for (i = 0; i < tp->irq_cnt; i++) {
8488 		struct tg3_napi *tnapi = &tp->napi[i];
8489 
8490 		tnapi->last_tag = 0;
8491 		tnapi->last_irq_tag = 0;
8492 		tnapi->hw_status->status = 0;
8493 		tnapi->hw_status->status_tag = 0;
8494 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8495 
8496 		tnapi->tx_prod = 0;
8497 		tnapi->tx_cons = 0;
8498 		if (tnapi->tx_ring)
8499 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8500 
8501 		tnapi->rx_rcb_ptr = 0;
8502 		if (tnapi->rx_rcb)
8503 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8504 
8505 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8506 			tg3_free_rings(tp);
8507 			return -ENOMEM;
8508 		}
8509 	}
8510 
8511 	return 0;
8512 }
8513 
8514 static void tg3_mem_tx_release(struct tg3 *tp)
8515 {
8516 	int i;
8517 
8518 	for (i = 0; i < tp->irq_max; i++) {
8519 		struct tg3_napi *tnapi = &tp->napi[i];
8520 
8521 		if (tnapi->tx_ring) {
8522 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8523 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8524 			tnapi->tx_ring = NULL;
8525 		}
8526 
8527 		kfree(tnapi->tx_buffers);
8528 		tnapi->tx_buffers = NULL;
8529 	}
8530 }
8531 
8532 static int tg3_mem_tx_acquire(struct tg3 *tp)
8533 {
8534 	int i;
8535 	struct tg3_napi *tnapi = &tp->napi[0];
8536 
8537 	/* If multivector TSS is enabled, vector 0 does not handle
8538 	 * tx interrupts.  Don't allocate any resources for it.
8539 	 */
8540 	if (tg3_flag(tp, ENABLE_TSS))
8541 		tnapi++;
8542 
8543 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8544 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8545 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8546 		if (!tnapi->tx_buffers)
8547 			goto err_out;
8548 
8549 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8550 						    TG3_TX_RING_BYTES,
8551 						    &tnapi->tx_desc_mapping,
8552 						    GFP_KERNEL);
8553 		if (!tnapi->tx_ring)
8554 			goto err_out;
8555 	}
8556 
8557 	return 0;
8558 
8559 err_out:
8560 	tg3_mem_tx_release(tp);
8561 	return -ENOMEM;
8562 }
8563 
8564 static void tg3_mem_rx_release(struct tg3 *tp)
8565 {
8566 	int i;
8567 
8568 	for (i = 0; i < tp->irq_max; i++) {
8569 		struct tg3_napi *tnapi = &tp->napi[i];
8570 
8571 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8572 
8573 		if (!tnapi->rx_rcb)
8574 			continue;
8575 
8576 		dma_free_coherent(&tp->pdev->dev,
8577 				  TG3_RX_RCB_RING_BYTES(tp),
8578 				  tnapi->rx_rcb,
8579 				  tnapi->rx_rcb_mapping);
8580 		tnapi->rx_rcb = NULL;
8581 	}
8582 }
8583 
8584 static int tg3_mem_rx_acquire(struct tg3 *tp)
8585 {
8586 	unsigned int i, limit;
8587 
8588 	limit = tp->rxq_cnt;
8589 
8590 	/* If RSS is enabled, we need a (dummy) producer ring
8591 	 * set on vector zero.  This is the true hw prodring.
8592 	 */
8593 	if (tg3_flag(tp, ENABLE_RSS))
8594 		limit++;
8595 
8596 	for (i = 0; i < limit; i++) {
8597 		struct tg3_napi *tnapi = &tp->napi[i];
8598 
8599 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8600 			goto err_out;
8601 
8602 		/* If multivector RSS is enabled, vector 0
8603 		 * does not handle rx or tx interrupts.
8604 		 * Don't allocate any resources for it.
8605 		 */
8606 		if (!i && tg3_flag(tp, ENABLE_RSS))
8607 			continue;
8608 
8609 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8610 						    TG3_RX_RCB_RING_BYTES(tp),
8611 						    &tnapi->rx_rcb_mapping,
8612 						    GFP_KERNEL);
8613 		if (!tnapi->rx_rcb)
8614 			goto err_out;
8615 	}
8616 
8617 	return 0;
8618 
8619 err_out:
8620 	tg3_mem_rx_release(tp);
8621 	return -ENOMEM;
8622 }
8623 
8624 /*
8625  * Must not be invoked with interrupt sources disabled and
8626  * the hardware shut down.
8627  */
8628 static void tg3_free_consistent(struct tg3 *tp)
8629 {
8630 	int i;
8631 
8632 	for (i = 0; i < tp->irq_cnt; i++) {
8633 		struct tg3_napi *tnapi = &tp->napi[i];
8634 
8635 		if (tnapi->hw_status) {
8636 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8637 					  tnapi->hw_status,
8638 					  tnapi->status_mapping);
8639 			tnapi->hw_status = NULL;
8640 		}
8641 	}
8642 
8643 	tg3_mem_rx_release(tp);
8644 	tg3_mem_tx_release(tp);
8645 
8646 	if (tp->hw_stats) {
8647 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8648 				  tp->hw_stats, tp->stats_mapping);
8649 		tp->hw_stats = NULL;
8650 	}
8651 }
8652 
8653 /*
8654  * Must not be invoked with interrupt sources disabled and
8655  * the hardware shut down.  Can sleep.
8656  */
8657 static int tg3_alloc_consistent(struct tg3 *tp)
8658 {
8659 	int i;
8660 
8661 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8662 					   sizeof(struct tg3_hw_stats),
8663 					   &tp->stats_mapping, GFP_KERNEL);
8664 	if (!tp->hw_stats)
8665 		goto err_out;
8666 
8667 	for (i = 0; i < tp->irq_cnt; i++) {
8668 		struct tg3_napi *tnapi = &tp->napi[i];
8669 		struct tg3_hw_status *sblk;
8670 
8671 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8672 						       TG3_HW_STATUS_SIZE,
8673 						       &tnapi->status_mapping,
8674 						       GFP_KERNEL);
8675 		if (!tnapi->hw_status)
8676 			goto err_out;
8677 
8678 		sblk = tnapi->hw_status;
8679 
8680 		if (tg3_flag(tp, ENABLE_RSS)) {
8681 			u16 *prodptr = NULL;
8682 
8683 			/*
8684 			 * When RSS is enabled, the status block format changes
8685 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8686 			 * and "rx_mini_consumer" members get mapped to the
8687 			 * other three rx return ring producer indexes.
8688 			 */
8689 			switch (i) {
8690 			case 1:
8691 				prodptr = &sblk->idx[0].rx_producer;
8692 				break;
8693 			case 2:
8694 				prodptr = &sblk->rx_jumbo_consumer;
8695 				break;
8696 			case 3:
8697 				prodptr = &sblk->reserved;
8698 				break;
8699 			case 4:
8700 				prodptr = &sblk->rx_mini_consumer;
8701 				break;
8702 			}
8703 			tnapi->rx_rcb_prod_idx = prodptr;
8704 		} else {
8705 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8706 		}
8707 	}
8708 
8709 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8710 		goto err_out;
8711 
8712 	return 0;
8713 
8714 err_out:
8715 	tg3_free_consistent(tp);
8716 	return -ENOMEM;
8717 }
8718 
8719 #define MAX_WAIT_CNT 1000
8720 
8721 /* To stop a block, clear the enable bit and poll till it
8722  * clears.  tp->lock is held.
8723  */
8724 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8725 {
8726 	unsigned int i;
8727 	u32 val;
8728 
8729 	if (tg3_flag(tp, 5705_PLUS)) {
8730 		switch (ofs) {
8731 		case RCVLSC_MODE:
8732 		case DMAC_MODE:
8733 		case MBFREE_MODE:
8734 		case BUFMGR_MODE:
8735 		case MEMARB_MODE:
8736 			/* We can't enable/disable these bits of the
8737 			 * 5705/5750, just say success.
8738 			 */
8739 			return 0;
8740 
8741 		default:
8742 			break;
8743 		}
8744 	}
8745 
8746 	val = tr32(ofs);
8747 	val &= ~enable_bit;
8748 	tw32_f(ofs, val);
8749 
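	/* Poll for the enable bit to clear: up to MAX_WAIT_CNT
	 * iterations of 100us each, i.e. 100ms total.
	 */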
8750 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8751 		if (pci_channel_offline(tp->pdev)) {
8752 			dev_err(&tp->pdev->dev,
8753 				"tg3_stop_block device offline, "
8754 				"ofs=%lx enable_bit=%x\n",
8755 				ofs, enable_bit);
8756 			return -ENODEV;
8757 		}
8758 
8759 		udelay(100);
8760 		val = tr32(ofs);
8761 		if ((val & enable_bit) == 0)
8762 			break;
8763 	}
8764 
8765 	if (i == MAX_WAIT_CNT && !silent) {
8766 		dev_err(&tp->pdev->dev,
8767 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8768 			ofs, enable_bit);
8769 		return -ENODEV;
8770 	}
8771 
8772 	return 0;
8773 }
8774 
8775 /* tp->lock is held. */
8776 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8777 {
8778 	int i, err;
8779 
8780 	tg3_disable_ints(tp);
8781 
8782 	if (pci_channel_offline(tp->pdev)) {
8783 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8784 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8785 		err = -ENODEV;
8786 		goto err_no_dev;
8787 	}
8788 
8789 	tp->rx_mode &= ~RX_MODE_ENABLE;
8790 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8791 	udelay(10);
8792 
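	/* Stop the receive-side blocks first, then the send-side
	 * blocks, and only then host coalescing, the buffer manager
	 * and the memory arbiter - presumably so no block is torn
	 * down while an upstream block can still feed it.
	 */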
8793 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8794 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8795 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8796 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8797 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8798 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8799 
8800 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8801 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8802 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8803 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8804 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8805 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8806 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8807 
8808 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8809 	tw32_f(MAC_MODE, tp->mac_mode);
8810 	udelay(40);
8811 
8812 	tp->tx_mode &= ~TX_MODE_ENABLE;
8813 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8814 
8815 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8816 		udelay(100);
8817 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8818 			break;
8819 	}
8820 	if (i >= MAX_WAIT_CNT) {
8821 		dev_err(&tp->pdev->dev,
8822 			"%s timed out, TX_MODE_ENABLE will not clear "
8823 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8824 		err |= -ENODEV;
8825 	}
8826 
8827 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8828 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8829 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8830 
8831 	tw32(FTQ_RESET, 0xffffffff);
8832 	tw32(FTQ_RESET, 0x00000000);
8833 
8834 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8835 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8836 
8837 err_no_dev:
8838 	for (i = 0; i < tp->irq_cnt; i++) {
8839 		struct tg3_napi *tnapi = &tp->napi[i];
8840 		if (tnapi->hw_status)
8841 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8842 	}
8843 
8844 	return err;
8845 }
8846 
8847 /* Save PCI command register before chip reset */
8848 static void tg3_save_pci_state(struct tg3 *tp)
8849 {
8850 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8851 }
8852 
8853 /* Restore PCI state after chip reset */
8854 static void tg3_restore_pci_state(struct tg3 *tp)
8855 {
8856 	u32 val;
8857 
8858 	/* Re-enable indirect register accesses. */
8859 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8860 			       tp->misc_host_ctrl);
8861 
8862 	/* Set MAX PCI retry to zero. */
8863 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8864 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8865 	    tg3_flag(tp, PCIX_MODE))
8866 		val |= PCISTATE_RETRY_SAME_DMA;
8867 	/* Allow reads and writes to the APE register and memory space. */
8868 	if (tg3_flag(tp, ENABLE_APE))
8869 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8870 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8871 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8872 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8873 
8874 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8875 
8876 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8877 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8878 				      tp->pci_cacheline_sz);
8879 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8880 				      tp->pci_lat_timer);
8881 	}
8882 
8883 	/* Make sure PCI-X relaxed ordering bit is clear. */
8884 	if (tg3_flag(tp, PCIX_MODE)) {
8885 		u16 pcix_cmd;
8886 
8887 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8888 				     &pcix_cmd);
8889 		pcix_cmd &= ~PCI_X_CMD_ERO;
8890 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8891 				      pcix_cmd);
8892 	}
8893 
8894 	if (tg3_flag(tp, 5780_CLASS)) {
8895 
8896 		/* Chip reset on 5780 will reset MSI enable bit,
8897 		 * so need to restore it.
8898 		 */
8899 		if (tg3_flag(tp, USING_MSI)) {
8900 			u16 ctrl;
8901 
8902 			pci_read_config_word(tp->pdev,
8903 					     tp->msi_cap + PCI_MSI_FLAGS,
8904 					     &ctrl);
8905 			pci_write_config_word(tp->pdev,
8906 					      tp->msi_cap + PCI_MSI_FLAGS,
8907 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8908 			val = tr32(MSGINT_MODE);
8909 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8910 		}
8911 	}
8912 }
8913 
8914 /* tp->lock is held. */
8915 static int tg3_chip_reset(struct tg3 *tp)
8916 {
8917 	u32 val;
8918 	void (*write_op)(struct tg3 *, u32, u32);
8919 	int i, err;
8920 
8921 	tg3_nvram_lock(tp);
8922 
8923 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8924 
8925 	/* No matching tg3_nvram_unlock() after this because
8926 	 * the chip reset below will undo the nvram lock.
8927 	 */
8928 	tp->nvram_lock_cnt = 0;
8929 
8930 	/* GRC_MISC_CFG core clock reset will clear the memory
8931 	 * enable bit in PCI register 4 and the MSI enable bit
8932 	 * on some chips, so we save relevant registers here.
8933 	 */
8934 	tg3_save_pci_state(tp);
8935 
8936 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8937 	    tg3_flag(tp, 5755_PLUS))
8938 		tw32(GRC_FASTBOOT_PC, 0);
8939 
8940 	/*
8941 	 * We must avoid the readl() that normally takes place.
8942 	 * It locks machines, causes machine checks, and does other
8943 	 * fun things.  So, temporarily disable the 5701
8944 	 * hardware workaround, while we do the reset.
8945 	 */
8946 	write_op = tp->write32;
8947 	if (write_op == tg3_write_flush_reg32)
8948 		tp->write32 = tg3_write32;
8949 
8950 	/* Prevent the irq handler from reading or writing PCI registers
8951 	 * during chip reset when the memory enable bit in the PCI command
8952 	 * register may be cleared.  The chip does not generate interrupt
8953 	 * at this time, but the irq handler may still be called due to irq
8954 	 * sharing or irqpoll.
8955 	 */
8956 	tg3_flag_set(tp, CHIP_RESETTING);
8957 	for (i = 0; i < tp->irq_cnt; i++) {
8958 		struct tg3_napi *tnapi = &tp->napi[i];
8959 		if (tnapi->hw_status) {
8960 			tnapi->hw_status->status = 0;
8961 			tnapi->hw_status->status_tag = 0;
8962 		}
8963 		tnapi->last_tag = 0;
8964 		tnapi->last_irq_tag = 0;
8965 	}
8966 	smp_mb();
8967 
8968 	for (i = 0; i < tp->irq_cnt; i++)
8969 		synchronize_irq(tp->napi[i].irq_vec);
8970 
8971 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8972 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8973 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8974 	}
8975 
8976 	/* do the reset */
8977 	val = GRC_MISC_CFG_CORECLK_RESET;
8978 
8979 	if (tg3_flag(tp, PCI_EXPRESS)) {
8980 		/* Force PCIe 1.0a mode */
8981 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8982 		    !tg3_flag(tp, 57765_PLUS) &&
8983 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8984 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8985 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8986 
8987 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8988 			tw32(GRC_MISC_CFG, (1 << 29));
8989 			val |= (1 << 29);
8990 		}
8991 	}
8992 
8993 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8994 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8995 		tw32(GRC_VCPU_EXT_CTRL,
8996 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8997 	}
8998 
8999 	/* Manage gphy power for all CPMU absent PCIe devices. */
9000 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9001 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9002 
9003 	tw32(GRC_MISC_CFG, val);
9004 
9005 	/* restore 5701 hardware bug workaround write method */
9006 	tp->write32 = write_op;
9007 
9008 	/* Unfortunately, we have to delay before the PCI read back.
9009 	 * Some 575X chips will not even respond to a PCI cfg access
9010 	 * when the reset command is given to the chip.
9011 	 *
9012 	 * How do these hardware designers expect things to work
9013 	 * properly if the PCI write is posted for a long period
9014 	 * of time?  It is always necessary to have some method by
9015 	 * which a register read back can occur to push the write
9016 	 * out which does the reset.
9017 	 *
9018 	 * For most tg3 variants the trick below was working.
9019 	 * Ho hum...
9020 	 */
9021 	udelay(120);
9022 
9023 	/* Flush PCI posted writes.  The normal MMIO registers
9024 	 * are inaccessible at this time so this is the only
9025 	 * way to do this reliably (actually, this is no longer
9026 	 * the case, see above).  I tried to use indirect
9027 	 * register read/write but this upset some 5701 variants.
9028 	 */
9029 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9030 
9031 	udelay(120);
9032 
9033 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9034 		u16 val16;
9035 
9036 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9037 			int j;
9038 			u32 cfg_val;
9039 
9040 			/* Wait up to 500ms (5000 * 100us) for link training to complete. */
9041 			for (j = 0; j < 5000; j++)
9042 				udelay(100);
9043 
9044 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9045 			pci_write_config_dword(tp->pdev, 0xc4,
9046 					       cfg_val | (1 << 15));
9047 		}
9048 
9049 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9050 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9051 		/*
9052 		 * Older PCIe devices only support the 128 byte
9053 		 * MPS setting.  Enforce the restriction.
9054 		 */
9055 		if (!tg3_flag(tp, CPMU_PRESENT))
9056 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9057 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9058 
9059 		/* Clear error status */
9060 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9061 				      PCI_EXP_DEVSTA_CED |
9062 				      PCI_EXP_DEVSTA_NFED |
9063 				      PCI_EXP_DEVSTA_FED |
9064 				      PCI_EXP_DEVSTA_URD);
9065 	}
9066 
9067 	tg3_restore_pci_state(tp);
9068 
9069 	tg3_flag_clear(tp, CHIP_RESETTING);
9070 	tg3_flag_clear(tp, ERROR_PROCESSED);
9071 
9072 	val = 0;
9073 	if (tg3_flag(tp, 5780_CLASS))
9074 		val = tr32(MEMARB_MODE);
9075 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9076 
9077 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9078 		tg3_stop_fw(tp);
9079 		tw32(0x5000, 0x400);
9080 	}
9081 
9082 	if (tg3_flag(tp, IS_SSB_CORE)) {
9083 		/*
9084 		 * BCM4785: In order to avoid repercussions from using
9085 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9086 		 * which is not required for normal operation.
9087 		 */
9088 		tg3_stop_fw(tp);
9089 		tg3_halt_cpu(tp, RX_CPU_BASE);
9090 	}
9091 
9092 	err = tg3_poll_fw(tp);
9093 	if (err)
9094 		return err;
9095 
9096 	tw32(GRC_MODE, tp->grc_mode);
9097 
9098 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9099 		val = tr32(0xc4);
9100 
9101 		tw32(0xc4, val | (1 << 15));
9102 	}
9103 
9104 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9105 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9106 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9107 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9108 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9109 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9110 	}
9111 
9112 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9113 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9114 		val = tp->mac_mode;
9115 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9116 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9117 		val = tp->mac_mode;
9118 	} else
9119 		val = 0;
9120 
9121 	tw32_f(MAC_MODE, val);
9122 	udelay(40);
9123 
9124 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9125 
9126 	tg3_mdio_start(tp);
9127 
9128 	if (tg3_flag(tp, PCI_EXPRESS) &&
9129 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9130 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9131 	    !tg3_flag(tp, 57765_PLUS)) {
9132 		val = tr32(0x7c00);
9133 
9134 		tw32(0x7c00, val | (1 << 25));
9135 	}
9136 
9137 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9138 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9139 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9140 	}
9141 
9142 	/* Reprobe ASF enable state.  */
9143 	tg3_flag_clear(tp, ENABLE_ASF);
9144 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9145 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9146 
9147 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9148 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9149 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9150 		u32 nic_cfg;
9151 
9152 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9153 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9154 			tg3_flag_set(tp, ENABLE_ASF);
9155 			tp->last_event_jiffies = jiffies;
9156 			if (tg3_flag(tp, 5750_PLUS))
9157 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9158 
9159 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9160 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9161 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9162 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9163 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9164 		}
9165 	}
9166 
9167 	return 0;
9168 }
9169 
9170 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9171 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9172 
9173 /* tp->lock is held. */
9174 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9175 {
9176 	int err;
9177 
9178 	tg3_stop_fw(tp);
9179 
9180 	tg3_write_sig_pre_reset(tp, kind);
9181 
9182 	tg3_abort_hw(tp, silent);
9183 	err = tg3_chip_reset(tp);
9184 
9185 	__tg3_set_mac_addr(tp, false);
9186 
9187 	tg3_write_sig_legacy(tp, kind);
9188 	tg3_write_sig_post_reset(tp, kind);
9189 
9190 	if (tp->hw_stats) {
9191 		/* Save the stats across chip resets... */
9192 		tg3_get_nstats(tp, &tp->net_stats_prev);
9193 		tg3_get_estats(tp, &tp->estats_prev);
9194 
9195 		/* And make sure the next sample is new data */
9196 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9197 	}
9198 
9199 	if (err)
9200 		return err;
9201 
9202 	return 0;
9203 }
9204 
9205 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9206 {
9207 	struct tg3 *tp = netdev_priv(dev);
9208 	struct sockaddr *addr = p;
9209 	int err = 0;
9210 	bool skip_mac_1 = false;
9211 
9212 	if (!is_valid_ether_addr(addr->sa_data))
9213 		return -EADDRNOTAVAIL;
9214 
9215 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9216 
9217 	if (!netif_running(dev))
9218 		return 0;
9219 
9220 	if (tg3_flag(tp, ENABLE_ASF)) {
9221 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9222 
9223 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9224 		addr0_low = tr32(MAC_ADDR_0_LOW);
9225 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9226 		addr1_low = tr32(MAC_ADDR_1_LOW);
9227 
9228 		/* Skip MAC addr 1 if ASF is using it. */
9229 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9230 		    !(addr1_high == 0 && addr1_low == 0))
9231 			skip_mac_1 = true;
9232 	}
9233 	spin_lock_bh(&tp->lock);
9234 	__tg3_set_mac_addr(tp, skip_mac_1);
9235 	spin_unlock_bh(&tp->lock);
9236 
9237 	return err;
9238 }
9239 
9240 /* tp->lock is held. */
9241 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9242 			   dma_addr_t mapping, u32 maxlen_flags,
9243 			   u32 nic_addr)
9244 {
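	/* A BDINFO block in NIC SRAM holds the 64-bit host address of
	 * the ring, a maxlen/flags word and, on pre-5705 parts, the
	 * ring's address in NIC memory.
	 */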
9245 	tg3_write_mem(tp,
9246 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9247 		      ((u64) mapping >> 32));
9248 	tg3_write_mem(tp,
9249 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9250 		      ((u64) mapping & 0xffffffff));
9251 	tg3_write_mem(tp,
9252 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9253 		       maxlen_flags);
9254 
9255 	if (!tg3_flag(tp, 5705_PLUS))
9256 		tg3_write_mem(tp,
9257 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9258 			      nic_addr);
9259 }
9260 
9261 
9262 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9263 {
9264 	int i = 0;
9265 
9266 	if (!tg3_flag(tp, ENABLE_TSS)) {
9267 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9268 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9269 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9270 	} else {
9271 		tw32(HOSTCC_TXCOL_TICKS, 0);
9272 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9273 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9274 
9275 		for (; i < tp->txq_cnt; i++) {
9276 			u32 reg;
9277 
9278 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9279 			tw32(reg, ec->tx_coalesce_usecs);
9280 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9281 			tw32(reg, ec->tx_max_coalesced_frames);
9282 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9283 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9284 		}
9285 	}
9286 
9287 	for (; i < tp->irq_max - 1; i++) {
9288 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9289 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9290 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9291 	}
9292 }
9293 
9294 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9295 {
9296 	int i = 0;
9297 	u32 limit = tp->rxq_cnt;
9298 
9299 	if (!tg3_flag(tp, ENABLE_RSS)) {
9300 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9301 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9302 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9303 		limit--;
9304 	} else {
9305 		tw32(HOSTCC_RXCOL_TICKS, 0);
9306 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9307 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9308 	}
9309 
9310 	for (; i < limit; i++) {
9311 		u32 reg;
9312 
9313 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9314 		tw32(reg, ec->rx_coalesce_usecs);
9315 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9316 		tw32(reg, ec->rx_max_coalesced_frames);
9317 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9318 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9319 	}
9320 
9321 	for (; i < tp->irq_max - 1; i++) {
9322 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9323 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9324 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9325 	}
9326 }
9327 
9328 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9329 {
9330 	tg3_coal_tx_init(tp, ec);
9331 	tg3_coal_rx_init(tp, ec);
9332 
9333 	if (!tg3_flag(tp, 5705_PLUS)) {
9334 		u32 val = ec->stats_block_coalesce_usecs;
9335 
9336 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9337 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9338 
9339 		if (!tp->link_up)
9340 			val = 0;
9341 
9342 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9343 	}
9344 }
9345 
9346 /* tp->lock is held. */
9347 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9348 {
9349 	u32 txrcb, limit;
9350 
9351 	/* Disable all transmit rings but the first. */
9352 	if (!tg3_flag(tp, 5705_PLUS))
9353 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9354 	else if (tg3_flag(tp, 5717_PLUS))
9355 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9356 	else if (tg3_flag(tp, 57765_CLASS) ||
9357 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9358 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9359 	else
9360 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9361 
9362 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9363 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9364 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9365 			      BDINFO_FLAGS_DISABLED);
9366 }
9367 
9368 /* tp->lock is held. */
9369 static void tg3_tx_rcbs_init(struct tg3 *tp)
9370 {
9371 	int i = 0;
9372 	u32 txrcb = NIC_SRAM_SEND_RCB;
9373 
9374 	if (tg3_flag(tp, ENABLE_TSS))
9375 		i++;
9376 
9377 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9378 		struct tg3_napi *tnapi = &tp->napi[i];
9379 
9380 		if (!tnapi->tx_ring)
9381 			continue;
9382 
9383 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9384 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9385 			       NIC_SRAM_TX_BUFFER_DESC);
9386 	}
9387 }
9388 
9389 /* tp->lock is held. */
9390 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9391 {
9392 	u32 rxrcb, limit;
9393 
9394 	/* Disable all receive return rings but the first. */
9395 	if (tg3_flag(tp, 5717_PLUS))
9396 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9397 	else if (!tg3_flag(tp, 5705_PLUS))
9398 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9399 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9400 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9401 		 tg3_flag(tp, 57765_CLASS))
9402 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9403 	else
9404 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9405 
9406 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9407 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9408 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9409 			      BDINFO_FLAGS_DISABLED);
9410 }
9411 
9412 /* tp->lock is held. */
9413 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9414 {
9415 	int i = 0;
9416 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9417 
9418 	if (tg3_flag(tp, ENABLE_RSS))
9419 		i++;
9420 
9421 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9422 		struct tg3_napi *tnapi = &tp->napi[i];
9423 
9424 		if (!tnapi->rx_rcb)
9425 			continue;
9426 
9427 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9428 			       (tp->rx_ret_ring_mask + 1) <<
9429 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9430 	}
9431 }
9432 
9433 /* tp->lock is held. */
9434 static void tg3_rings_reset(struct tg3 *tp)
9435 {
9436 	int i;
9437 	u32 stblk;
9438 	struct tg3_napi *tnapi = &tp->napi[0];
9439 
9440 	tg3_tx_rcbs_disable(tp);
9441 
9442 	tg3_rx_ret_rcbs_disable(tp);
9443 
9444 	/* Disable interrupts */
9445 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9446 	tp->napi[0].chk_msi_cnt = 0;
9447 	tp->napi[0].last_rx_cons = 0;
9448 	tp->napi[0].last_tx_cons = 0;
9449 
9450 	/* Zero mailbox registers. */
9451 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9452 		for (i = 1; i < tp->irq_max; i++) {
9453 			tp->napi[i].tx_prod = 0;
9454 			tp->napi[i].tx_cons = 0;
9455 			if (tg3_flag(tp, ENABLE_TSS))
9456 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9457 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9458 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9459 			tp->napi[i].chk_msi_cnt = 0;
9460 			tp->napi[i].last_rx_cons = 0;
9461 			tp->napi[i].last_tx_cons = 0;
9462 		}
9463 		if (!tg3_flag(tp, ENABLE_TSS))
9464 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9465 	} else {
9466 		tp->napi[0].tx_prod = 0;
9467 		tp->napi[0].tx_cons = 0;
9468 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9469 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9470 	}
9471 
9472 	/* Make sure the NIC-based send BD rings are disabled. */
9473 	if (!tg3_flag(tp, 5705_PLUS)) {
9474 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9475 		for (i = 0; i < 16; i++)
9476 			tw32_tx_mbox(mbox + i * 8, 0);
9477 	}
9478 
9479 	/* Clear status block in ram. */
9480 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9481 
9482 	/* Set status block DMA address */
9483 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9484 	     ((u64) tnapi->status_mapping >> 32));
9485 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9486 	     ((u64) tnapi->status_mapping & 0xffffffff));
9487 
9488 	stblk = HOSTCC_STATBLCK_RING1;
9489 
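	/* Each extra vector's status block address registers occupy an
	 * 8-byte high/low slot, so consecutive rings live at
	 * HOSTCC_STATBLCK_RING1 + 0, + 8, + 16, and so on.
	 */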
9490 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9491 		u64 mapping = (u64)tnapi->status_mapping;
9492 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9493 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9494 		stblk += 8;
9495 
9496 		/* Clear status block in ram. */
9497 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9498 	}
9499 
9500 	tg3_tx_rcbs_init(tp);
9501 	tg3_rx_ret_rcbs_init(tp);
9502 }
9503 
9504 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9505 {
9506 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9507 
9508 	if (!tg3_flag(tp, 5750_PLUS) ||
9509 	    tg3_flag(tp, 5780_CLASS) ||
9510 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9511 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9512 	    tg3_flag(tp, 57765_PLUS))
9513 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9514 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9515 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9516 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9517 	else
9518 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9519 
9520 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9521 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9522 
9523 	val = min(nic_rep_thresh, host_rep_thresh);
9524 	tw32(RCVBDI_STD_THRESH, val);
9525 
9526 	if (tg3_flag(tp, 57765_PLUS))
9527 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9528 
9529 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9530 		return;
9531 
9532 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9533 
9534 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9535 
9536 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9537 	tw32(RCVBDI_JUMBO_THRESH, val);
9538 
9539 	if (tg3_flag(tp, 57765_PLUS))
9540 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9541 }
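
/* A worked example of the threshold math above, with made-up numbers:
 * given a BD cache of 32 entries, rx_std_max_post = 8 and
 * rx_pending = 200, nic_rep_thresh = min(32 / 2, 8) = 8 and
 * host_rep_thresh = max(200 / 8, 1) = 25, so RCVBDI_STD_THRESH is
 * programmed with min(8, 25) = 8, roughly the point at which the chip
 * asks for the standard ring to be replenished.
 */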
9542 
9543 static inline u32 calc_crc(unsigned char *buf, int len)
9544 {
9545 	u32 reg;
9546 	u32 tmp;
9547 	int j, k;
9548 
9549 	reg = 0xffffffff;
9550 
9551 	for (j = 0; j < len; j++) {
9552 		reg ^= buf[j];
9553 
9554 		for (k = 0; k < 8; k++) {
9555 			tmp = reg & 0x01;
9556 
9557 			reg >>= 1;
9558 
9559 			if (tmp)
9560 				reg ^= 0xedb88320;
9561 		}
9562 	}
9563 
9564 	return ~reg;
9565 }
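
/* calc_crc() above is the standard bit-reflected CRC-32 used by
 * Ethernet (polynomial 0xedb88320).  The MAC's multicast hash filter
 * keys off the low 7 bits of the complement of that CRC.  The helper
 * below is only an illustrative sketch of the mapping (it mirrors the
 * logic in __tg3_set_rx_mode() and is never called by the driver):
 */
static inline void tg3_mc_hash_sketch(u8 *addr, u32 *regidx, u32 *bitpos)
{
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;		/* low 7 bits of ~CRC */

	*regidx = (bit & 0x60) >> 5;	/* selects MAC_HASH_REG_0..3 */
	*bitpos = bit & 0x1f;		/* bit within that register */
}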
9566 
9567 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9568 {
9569 	/* accept or reject all multicast frames */
9570 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9571 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9572 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9573 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9574 }
9575 
9576 static void __tg3_set_rx_mode(struct net_device *dev)
9577 {
9578 	struct tg3 *tp = netdev_priv(dev);
9579 	u32 rx_mode;
9580 
9581 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9582 				  RX_MODE_KEEP_VLAN_TAG);
9583 
9584 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9585 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9586 	 * flag clear.
9587 	 */
9588 	if (!tg3_flag(tp, ENABLE_ASF))
9589 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9590 #endif
9591 
9592 	if (dev->flags & IFF_PROMISC) {
9593 		/* Promiscuous mode. */
9594 		rx_mode |= RX_MODE_PROMISC;
9595 	} else if (dev->flags & IFF_ALLMULTI) {
9596 		/* Accept all multicast. */
9597 		tg3_set_multi(tp, 1);
9598 	} else if (netdev_mc_empty(dev)) {
9599 		/* Reject all multicast. */
9600 		tg3_set_multi(tp, 0);
9601 	} else {
9602 		/* Accept one or more multicast(s). */
9603 		struct netdev_hw_addr *ha;
9604 		u32 mc_filter[4] = { 0, };
9605 		u32 regidx;
9606 		u32 bit;
9607 		u32 crc;
9608 
9609 		netdev_for_each_mc_addr(ha, dev) {
9610 			crc = calc_crc(ha->addr, ETH_ALEN);
9611 			bit = ~crc & 0x7f;
9612 			regidx = (bit & 0x60) >> 5;
9613 			bit &= 0x1f;
9614 			mc_filter[regidx] |= (1 << bit);
9615 		}
9616 
9617 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9618 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9619 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9620 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9621 	}
9622 
9623 	if (rx_mode != tp->rx_mode) {
9624 		tp->rx_mode = rx_mode;
9625 		tw32_f(MAC_RX_MODE, rx_mode);
9626 		udelay(10);
9627 	}
9628 }
9629 
9630 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9631 {
9632 	int i;
9633 
9634 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9635 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9636 }
9637 
9638 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9639 {
9640 	int i;
9641 
9642 	if (!tg3_flag(tp, SUPPORT_MSIX))
9643 		return;
9644 
9645 	if (tp->rxq_cnt == 1) {
9646 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9647 		return;
9648 	}
9649 
9650 	/* Validate table against current IRQ count */
9651 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9652 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9653 			break;
9654 	}
9655 
9656 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9657 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9658 }
9659 
9660 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9661 {
9662 	int i = 0;
9663 	u32 reg = MAC_RSS_INDIR_TBL_0;
9664 
9665 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9666 		u32 val = tp->rss_ind_tbl[i];
9667 		i++;
9668 		for (; i % 8; i++) {
9669 			val <<= 4;
9670 			val |= tp->rss_ind_tbl[i];
9671 		}
9672 		tw32(reg, val);
9673 		reg += 4;
9674 	}
9675 }
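
/* The indirection table is packed eight 4-bit queue indices per 32-bit
 * register, first entry in the most significant nibble.  For example, a
 * table that repeats {1, 2, 3, 0, 1, 2, 3, 0} is written as 0x12301230
 * to MAC_RSS_INDIR_TBL_0 and to each register that follows it.
 */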
9676 
9677 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9678 {
9679 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9680 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9681 	else
9682 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9683 }
9684 
9685 /* tp->lock is held. */
9686 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9687 {
9688 	u32 val, rdmac_mode;
9689 	int i, err, limit;
9690 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9691 
9692 	tg3_disable_ints(tp);
9693 
9694 	tg3_stop_fw(tp);
9695 
9696 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9697 
9698 	if (tg3_flag(tp, INIT_COMPLETE))
9699 		tg3_abort_hw(tp, 1);
9700 
9701 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9702 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9703 		tg3_phy_pull_config(tp);
9704 		tg3_eee_pull_config(tp, NULL);
9705 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9706 	}
9707 
9708 	/* Enable MAC control of LPI */
9709 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9710 		tg3_setup_eee(tp);
9711 
9712 	if (reset_phy)
9713 		tg3_phy_reset(tp);
9714 
9715 	err = tg3_chip_reset(tp);
9716 	if (err)
9717 		return err;
9718 
9719 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9720 
9721 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9722 		val = tr32(TG3_CPMU_CTRL);
9723 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9724 		tw32(TG3_CPMU_CTRL, val);
9725 
9726 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9727 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9728 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9729 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9730 
9731 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9732 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9733 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9734 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9735 
9736 		val = tr32(TG3_CPMU_HST_ACC);
9737 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9738 		val |= CPMU_HST_ACC_MACCLK_6_25;
9739 		tw32(TG3_CPMU_HST_ACC, val);
9740 	}
9741 
9742 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9743 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9744 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9745 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9746 		tw32(PCIE_PWR_MGMT_THRESH, val);
9747 
9748 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9749 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9750 
9751 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9752 
9753 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9754 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9755 	}
9756 
9757 	if (tg3_flag(tp, L1PLLPD_EN)) {
9758 		u32 grc_mode = tr32(GRC_MODE);
9759 
9760 		/* Access the lower 1K of PL PCIE block registers. */
9761 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9762 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9763 
9764 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9765 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9766 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9767 
9768 		tw32(GRC_MODE, grc_mode);
9769 	}
9770 
9771 	if (tg3_flag(tp, 57765_CLASS)) {
9772 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9773 			u32 grc_mode = tr32(GRC_MODE);
9774 
9775 			/* Access the lower 1K of PL PCIE block registers. */
9776 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9777 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9778 
9779 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9780 				   TG3_PCIE_PL_LO_PHYCTL5);
9781 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9782 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9783 
9784 			tw32(GRC_MODE, grc_mode);
9785 		}
9786 
9787 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9788 			u32 grc_mode;
9789 
9790 			/* Fix transmit hangs */
9791 			val = tr32(TG3_CPMU_PADRNG_CTL);
9792 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9793 			tw32(TG3_CPMU_PADRNG_CTL, val);
9794 
9795 			grc_mode = tr32(GRC_MODE);
9796 
9797 			/* Access the lower 1K of DL PCIE block registers. */
9798 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9799 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9800 
9801 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9802 				   TG3_PCIE_DL_LO_FTSMAX);
9803 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9804 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9805 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9806 
9807 			tw32(GRC_MODE, grc_mode);
9808 		}
9809 
9810 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9811 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9812 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9813 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9814 	}
9815 
9816 	/* This works around an issue with Athlon chipsets on
9817 	 * B3 tigon3 silicon.  This bit has no effect on any
9818 	 * other revision.  But do not set this on PCI Express
9819 	 * chips and don't even touch the clocks if the CPMU is present.
9820 	 */
9821 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9822 		if (!tg3_flag(tp, PCI_EXPRESS))
9823 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9824 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9825 	}
9826 
9827 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9828 	    tg3_flag(tp, PCIX_MODE)) {
9829 		val = tr32(TG3PCI_PCISTATE);
9830 		val |= PCISTATE_RETRY_SAME_DMA;
9831 		tw32(TG3PCI_PCISTATE, val);
9832 	}
9833 
9834 	if (tg3_flag(tp, ENABLE_APE)) {
9835 		/* Allow reads and writes to the
9836 		 * APE register and memory space.
9837 		 */
9838 		val = tr32(TG3PCI_PCISTATE);
9839 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9840 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9841 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9842 		tw32(TG3PCI_PCISTATE, val);
9843 	}
9844 
9845 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9846 		/* Enable some hw fixes.  */
9847 		val = tr32(TG3PCI_MSI_DATA);
9848 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9849 		tw32(TG3PCI_MSI_DATA, val);
9850 	}
9851 
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
9857 	err = tg3_init_rings(tp);
9858 	if (err)
9859 		return err;
9860 
9861 	if (tg3_flag(tp, 57765_PLUS)) {
9862 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9863 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9864 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9865 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9866 		if (!tg3_flag(tp, 57765_CLASS) &&
9867 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9868 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9869 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9870 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9871 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9872 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe-time DMA
		 * engine test, tg3_test_dma().
		 */
9876 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9877 	}
9878 
9879 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9880 			  GRC_MODE_4X_NIC_SEND_RINGS |
9881 			  GRC_MODE_NO_TX_PHDR_CSUM |
9882 			  GRC_MODE_NO_RX_PHDR_CSUM);
9883 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9884 
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
9891 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9892 
9893 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9894 	if (tp->rxptpctl)
9895 		tw32(TG3_RX_PTP_CTL,
9896 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9897 
9898 	if (tg3_flag(tp, PTP_CAPABLE))
9899 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9900 
9901 	tw32(GRC_MODE, tp->grc_mode | val);
9902 
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
9904 	val = tr32(GRC_MISC_CFG);
9905 	val &= ~0xff;
9906 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9907 	tw32(GRC_MISC_CFG, val);
9908 
9909 	/* Initialize MBUF/DESC pool. */
9910 	if (tg3_flag(tp, 5750_PLUS)) {
9911 		/* Do nothing.  */
9912 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9913 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9914 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9915 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9916 		else
9917 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9918 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9919 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9920 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9921 		int fw_len;
9922 
9923 		fw_len = tp->fw_len;
9924 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9925 		tw32(BUFMGR_MB_POOL_ADDR,
9926 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9927 		tw32(BUFMGR_MB_POOL_SIZE,
9928 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9929 	}
9930 
9931 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9932 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9933 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9934 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9935 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9936 		tw32(BUFMGR_MB_HIGH_WATER,
9937 		     tp->bufmgr_config.mbuf_high_water);
9938 	} else {
9939 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9940 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9941 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9942 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9943 		tw32(BUFMGR_MB_HIGH_WATER,
9944 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9945 	}
9946 	tw32(BUFMGR_DMA_LOW_WATER,
9947 	     tp->bufmgr_config.dma_low_water);
9948 	tw32(BUFMGR_DMA_HIGH_WATER,
9949 	     tp->bufmgr_config.dma_high_water);
9950 
9951 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9952 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9953 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9954 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9955 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9956 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9957 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9958 	tw32(BUFMGR_MODE, val);
9959 	for (i = 0; i < 2000; i++) {
9960 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9961 			break;
9962 		udelay(10);
9963 	}
9964 	if (i >= 2000) {
9965 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9966 		return -ENODEV;
9967 	}
9968 
9969 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9970 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9971 
9972 	tg3_setup_rxbd_thresholds(tp);
9973 
	/* Initialize TG3_BDINFO records at:
9975 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9976 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9977 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9978 	 *
9979 	 * like so:
9980 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9981 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9982 	 *                              ring attribute flags
9983 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9984 	 *
9985 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9986 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9987 	 *
9988 	 * The size of each ring is fixed in the firmware, but the location is
9989 	 * configurable.
9990 	 */
9991 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9992 	     ((u64) tpr->rx_std_mapping >> 32));
9993 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9994 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
9995 	if (!tg3_flag(tp, 5717_PLUS))
9996 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9997 		     NIC_SRAM_RX_BUFFER_DESC);
9998 
9999 	/* Disable the mini ring */
10000 	if (!tg3_flag(tp, 5705_PLUS))
10001 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10002 		     BDINFO_FLAGS_DISABLED);
10003 
10004 	/* Program the jumbo buffer descriptor ring control
10005 	 * blocks on those devices that have them.
10006 	 */
10007 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10008 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10009 
10010 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10011 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10012 			     ((u64) tpr->rx_jmb_mapping >> 32));
10013 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10014 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10015 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10016 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10017 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10018 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10019 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10020 			    tg3_flag(tp, 57765_CLASS) ||
10021 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10022 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10023 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10024 		} else {
10025 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10026 			     BDINFO_FLAGS_DISABLED);
10027 		}
10028 
10029 		if (tg3_flag(tp, 57765_PLUS)) {
10030 			val = TG3_RX_STD_RING_SIZE(tp);
10031 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10032 			val |= (TG3_RX_STD_DMA_SZ << 2);
10033 		} else
10034 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10035 	} else
10036 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10037 
10038 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10039 
10040 	tpr->rx_std_prod_idx = tp->rx_pending;
10041 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10042 
10043 	tpr->rx_jmb_prod_idx =
10044 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10045 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10046 
10047 	tg3_rings_reset(tp);
10048 
10049 	/* Initialize MAC address and backoff seed. */
10050 	__tg3_set_mac_addr(tp, false);
10051 
10052 	/* MTU + ethernet header + FCS + optional VLAN tag */
10053 	tw32(MAC_RX_MTU_SIZE,
10054 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10055 
10056 	/* The slot time is changed by tg3_setup_phy if we
10057 	 * run at gigabit with half duplex.
10058 	 */
10059 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10060 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10061 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10062 
10063 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10064 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10065 		val |= tr32(MAC_TX_LENGTHS) &
10066 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10067 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10068 
10069 	tw32(MAC_TX_LENGTHS, val);
10070 
10071 	/* Receive rules. */
10072 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10073 	tw32(RCVLPC_CONFIG, 0x0181);
10074 
	/* Calculate the RDMAC_MODE setting early; we need it to
	 * determine the RCVLPC_STATS_ENABLE mask.
	 */
10078 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10079 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10080 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10081 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10082 		      RDMAC_MODE_LNGREAD_ENAB);
10083 
10084 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10085 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10086 
10087 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10088 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10089 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10090 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10091 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10092 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10093 
10094 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10095 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10096 		if (tg3_flag(tp, TSO_CAPABLE) &&
10097 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10098 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10099 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10100 			   !tg3_flag(tp, IS_5788)) {
10101 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10102 		}
10103 	}
10104 
10105 	if (tg3_flag(tp, PCI_EXPRESS))
10106 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10107 
10108 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10109 		tp->dma_limit = 0;
10110 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10111 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10112 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10113 		}
10114 	}
10115 
10116 	if (tg3_flag(tp, HW_TSO_1) ||
10117 	    tg3_flag(tp, HW_TSO_2) ||
10118 	    tg3_flag(tp, HW_TSO_3))
10119 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10120 
10121 	if (tg3_flag(tp, 57765_PLUS) ||
10122 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10123 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10124 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10125 
10126 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10127 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10128 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10129 
10130 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10131 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10132 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10133 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10134 	    tg3_flag(tp, 57765_PLUS)) {
10135 		u32 tgtreg;
10136 
10137 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10138 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10139 		else
10140 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10141 
10142 		val = tr32(tgtreg);
10143 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10144 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10145 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10146 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10147 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10148 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10149 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10150 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10151 		}
10152 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10153 	}
10154 
10155 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10156 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10157 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10158 		u32 tgtreg;
10159 
10160 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10161 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10162 		else
10163 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10164 
10165 		val = tr32(tgtreg);
10166 		tw32(tgtreg, val |
10167 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10168 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10169 	}
10170 
10171 	/* Receive/send statistics. */
10172 	if (tg3_flag(tp, 5750_PLUS)) {
10173 		val = tr32(RCVLPC_STATS_ENABLE);
10174 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10175 		tw32(RCVLPC_STATS_ENABLE, val);
10176 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10177 		   tg3_flag(tp, TSO_CAPABLE)) {
10178 		val = tr32(RCVLPC_STATS_ENABLE);
10179 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10180 		tw32(RCVLPC_STATS_ENABLE, val);
10181 	} else {
10182 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10183 	}
10184 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10185 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10186 	tw32(SNDDATAI_STATSCTRL,
10187 	     (SNDDATAI_SCTRL_ENABLE |
10188 	      SNDDATAI_SCTRL_FASTUPD));
10189 
10190 	/* Setup host coalescing engine. */
10191 	tw32(HOSTCC_MODE, 0);
10192 	for (i = 0; i < 2000; i++) {
10193 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10194 			break;
10195 		udelay(10);
10196 	}
10197 
10198 	__tg3_set_coalesce(tp, &tp->coal);
10199 
10200 	if (!tg3_flag(tp, 5705_PLUS)) {
10201 		/* Status/statistics block address.  See tg3_timer,
10202 		 * the tg3_periodic_fetch_stats call there, and
10203 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10204 		 */
10205 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10206 		     ((u64) tp->stats_mapping >> 32));
10207 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10208 		     ((u64) tp->stats_mapping & 0xffffffff));
10209 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10210 
10211 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10212 
10213 		/* Clear statistics and status block memory areas */
10214 		for (i = NIC_SRAM_STATS_BLK;
10215 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10216 		     i += sizeof(u32)) {
10217 			tg3_write_mem(tp, i, 0);
10218 			udelay(40);
10219 		}
10220 	}
10221 
10222 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10223 
10224 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10225 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10226 	if (!tg3_flag(tp, 5705_PLUS))
10227 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10228 
10229 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10230 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* Reset to prevent intermittently losing the first rx packet */
10232 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10233 		udelay(10);
10234 	}
10235 
10236 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10237 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10238 			MAC_MODE_FHDE_ENABLE;
10239 	if (tg3_flag(tp, ENABLE_APE))
10240 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10241 	if (!tg3_flag(tp, 5705_PLUS) &&
10242 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10243 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10244 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
	       MAC_MODE_TXSTAT_CLEAR);
10246 	udelay(40);
10247 
10248 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10249 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10250 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10251 	 * whether used as inputs or outputs, are set by boot code after
10252 	 * reset.
10253 	 */
10254 	if (!tg3_flag(tp, IS_NIC)) {
10255 		u32 gpio_mask;
10256 
10257 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10258 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10259 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10260 
10261 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10262 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10263 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10264 
10265 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10266 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10267 
10268 		tp->grc_local_ctrl &= ~gpio_mask;
10269 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10270 
10271 		/* GPIO1 must be driven high for eeprom write protect */
10272 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10273 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10274 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10275 	}
10276 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10277 	udelay(100);
10278 
10279 	if (tg3_flag(tp, USING_MSIX)) {
10280 		val = tr32(MSGINT_MODE);
10281 		val |= MSGINT_MODE_ENABLE;
10282 		if (tp->irq_cnt > 1)
10283 			val |= MSGINT_MODE_MULTIVEC_EN;
10284 		if (!tg3_flag(tp, 1SHOT_MSI))
10285 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10286 		tw32(MSGINT_MODE, val);
10287 	}
10288 
10289 	if (!tg3_flag(tp, 5705_PLUS)) {
10290 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10291 		udelay(40);
10292 	}
10293 
10294 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10295 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10296 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10297 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10298 	       WDMAC_MODE_LNGREAD_ENAB);
10299 
10300 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10301 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10302 		if (tg3_flag(tp, TSO_CAPABLE) &&
10303 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10304 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10305 			/* nothing */
10306 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10307 			   !tg3_flag(tp, IS_5788)) {
10308 			val |= WDMAC_MODE_RX_ACCEL;
10309 		}
10310 	}
10311 
10312 	/* Enable host coalescing bug fix */
10313 	if (tg3_flag(tp, 5755_PLUS))
10314 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10315 
10316 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10317 		val |= WDMAC_MODE_BURST_ALL_DATA;
10318 
10319 	tw32_f(WDMAC_MODE, val);
10320 	udelay(40);
10321 
10322 	if (tg3_flag(tp, PCIX_MODE)) {
10323 		u16 pcix_cmd;
10324 
10325 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10326 				     &pcix_cmd);
10327 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10328 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10329 			pcix_cmd |= PCI_X_CMD_READ_2K;
10330 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10331 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10332 			pcix_cmd |= PCI_X_CMD_READ_2K;
10333 		}
10334 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10335 				      pcix_cmd);
10336 	}
10337 
10338 	tw32_f(RDMAC_MODE, rdmac_mode);
10339 	udelay(40);
10340 
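	/* On 5719/5720, if any read-DMA length register exceeds the
	 * supported MTU, set the LSO read-DMA workaround bit; the bit is
	 * cleared again from tg3_periodic_fetch_stats() once enough
	 * frames have been transmitted (see 5719_5720_RDMA_BUG).
	 */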
10341 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10342 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10343 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10344 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10345 				break;
10346 		}
10347 		if (i < TG3_NUM_RDMA_CHANNELS) {
10348 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10349 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10350 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10351 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10352 		}
10353 	}
10354 
10355 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10356 	if (!tg3_flag(tp, 5705_PLUS))
10357 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10358 
10359 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10360 		tw32(SNDDATAC_MODE,
10361 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10362 	else
10363 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10364 
10365 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10366 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10367 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10368 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10369 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10370 	tw32(RCVDBDI_MODE, val);
10371 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10372 	if (tg3_flag(tp, HW_TSO_1) ||
10373 	    tg3_flag(tp, HW_TSO_2) ||
10374 	    tg3_flag(tp, HW_TSO_3))
10375 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10376 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10377 	if (tg3_flag(tp, ENABLE_TSS))
10378 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10379 	tw32(SNDBDI_MODE, val);
10380 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10381 
10382 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10383 		err = tg3_load_5701_a0_firmware_fix(tp);
10384 		if (err)
10385 			return err;
10386 	}
10387 
10388 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors from the firmware download.  If the
		 * download fails, the device will operate with EEE
		 * disabled.
		 */
10392 		tg3_load_57766_firmware(tp);
10393 	}
10394 
10395 	if (tg3_flag(tp, TSO_CAPABLE)) {
10396 		err = tg3_load_tso_firmware(tp);
10397 		if (err)
10398 			return err;
10399 	}
10400 
10401 	tp->tx_mode = TX_MODE_ENABLE;
10402 
10403 	if (tg3_flag(tp, 5755_PLUS) ||
10404 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10405 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10406 
10407 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10408 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10409 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10410 		tp->tx_mode &= ~val;
10411 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10412 	}
10413 
10414 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10415 	udelay(100);
10416 
10417 	if (tg3_flag(tp, ENABLE_RSS)) {
10418 		tg3_rss_write_indir_tbl(tp);
10419 
10420 		/* Setup the "secret" hash key. */
10421 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10422 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10423 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10424 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10425 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10426 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10427 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10428 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10429 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10430 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10431 	}
10432 
10433 	tp->rx_mode = RX_MODE_ENABLE;
10434 	if (tg3_flag(tp, 5755_PLUS))
10435 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10436 
10437 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10438 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10439 
10440 	if (tg3_flag(tp, ENABLE_RSS))
10441 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10442 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10443 			       RX_MODE_RSS_IPV6_HASH_EN |
10444 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10445 			       RX_MODE_RSS_IPV4_HASH_EN |
10446 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10447 
10448 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10449 	udelay(10);
10450 
10451 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10452 
10453 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10454 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10455 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10456 		udelay(10);
10457 	}
10458 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10459 	udelay(10);
10460 
10461 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10462 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10463 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10464 			/* Set drive transmission level to 1.2V  */
10465 			/* only if the signal pre-emphasis bit is not set  */
10466 			val = tr32(MAC_SERDES_CFG);
10467 			val &= 0xfffff000;
10468 			val |= 0x880;
10469 			tw32(MAC_SERDES_CFG, val);
10470 		}
10471 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10472 			tw32(MAC_SERDES_CFG, 0x616000);
10473 	}
10474 
10475 	/* Prevent chip from dropping frames when flow control
10476 	 * is enabled.
10477 	 */
10478 	if (tg3_flag(tp, 57765_CLASS))
10479 		val = 1;
10480 	else
10481 		val = 2;
10482 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10483 
10484 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10485 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10486 		/* Use hardware link auto-negotiation */
10487 		tg3_flag_set(tp, HW_AUTONEG);
10488 	}
10489 
10490 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10491 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10492 		u32 tmp;
10493 
10494 		tmp = tr32(SERDES_RX_CTRL);
10495 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10496 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10497 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10498 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10499 	}
10500 
10501 	if (!tg3_flag(tp, USE_PHYLIB)) {
10502 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10503 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10504 
10505 		err = tg3_setup_phy(tp, false);
10506 		if (err)
10507 			return err;
10508 
10509 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10510 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10511 			u32 tmp;
10512 
10513 			/* Clear CRC stats. */
10514 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10515 				tg3_writephy(tp, MII_TG3_TEST1,
10516 					     tmp | MII_TG3_TEST1_CRC_EN);
10517 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10518 			}
10519 		}
10520 	}
10521 
10522 	__tg3_set_rx_mode(tp->dev);
10523 
10524 	/* Initialize receive rules. */
10525 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10526 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10527 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10528 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10529 
10530 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10531 		limit = 8;
10532 	else
10533 		limit = 16;
10534 	if (tg3_flag(tp, ENABLE_ASF))
10535 		limit -= 4;
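	/* Intentional fallthrough: entering the switch at 'limit' clears
	 * every unused rule/value pair from limit - 1 down to 4.  Rules
	 * 0 and 1 were programmed above; rules 2 and 3 are left alone.
	 */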
10536 	switch (limit) {
10537 	case 16:
10538 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10539 	case 15:
10540 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10541 	case 14:
10542 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10543 	case 13:
10544 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10545 	case 12:
10546 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10547 	case 11:
10548 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10549 	case 10:
10550 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10551 	case 9:
10552 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10553 	case 8:
10554 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10555 	case 7:
10556 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10557 	case 6:
10558 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10559 	case 5:
10560 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10561 	case 4:
10562 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10563 	case 3:
10564 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10565 	case 2:
10566 	case 1:
10567 
10568 	default:
10569 		break;
10570 	}
10571 
10572 	if (tg3_flag(tp, ENABLE_APE))
10573 		/* Write our heartbeat update interval to APE. */
10574 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10575 				APE_HOST_HEARTBEAT_INT_DISABLE);
10576 
10577 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10578 
10579 	return 0;
10580 }
10581 
10582 /* Called at device open time to get the chip ready for
10583  * packet processing.  Invoked with tp->lock held.
10584  */
10585 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10586 {
10587 	/* Chip may have been just powered on. If so, the boot code may still
10588 	 * be running initialization. Wait for it to finish to avoid races in
10589 	 * accessing the hardware.
10590 	 */
10591 	tg3_enable_register_access(tp);
10592 	tg3_poll_fw(tp);
10593 
10594 	tg3_switch_clocks(tp);
10595 
10596 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10597 
10598 	return tg3_reset_hw(tp, reset_phy);
10599 }
10600 
10601 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10602 {
10603 	int i;
10604 
10605 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10606 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10607 
10608 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10609 		off += len;
10610 
10611 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10612 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10613 			memset(ocir, 0, TG3_OCIR_LEN);
10614 	}
10615 }
10616 
10617 /* sysfs attributes for hwmon */
10618 static ssize_t tg3_show_temp(struct device *dev,
10619 			     struct device_attribute *devattr, char *buf)
10620 {
10621 	struct pci_dev *pdev = to_pci_dev(dev);
10622 	struct net_device *netdev = pci_get_drvdata(pdev);
10623 	struct tg3 *tp = netdev_priv(netdev);
10624 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10625 	u32 temperature;
10626 
10627 	spin_lock_bh(&tp->lock);
10628 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10629 				sizeof(temperature));
10630 	spin_unlock_bh(&tp->lock);
	/* hwmon expects temperatures in millidegrees Celsius */
	return sprintf(buf, "%u\n", temperature * 1000);
10632 }
10633 
10634 
10635 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10636 			  TG3_TEMP_SENSOR_OFFSET);
10637 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10638 			  TG3_TEMP_CAUTION_OFFSET);
10639 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10640 			  TG3_TEMP_MAX_OFFSET);
10641 
10642 static struct attribute *tg3_attributes[] = {
10643 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10644 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10645 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10646 	NULL
10647 };
10648 
10649 static const struct attribute_group tg3_group = {
10650 	.attrs = tg3_attributes,
10651 };
10652 
10653 static void tg3_hwmon_close(struct tg3 *tp)
10654 {
10655 	if (tp->hwmon_dev) {
10656 		hwmon_device_unregister(tp->hwmon_dev);
10657 		tp->hwmon_dev = NULL;
10658 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10659 	}
10660 }
10661 
10662 static void tg3_hwmon_open(struct tg3 *tp)
10663 {
10664 	int i, err;
10665 	u32 size = 0;
10666 	struct pci_dev *pdev = tp->pdev;
10667 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10668 
10669 	tg3_sd_scan_scratchpad(tp, ocirs);
10670 
10671 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10672 		if (!ocirs[i].src_data_length)
10673 			continue;
10674 
10675 		size += ocirs[i].src_hdr_length;
10676 		size += ocirs[i].src_data_length;
10677 	}
10678 
10679 	if (!size)
10680 		return;
10681 
10682 	/* Register hwmon sysfs hooks */
10683 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10684 	if (err) {
10685 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10686 		return;
10687 	}
10688 
10689 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10690 	if (IS_ERR(tp->hwmon_dev)) {
10691 		tp->hwmon_dev = NULL;
10692 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10693 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10694 	}
10695 }
10696 
10697 
10698 #define TG3_STAT_ADD32(PSTAT, REG) \
10699 do {	u32 __val = tr32(REG); \
10700 	(PSTAT)->low += __val; \
10701 	if ((PSTAT)->low < __val) \
10702 		(PSTAT)->high += 1; \
10703 } while (0)
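
/* TG3_STAT_ADD32() folds a 32-bit hardware counter read into a 64-bit
 * {high, low} software accumulator.  The carry is detected via unsigned
 * wraparound: for example, with low = 0xffffff00 and a read of 0x200,
 * low becomes 0x100, which is less than the value just added, so high
 * is incremented.
 */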
10704 
10705 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10706 {
10707 	struct tg3_hw_stats *sp = tp->hw_stats;
10708 
10709 	if (!tp->link_up)
10710 		return;
10711 
10712 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10713 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10714 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10715 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10716 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10717 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10718 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10719 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10720 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10721 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10722 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10723 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10724 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10725 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10726 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10727 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10728 		u32 val;
10729 
10730 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10731 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10732 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10733 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10734 	}
10735 
10736 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10737 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10738 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10739 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10740 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10741 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10742 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10743 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10744 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10745 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10746 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10747 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10748 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10749 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10750 
10751 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10752 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10753 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10754 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10755 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10756 	} else {
10757 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10758 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10759 		if (val) {
10760 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10761 			sp->rx_discards.low += val;
10762 			if (sp->rx_discards.low < val)
10763 				sp->rx_discards.high += 1;
10764 		}
10765 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10766 	}
10767 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10768 }
10769 
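/* Some chips can drop an MSI under load.  Once per timer tick, check
 * whether a vector reports pending work while its consumer indices have
 * not moved since the last tick; after one grace tick, call the MSI
 * handler directly to unwedge the vector.
 */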
10770 static void tg3_chk_missed_msi(struct tg3 *tp)
10771 {
10772 	u32 i;
10773 
10774 	for (i = 0; i < tp->irq_cnt; i++) {
10775 		struct tg3_napi *tnapi = &tp->napi[i];
10776 
10777 		if (tg3_has_work(tnapi)) {
10778 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10779 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10780 				if (tnapi->chk_msi_cnt < 1) {
10781 					tnapi->chk_msi_cnt++;
10782 					return;
10783 				}
10784 				tg3_msi(0, tnapi);
10785 			}
10786 		}
10787 		tnapi->chk_msi_cnt = 0;
10788 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10789 		tnapi->last_tx_cons = tnapi->tx_cons;
10790 	}
10791 }
10792 
10793 static void tg3_timer(unsigned long __opaque)
10794 {
10795 	struct tg3 *tp = (struct tg3 *) __opaque;
10796 
10797 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10798 		goto restart_timer;
10799 
10800 	spin_lock(&tp->lock);
10801 
10802 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10803 	    tg3_flag(tp, 57765_CLASS))
10804 		tg3_chk_missed_msi(tp);
10805 
10806 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10807 		/* BCM4785: Flush posted writes from GbE to host memory. */
10808 		tr32(HOSTCC_MODE);
10809 	}
10810 
10811 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
10816 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10817 			tw32(GRC_LOCAL_CTRL,
10818 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10819 		} else {
10820 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10821 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10822 		}
10823 
10824 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10825 			spin_unlock(&tp->lock);
10826 			tg3_reset_task_schedule(tp);
10827 			goto restart_timer;
10828 		}
10829 	}
10830 
10831 	/* This part only runs once per second. */
10832 	if (!--tp->timer_counter) {
10833 		if (tg3_flag(tp, 5705_PLUS))
10834 			tg3_periodic_fetch_stats(tp);
10835 
10836 		if (tp->setlpicnt && !--tp->setlpicnt)
10837 			tg3_phy_eee_enable(tp);
10838 
10839 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10840 			u32 mac_stat;
10841 			int phy_event;
10842 
10843 			mac_stat = tr32(MAC_STATUS);
10844 
10845 			phy_event = 0;
10846 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10847 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10848 					phy_event = 1;
10849 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10850 				phy_event = 1;
10851 
10852 			if (phy_event)
10853 				tg3_setup_phy(tp, false);
10854 		} else if (tg3_flag(tp, POLL_SERDES)) {
10855 			u32 mac_stat = tr32(MAC_STATUS);
10856 			int need_setup = 0;
10857 
10858 			if (tp->link_up &&
10859 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10860 				need_setup = 1;
10861 			}
10862 			if (!tp->link_up &&
10863 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10864 					 MAC_STATUS_SIGNAL_DET))) {
10865 				need_setup = 1;
10866 			}
10867 			if (need_setup) {
10868 				if (!tp->serdes_counter) {
10869 					tw32_f(MAC_MODE,
10870 					     (tp->mac_mode &
10871 					      ~MAC_MODE_PORT_MODE_MASK));
10872 					udelay(40);
10873 					tw32_f(MAC_MODE, tp->mac_mode);
10874 					udelay(40);
10875 				}
10876 				tg3_setup_phy(tp, false);
10877 			}
10878 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10879 			   tg3_flag(tp, 5780_CLASS)) {
10880 			tg3_serdes_parallel_detect(tp);
10881 		}
10882 
10883 		tp->timer_counter = tp->timer_multiplier;
10884 	}
10885 
	/* The heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real-time kernels,
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
10897 	 *
10898 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10899 	 * to check the ring condition when the heartbeat is expiring
10900 	 * before doing the reset.  This will prevent most unintended
10901 	 * resets.
10902 	 */
10903 	if (!--tp->asf_counter) {
10904 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10905 			tg3_wait_for_event_ack(tp);
10906 
10907 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10908 				      FWCMD_NICDRV_ALIVE3);
10909 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10910 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10911 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10912 
10913 			tg3_generate_fw_event(tp);
10914 		}
10915 		tp->asf_counter = tp->asf_multiplier;
10916 	}
10917 
10918 	spin_unlock(&tp->lock);
10919 
10920 restart_timer:
10921 	tp->timer.expires = jiffies + tp->timer_offset;
10922 	add_timer(&tp->timer);
10923 }
10924 
10925 static void tg3_timer_init(struct tg3 *tp)
10926 {
10927 	if (tg3_flag(tp, TAGGED_STATUS) &&
10928 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10929 	    !tg3_flag(tp, 57765_CLASS))
10930 		tp->timer_offset = HZ;
10931 	else
10932 		tp->timer_offset = HZ / 10;
10933 
10934 	BUG_ON(tp->timer_offset > HZ);
10935 
10936 	tp->timer_multiplier = (HZ / tp->timer_offset);
10937 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10938 			     TG3_FW_UPDATE_FREQ_SEC;
10939 
10940 	init_timer(&tp->timer);
10941 	tp->timer.data = (unsigned long) tp;
10942 	tp->timer.function = tg3_timer;
10943 }
10944 
10945 static void tg3_timer_start(struct tg3 *tp)
10946 {
10947 	tp->asf_counter   = tp->asf_multiplier;
10948 	tp->timer_counter = tp->timer_multiplier;
10949 
10950 	tp->timer.expires = jiffies + tp->timer_offset;
10951 	add_timer(&tp->timer);
10952 }
10953 
10954 static void tg3_timer_stop(struct tg3 *tp)
10955 {
10956 	del_timer_sync(&tp->timer);
10957 }
10958 
10959 /* Restart hardware after configuration changes, self-test, etc.
10960  * Invoked with tp->lock held.
10961  */
10962 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10963 	__releases(tp->lock)
10964 	__acquires(tp->lock)
10965 {
10966 	int err;
10967 
10968 	err = tg3_init_hw(tp, reset_phy);
10969 	if (err) {
10970 		netdev_err(tp->dev,
10971 			   "Failed to re-initialize device, aborting\n");
10972 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10973 		tg3_full_unlock(tp);
10974 		tg3_timer_stop(tp);
10975 		tp->irq_sync = 0;
10976 		tg3_napi_enable(tp);
10977 		dev_close(tp->dev);
10978 		tg3_full_lock(tp, 0);
10979 	}
10980 	return err;
10981 }
10982 
10983 static void tg3_reset_task(struct work_struct *work)
10984 {
10985 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10986 	int err;
10987 
10988 	tg3_full_lock(tp, 0);
10989 
10990 	if (!netif_running(tp->dev)) {
10991 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10992 		tg3_full_unlock(tp);
10993 		return;
10994 	}
10995 
10996 	tg3_full_unlock(tp);
10997 
10998 	tg3_phy_stop(tp);
10999 
11000 	tg3_netif_stop(tp);
11001 
11002 	tg3_full_lock(tp, 1);
11003 
11004 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11005 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11006 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11007 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11008 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11009 	}
11010 
11011 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11012 	err = tg3_init_hw(tp, true);
11013 	if (err)
11014 		goto out;
11015 
11016 	tg3_netif_start(tp);
11017 
11018 out:
11019 	tg3_full_unlock(tp);
11020 
11021 	if (!err)
11022 		tg3_phy_start(tp);
11023 
11024 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11025 }
11026 
11027 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11028 {
11029 	irq_handler_t fn;
11030 	unsigned long flags;
11031 	char *name;
11032 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11033 
11034 	if (tp->irq_cnt == 1)
11035 		name = tp->dev->name;
11036 	else {
11037 		name = &tnapi->irq_lbl[0];
11038 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
11039 		name[IFNAMSIZ-1] = 0;
11040 	}
11041 
11042 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11043 		fn = tg3_msi;
11044 		if (tg3_flag(tp, 1SHOT_MSI))
11045 			fn = tg3_msi_1shot;
11046 		flags = 0;
11047 	} else {
11048 		fn = tg3_interrupt;
11049 		if (tg3_flag(tp, TAGGED_STATUS))
11050 			fn = tg3_interrupt_tagged;
11051 		flags = IRQF_SHARED;
11052 	}
11053 
11054 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11055 }
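
/* The labels built above show up in /proc/interrupts (the device name
 * here is only an example): a single-vector setup registers the bare
 * "eth0", while a three-vector MSI-X setup registers "eth0-0",
 * "eth0-1" and "eth0-2", as produced by the snprintf() above.
 */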
11056 
11057 static int tg3_test_interrupt(struct tg3 *tp)
11058 {
11059 	struct tg3_napi *tnapi = &tp->napi[0];
11060 	struct net_device *dev = tp->dev;
11061 	int err, i, intr_ok = 0;
11062 	u32 val;
11063 
11064 	if (!netif_running(dev))
11065 		return -ENODEV;
11066 
11067 	tg3_disable_ints(tp);
11068 
11069 	free_irq(tnapi->irq_vec, tnapi);
11070 
11071 	/*
11072 	 * Turn off MSI one shot mode.  Otherwise this test has no
11073 	 * observable way to know whether the interrupt was delivered.
11074 	 */
11075 	if (tg3_flag(tp, 57765_PLUS)) {
11076 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11077 		tw32(MSGINT_MODE, val);
11078 	}
11079 
11080 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11081 			  IRQF_SHARED, dev->name, tnapi);
11082 	if (err)
11083 		return err;
11084 
11085 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11086 	tg3_enable_ints(tp);
11087 
11088 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11089 	       tnapi->coal_now);
11090 
11091 	for (i = 0; i < 5; i++) {
11092 		u32 int_mbox, misc_host_ctrl;
11093 
11094 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11095 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11096 
11097 		if ((int_mbox != 0) ||
11098 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11099 			intr_ok = 1;
11100 			break;
11101 		}
11102 
11103 		if (tg3_flag(tp, 57765_PLUS) &&
11104 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11105 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11106 
11107 		msleep(10);
11108 	}
11109 
11110 	tg3_disable_ints(tp);
11111 
11112 	free_irq(tnapi->irq_vec, tnapi);
11113 
11114 	err = tg3_request_irq(tp, 0);
11115 
11116 	if (err)
11117 		return err;
11118 
11119 	if (intr_ok) {
11120 		/* Reenable MSI one shot mode. */
11121 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11122 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11123 			tw32(MSGINT_MODE, val);
11124 		}
11125 		return 0;
11126 	}
11127 
11128 	return -EIO;
11129 }
11130 
11131 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11132  * INTx mode is successfully restored.
11133  */
11134 static int tg3_test_msi(struct tg3 *tp)
11135 {
11136 	int err;
11137 	u16 pci_cmd;
11138 
11139 	if (!tg3_flag(tp, USING_MSI))
11140 		return 0;
11141 
11142 	/* Turn off SERR reporting in case MSI terminates with Master
11143 	 * Abort.
11144 	 */
11145 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11146 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11147 			      pci_cmd & ~PCI_COMMAND_SERR);
11148 
11149 	err = tg3_test_interrupt(tp);
11150 
11151 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11152 
11153 	if (!err)
11154 		return 0;
11155 
11156 	/* other failures */
11157 	if (err != -EIO)
11158 		return err;
11159 
11160 	/* MSI test failed, go back to INTx mode */
11161 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11162 		    "to INTx mode. Please report this failure to the PCI "
11163 		    "maintainer and include system chipset information\n");
11164 
11165 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11166 
11167 	pci_disable_msi(tp->pdev);
11168 
11169 	tg3_flag_clear(tp, USING_MSI);
11170 	tp->napi[0].irq_vec = tp->pdev->irq;
11171 
11172 	err = tg3_request_irq(tp, 0);
11173 	if (err)
11174 		return err;
11175 
11176 	/* Need to reset the chip because the MSI cycle may have terminated
11177 	 * with Master Abort.
11178 	 */
11179 	tg3_full_lock(tp, 1);
11180 
11181 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11182 	err = tg3_init_hw(tp, true);
11183 
11184 	tg3_full_unlock(tp);
11185 
11186 	if (err)
11187 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11188 
11189 	return err;
11190 }
11191 
11192 static int tg3_request_firmware(struct tg3 *tp)
11193 {
11194 	const struct tg3_firmware_hdr *fw_hdr;
11195 
11196 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11197 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11198 			   tp->fw_needed);
11199 		return -ENOENT;
11200 	}
11201 
11202 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11203 
11204 	/* Firmware blob starts with version numbers, followed by
11205 	 * start address and _full_ length including BSS sections
11206 	 * (which must be longer than the actual data, of course).
11207 	 */
11208 
11209 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11210 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11211 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11212 			   tp->fw_len, tp->fw_needed);
11213 		release_firmware(tp->fw);
11214 		tp->fw = NULL;
11215 		return -EINVAL;
11216 	}
11217 
11218 	/* We no longer need firmware; we have it. */
11219 	tp->fw_needed = NULL;
11220 	return 0;
11221 }
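
/* For reference, the header validated above is three big-endian words
 * (a sketch of struct tg3_firmware_hdr as declared in tg3.h):
 *
 *	__be32 version;		version numbers
 *	__be32 base_addr;	load address on the NIC
 *	__be32 len;		full image length, including BSS
 *
 * Because len covers BSS, it may exceed the payload actually present
 * in the file, but it must never be smaller than
 * tp->fw->size - TG3_FW_HDR_LEN, which is what the check above
 * enforces.
 */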
11222 
11223 static u32 tg3_irq_count(struct tg3 *tp)
11224 {
11225 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11226 
11227 	if (irq_cnt > 1) {
11228 		/* We want as many rx rings enabled as there are cpus.
11229 		 * In multiqueue MSI-X mode, the first MSI-X vector
11230 		 * only deals with link interrupts, etc, so we add
11231 		 * one to the number of vectors we are requesting.
11232 		 */
11233 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11234 	}
11235 
11236 	return irq_cnt;
11237 }
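
/* Worked example (queue counts assumed): with rxq_cnt = 4 and
 * txq_cnt = 1, irq_cnt = min(max(4, 1) + 1, tp->irq_max); with
 * irq_max = 5 that yields 5 vectors: vector 0 for link and other
 * slow-path interrupts plus one vector per rx ring.  With a single
 * rx and tx queue the function simply returns 1.
 */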
11238 
11239 static bool tg3_enable_msix(struct tg3 *tp)
11240 {
11241 	int i, rc;
11242 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11243 
11244 	tp->txq_cnt = tp->txq_req;
11245 	tp->rxq_cnt = tp->rxq_req;
11246 	if (!tp->rxq_cnt)
11247 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11248 	if (tp->rxq_cnt > tp->rxq_max)
11249 		tp->rxq_cnt = tp->rxq_max;
11250 
11251 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11252 	 * scheduling of the TX rings can cause starvation of rings with
11253 	 * small packets when other rings have TSO or jumbo packets.
11254 	 */
11255 	if (!tp->txq_req)
11256 		tp->txq_cnt = 1;
11257 
11258 	tp->irq_cnt = tg3_irq_count(tp);
11259 
11260 	for (i = 0; i < tp->irq_max; i++) {
11261 		msix_ent[i].entry  = i;
11262 		msix_ent[i].vector = 0;
11263 	}
11264 
11265 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11266 	if (rc < 0) {
11267 		return false;
11268 	} else if (rc != 0) {
11269 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11270 			return false;
11271 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11272 			      tp->irq_cnt, rc);
11273 		tp->irq_cnt = rc;
11274 		tp->rxq_cnt = max(rc - 1, 1);
11275 		if (tp->txq_cnt)
11276 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11277 	}
11278 
11279 	for (i = 0; i < tp->irq_max; i++)
11280 		tp->napi[i].irq_vec = msix_ent[i].vector;
11281 
11282 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11283 		pci_disable_msix(tp->pdev);
11284 		return false;
11285 	}
11286 
11287 	if (tp->irq_cnt == 1)
11288 		return true;
11289 
11290 	tg3_flag_set(tp, ENABLE_RSS);
11291 
11292 	if (tp->txq_cnt > 1)
11293 		tg3_flag_set(tp, ENABLE_TSS);
11294 
11295 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11296 
11297 	return true;
11298 }
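
/* Example of the partial-grant path above (vector counts assumed):
 * if 5 vectors are requested and pci_enable_msix() returns 3, the
 * retry asks for exactly 3, irq_cnt becomes 3, and rxq_cnt becomes
 * max(3 - 1, 1) = 2, again reserving vector 0 for link interrupts.
 */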
11299 
11300 static void tg3_ints_init(struct tg3 *tp)
11301 {
11302 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11303 	    !tg3_flag(tp, TAGGED_STATUS)) {
11304 		/* All MSI-supporting chips should support tagged
11305 		 * status.  Assert that this is the case.
11306 		 */
11307 		netdev_warn(tp->dev,
11308 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11309 		goto defcfg;
11310 	}
11311 
11312 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11313 		tg3_flag_set(tp, USING_MSIX);
11314 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11315 		tg3_flag_set(tp, USING_MSI);
11316 
11317 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11318 		u32 msi_mode = tr32(MSGINT_MODE);
11319 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11320 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11321 		if (!tg3_flag(tp, 1SHOT_MSI))
11322 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11323 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11324 	}
11325 defcfg:
11326 	if (!tg3_flag(tp, USING_MSIX)) {
11327 		tp->irq_cnt = 1;
11328 		tp->napi[0].irq_vec = tp->pdev->irq;
11329 	}
11330 
11331 	if (tp->irq_cnt == 1) {
11332 		tp->txq_cnt = 1;
11333 		tp->rxq_cnt = 1;
11334 		netif_set_real_num_tx_queues(tp->dev, 1);
11335 		netif_set_real_num_rx_queues(tp->dev, 1);
11336 	}
11337 }
11338 
11339 static void tg3_ints_fini(struct tg3 *tp)
11340 {
11341 	if (tg3_flag(tp, USING_MSIX))
11342 		pci_disable_msix(tp->pdev);
11343 	else if (tg3_flag(tp, USING_MSI))
11344 		pci_disable_msi(tp->pdev);
11345 	tg3_flag_clear(tp, USING_MSI);
11346 	tg3_flag_clear(tp, USING_MSIX);
11347 	tg3_flag_clear(tp, ENABLE_RSS);
11348 	tg3_flag_clear(tp, ENABLE_TSS);
11349 }
11350 
11351 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11352 		     bool init)
11353 {
11354 	struct net_device *dev = tp->dev;
11355 	int i, err;
11356 
11357 	/*
11358 	 * Set up interrupts first so we know how
11359 	 * many NAPI resources to allocate
11360 	 */
11361 	tg3_ints_init(tp);
11362 
11363 	tg3_rss_check_indir_tbl(tp);
11364 
11365 	/* The placement of this call is tied
11366 	 * to the setup and use of Host TX descriptors.
11367 	 */
11368 	err = tg3_alloc_consistent(tp);
11369 	if (err)
11370 		goto out_ints_fini;
11371 
11372 	tg3_napi_init(tp);
11373 
11374 	tg3_napi_enable(tp);
11375 
11376 	for (i = 0; i < tp->irq_cnt; i++) {
11377 		struct tg3_napi *tnapi = &tp->napi[i];
11378 		err = tg3_request_irq(tp, i);
11379 		if (err) {
11380 			for (i--; i >= 0; i--) {
11381 				tnapi = &tp->napi[i];
11382 				free_irq(tnapi->irq_vec, tnapi);
11383 			}
11384 			goto out_napi_fini;
11385 		}
11386 	}
11387 
11388 	tg3_full_lock(tp, 0);
11389 
11390 	if (init)
11391 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11392 
11393 	err = tg3_init_hw(tp, reset_phy);
11394 	if (err) {
11395 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11396 		tg3_free_rings(tp);
11397 	}
11398 
11399 	tg3_full_unlock(tp);
11400 
11401 	if (err)
11402 		goto out_free_irq;
11403 
11404 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11405 		err = tg3_test_msi(tp);
11406 
11407 		if (err) {
11408 			tg3_full_lock(tp, 0);
11409 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11410 			tg3_free_rings(tp);
11411 			tg3_full_unlock(tp);
11412 
11413 			goto out_napi_fini;
11414 		}
11415 
11416 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11417 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11418 
11419 			tw32(PCIE_TRANSACTION_CFG,
11420 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11421 		}
11422 	}
11423 
11424 	tg3_phy_start(tp);
11425 
11426 	tg3_hwmon_open(tp);
11427 
11428 	tg3_full_lock(tp, 0);
11429 
11430 	tg3_timer_start(tp);
11431 	tg3_flag_set(tp, INIT_COMPLETE);
11432 	tg3_enable_ints(tp);
11433 
11434 	if (init)
11435 		tg3_ptp_init(tp);
11436 	else
11437 		tg3_ptp_resume(tp);
11438 
11440 	tg3_full_unlock(tp);
11441 
11442 	netif_tx_start_all_queues(dev);
11443 
11444 	/*
11445 	 * Reset the loopback feature if it was turned on while the device
11446 	 * was down; make sure that it's installed properly now.
11447 	 */
11448 	if (dev->features & NETIF_F_LOOPBACK)
11449 		tg3_set_loopback(dev, dev->features);
11450 
11451 	return 0;
11452 
11453 out_free_irq:
11454 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11455 		struct tg3_napi *tnapi = &tp->napi[i];
11456 		free_irq(tnapi->irq_vec, tnapi);
11457 	}
11458 
11459 out_napi_fini:
11460 	tg3_napi_disable(tp);
11461 	tg3_napi_fini(tp);
11462 	tg3_free_consistent(tp);
11463 
11464 out_ints_fini:
11465 	tg3_ints_fini(tp);
11466 
11467 	return err;
11468 }
11469 
11470 static void tg3_stop(struct tg3 *tp)
11471 {
11472 	int i;
11473 
11474 	tg3_reset_task_cancel(tp);
11475 	tg3_netif_stop(tp);
11476 
11477 	tg3_timer_stop(tp);
11478 
11479 	tg3_hwmon_close(tp);
11480 
11481 	tg3_phy_stop(tp);
11482 
11483 	tg3_full_lock(tp, 1);
11484 
11485 	tg3_disable_ints(tp);
11486 
11487 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11488 	tg3_free_rings(tp);
11489 	tg3_flag_clear(tp, INIT_COMPLETE);
11490 
11491 	tg3_full_unlock(tp);
11492 
11493 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11494 		struct tg3_napi *tnapi = &tp->napi[i];
11495 		free_irq(tnapi->irq_vec, tnapi);
11496 	}
11497 
11498 	tg3_ints_fini(tp);
11499 
11500 	tg3_napi_fini(tp);
11501 
11502 	tg3_free_consistent(tp);
11503 }
11504 
11505 static int tg3_open(struct net_device *dev)
11506 {
11507 	struct tg3 *tp = netdev_priv(dev);
11508 	int err;
11509 
11510 	if (tp->fw_needed) {
11511 		err = tg3_request_firmware(tp);
11512 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11513 			if (err) {
11514 				netdev_warn(tp->dev, "EEE capability disabled\n");
11515 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11516 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11517 				netdev_warn(tp->dev, "EEE capability restored\n");
11518 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11519 			}
11520 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11521 			if (err)
11522 				return err;
11523 		} else if (err) {
11524 			netdev_warn(tp->dev, "TSO capability disabled\n");
11525 			tg3_flag_clear(tp, TSO_CAPABLE);
11526 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11527 			netdev_notice(tp->dev, "TSO capability restored\n");
11528 			tg3_flag_set(tp, TSO_CAPABLE);
11529 		}
11530 	}
11531 
11532 	tg3_carrier_off(tp);
11533 
11534 	err = tg3_power_up(tp);
11535 	if (err)
11536 		return err;
11537 
11538 	tg3_full_lock(tp, 0);
11539 
11540 	tg3_disable_ints(tp);
11541 	tg3_flag_clear(tp, INIT_COMPLETE);
11542 
11543 	tg3_full_unlock(tp);
11544 
11545 	err = tg3_start(tp,
11546 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11547 			true, true);
11548 	if (err) {
11549 		tg3_frob_aux_power(tp, false);
11550 		pci_set_power_state(tp->pdev, PCI_D3hot);
11551 	}
11552 
11553 	if (tg3_flag(tp, PTP_CAPABLE)) {
11554 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11555 						   &tp->pdev->dev);
11556 		if (IS_ERR(tp->ptp_clock))
11557 			tp->ptp_clock = NULL;
11558 	}
11559 
11560 	return err;
11561 }
11562 
11563 static int tg3_close(struct net_device *dev)
11564 {
11565 	struct tg3 *tp = netdev_priv(dev);
11566 
11567 	tg3_ptp_fini(tp);
11568 
11569 	tg3_stop(tp);
11570 
11571 	/* Clear stats across close / open calls */
11572 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11573 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11574 
11575 	tg3_power_down_prepare(tp);
11576 
11577 	tg3_carrier_off(tp);
11578 
11579 	return 0;
11580 }
11581 
11582 static inline u64 get_stat64(tg3_stat64_t *val)
11583 {
11584 	return ((u64)val->high << 32) | ((u64)val->low);
11585 }
11586 
11587 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11588 {
11589 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11590 
11591 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11592 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11593 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11594 		u32 val;
11595 
11596 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11597 			tg3_writephy(tp, MII_TG3_TEST1,
11598 				     val | MII_TG3_TEST1_CRC_EN);
11599 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11600 		} else
11601 			val = 0;
11602 
11603 		tp->phy_crc_errors += val;
11604 
11605 		return tp->phy_crc_errors;
11606 	}
11607 
11608 	return get_stat64(&hw_stats->rx_fcs_errors);
11609 }
11610 
11611 #define ESTAT_ADD(member) \
11612 	estats->member =	old_estats->member + \
11613 				get_stat64(&hw_stats->member)
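
/* For example, ESTAT_ADD(rx_octets) below expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the total saved across the last reset
 * in tp->estats_prev plus the live hardware counter.
 */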
11614 
11615 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11616 {
11617 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11618 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11619 
11620 	ESTAT_ADD(rx_octets);
11621 	ESTAT_ADD(rx_fragments);
11622 	ESTAT_ADD(rx_ucast_packets);
11623 	ESTAT_ADD(rx_mcast_packets);
11624 	ESTAT_ADD(rx_bcast_packets);
11625 	ESTAT_ADD(rx_fcs_errors);
11626 	ESTAT_ADD(rx_align_errors);
11627 	ESTAT_ADD(rx_xon_pause_rcvd);
11628 	ESTAT_ADD(rx_xoff_pause_rcvd);
11629 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11630 	ESTAT_ADD(rx_xoff_entered);
11631 	ESTAT_ADD(rx_frame_too_long_errors);
11632 	ESTAT_ADD(rx_jabbers);
11633 	ESTAT_ADD(rx_undersize_packets);
11634 	ESTAT_ADD(rx_in_length_errors);
11635 	ESTAT_ADD(rx_out_length_errors);
11636 	ESTAT_ADD(rx_64_or_less_octet_packets);
11637 	ESTAT_ADD(rx_65_to_127_octet_packets);
11638 	ESTAT_ADD(rx_128_to_255_octet_packets);
11639 	ESTAT_ADD(rx_256_to_511_octet_packets);
11640 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11641 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11642 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11643 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11644 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11645 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11646 
11647 	ESTAT_ADD(tx_octets);
11648 	ESTAT_ADD(tx_collisions);
11649 	ESTAT_ADD(tx_xon_sent);
11650 	ESTAT_ADD(tx_xoff_sent);
11651 	ESTAT_ADD(tx_flow_control);
11652 	ESTAT_ADD(tx_mac_errors);
11653 	ESTAT_ADD(tx_single_collisions);
11654 	ESTAT_ADD(tx_mult_collisions);
11655 	ESTAT_ADD(tx_deferred);
11656 	ESTAT_ADD(tx_excessive_collisions);
11657 	ESTAT_ADD(tx_late_collisions);
11658 	ESTAT_ADD(tx_collide_2times);
11659 	ESTAT_ADD(tx_collide_3times);
11660 	ESTAT_ADD(tx_collide_4times);
11661 	ESTAT_ADD(tx_collide_5times);
11662 	ESTAT_ADD(tx_collide_6times);
11663 	ESTAT_ADD(tx_collide_7times);
11664 	ESTAT_ADD(tx_collide_8times);
11665 	ESTAT_ADD(tx_collide_9times);
11666 	ESTAT_ADD(tx_collide_10times);
11667 	ESTAT_ADD(tx_collide_11times);
11668 	ESTAT_ADD(tx_collide_12times);
11669 	ESTAT_ADD(tx_collide_13times);
11670 	ESTAT_ADD(tx_collide_14times);
11671 	ESTAT_ADD(tx_collide_15times);
11672 	ESTAT_ADD(tx_ucast_packets);
11673 	ESTAT_ADD(tx_mcast_packets);
11674 	ESTAT_ADD(tx_bcast_packets);
11675 	ESTAT_ADD(tx_carrier_sense_errors);
11676 	ESTAT_ADD(tx_discards);
11677 	ESTAT_ADD(tx_errors);
11678 
11679 	ESTAT_ADD(dma_writeq_full);
11680 	ESTAT_ADD(dma_write_prioq_full);
11681 	ESTAT_ADD(rxbds_empty);
11682 	ESTAT_ADD(rx_discards);
11683 	ESTAT_ADD(rx_errors);
11684 	ESTAT_ADD(rx_threshold_hit);
11685 
11686 	ESTAT_ADD(dma_readq_full);
11687 	ESTAT_ADD(dma_read_prioq_full);
11688 	ESTAT_ADD(tx_comp_queue_full);
11689 
11690 	ESTAT_ADD(ring_set_send_prod_index);
11691 	ESTAT_ADD(ring_status_update);
11692 	ESTAT_ADD(nic_irqs);
11693 	ESTAT_ADD(nic_avoided_irqs);
11694 	ESTAT_ADD(nic_tx_threshold_hit);
11695 
11696 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11697 }
11698 
11699 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11700 {
11701 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11702 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11703 
11704 	stats->rx_packets = old_stats->rx_packets +
11705 		get_stat64(&hw_stats->rx_ucast_packets) +
11706 		get_stat64(&hw_stats->rx_mcast_packets) +
11707 		get_stat64(&hw_stats->rx_bcast_packets);
11708 
11709 	stats->tx_packets = old_stats->tx_packets +
11710 		get_stat64(&hw_stats->tx_ucast_packets) +
11711 		get_stat64(&hw_stats->tx_mcast_packets) +
11712 		get_stat64(&hw_stats->tx_bcast_packets);
11713 
11714 	stats->rx_bytes = old_stats->rx_bytes +
11715 		get_stat64(&hw_stats->rx_octets);
11716 	stats->tx_bytes = old_stats->tx_bytes +
11717 		get_stat64(&hw_stats->tx_octets);
11718 
11719 	stats->rx_errors = old_stats->rx_errors +
11720 		get_stat64(&hw_stats->rx_errors);
11721 	stats->tx_errors = old_stats->tx_errors +
11722 		get_stat64(&hw_stats->tx_errors) +
11723 		get_stat64(&hw_stats->tx_mac_errors) +
11724 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11725 		get_stat64(&hw_stats->tx_discards);
11726 
11727 	stats->multicast = old_stats->multicast +
11728 		get_stat64(&hw_stats->rx_mcast_packets);
11729 	stats->collisions = old_stats->collisions +
11730 		get_stat64(&hw_stats->tx_collisions);
11731 
11732 	stats->rx_length_errors = old_stats->rx_length_errors +
11733 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11734 		get_stat64(&hw_stats->rx_undersize_packets);
11735 
11736 	stats->rx_over_errors = old_stats->rx_over_errors +
11737 		get_stat64(&hw_stats->rxbds_empty);
11738 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11739 		get_stat64(&hw_stats->rx_align_errors);
11740 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11741 		get_stat64(&hw_stats->tx_discards);
11742 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11743 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11744 
11745 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11746 		tg3_calc_crc_errors(tp);
11747 
11748 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11749 		get_stat64(&hw_stats->rx_discards);
11750 
11751 	stats->rx_dropped = tp->rx_dropped;
11752 	stats->tx_dropped = tp->tx_dropped;
11753 }
11754 
11755 static int tg3_get_regs_len(struct net_device *dev)
11756 {
11757 	return TG3_REG_BLK_SIZE;
11758 }
11759 
11760 static void tg3_get_regs(struct net_device *dev,
11761 		struct ethtool_regs *regs, void *_p)
11762 {
11763 	struct tg3 *tp = netdev_priv(dev);
11764 
11765 	regs->version = 0;
11766 
11767 	memset(_p, 0, TG3_REG_BLK_SIZE);
11768 
11769 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11770 		return;
11771 
11772 	tg3_full_lock(tp, 0);
11773 
11774 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11775 
11776 	tg3_full_unlock(tp);
11777 }
11778 
11779 static int tg3_get_eeprom_len(struct net_device *dev)
11780 {
11781 	struct tg3 *tp = netdev_priv(dev);
11782 
11783 	return tp->nvram_size;
11784 }
11785 
11786 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11787 {
11788 	struct tg3 *tp = netdev_priv(dev);
11789 	int ret;
11790 	u8  *pd;
11791 	u32 i, offset, len, b_offset, b_count;
11792 	__be32 val;
11793 
11794 	if (tg3_flag(tp, NO_NVRAM))
11795 		return -EINVAL;
11796 
11797 	offset = eeprom->offset;
11798 	len = eeprom->len;
11799 	eeprom->len = 0;
11800 
11801 	eeprom->magic = TG3_EEPROM_MAGIC;
11802 
11803 	if (offset & 3) {
11804 		/* adjustments to start on required 4 byte boundary */
11805 		b_offset = offset & 3;
11806 		b_count = 4 - b_offset;
11807 		if (b_count > len) {
11808 			/* i.e. offset=1 len=2 */
11809 			b_count = len;
11810 		}
11811 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11812 		if (ret)
11813 			return ret;
11814 		memcpy(data, ((char *)&val) + b_offset, b_count);
11815 		len -= b_count;
11816 		offset += b_count;
11817 		eeprom->len += b_count;
11818 	}
11819 
11820 	/* read bytes up to the last 4 byte boundary */
11821 	pd = &data[eeprom->len];
11822 	for (i = 0; i < (len - (len & 3)); i += 4) {
11823 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11824 		if (ret) {
11825 			eeprom->len += i;
11826 			return ret;
11827 		}
11828 		memcpy(pd + i, &val, 4);
11829 	}
11830 	eeprom->len += i;
11831 
11832 	if (len & 3) {
11833 		/* read last bytes not ending on 4 byte boundary */
11834 		pd = &data[eeprom->len];
11835 		b_count = len & 3;
11836 		b_offset = offset + len - b_count;
11837 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11838 		if (ret)
11839 			return ret;
11840 		memcpy(pd, &val, b_count);
11841 		eeprom->len += b_count;
11842 	}
11843 	return 0;
11844 }
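
/* Worked example of the alignment handling above (request values
 * assumed): for offset = 5, len = 6, the head step reads the word at
 * offset 4 and copies bytes 5..7 (b_offset = 1, b_count = 3), the
 * aligned middle loop copies nothing since only 3 bytes remain, and
 * the tail step reads the word at offset 8 and copies bytes 8..10.
 * Every NVRAM access stays on a 4 byte boundary while the caller sees
 * exactly the 6 bytes requested.
 */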
11845 
11846 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11847 {
11848 	struct tg3 *tp = netdev_priv(dev);
11849 	int ret;
11850 	u32 offset, len, b_offset, odd_len;
11851 	u8 *buf;
11852 	__be32 start, end;
11853 
11854 	if (tg3_flag(tp, NO_NVRAM) ||
11855 	    eeprom->magic != TG3_EEPROM_MAGIC)
11856 		return -EINVAL;
11857 
11858 	offset = eeprom->offset;
11859 	len = eeprom->len;
11860 
11861 	if ((b_offset = (offset & 3))) {
11862 		/* adjustments to start on required 4 byte boundary */
11863 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11864 		if (ret)
11865 			return ret;
11866 		len += b_offset;
11867 		offset &= ~3;
11868 		if (len < 4)
11869 			len = 4;
11870 	}
11871 
11872 	odd_len = 0;
11873 	if (len & 3) {
11874 		/* adjustments to end on required 4 byte boundary */
11875 		odd_len = 1;
11876 		len = (len + 3) & ~3;
11877 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11878 		if (ret)
11879 			return ret;
11880 	}
11881 
11882 	buf = data;
11883 	if (b_offset || odd_len) {
11884 		buf = kmalloc(len, GFP_KERNEL);
11885 		if (!buf)
11886 			return -ENOMEM;
11887 		if (b_offset)
11888 			memcpy(buf, &start, 4);
11889 		if (odd_len)
11890 			memcpy(buf+len-4, &end, 4);
11891 		memcpy(buf + b_offset, data, eeprom->len);
11892 	}
11893 
11894 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11895 
11896 	if (buf != data)
11897 		kfree(buf);
11898 
11899 	return ret;
11900 }
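
/* Worked read-modify-write example (request values assumed): for
 * offset = 5, len = 6, the code reads the word at offset 4 into
 * 'start' (b_offset = 1), rounds len up to 8 and reads the word at
 * offset 8 into 'end' (odd_len = 1), then assembles an 8 byte bounce
 * buffer from start's first byte, the six caller bytes, and end's
 * last byte.  tg3_nvram_write_block() then rewrites bytes 4..11 as a
 * single aligned block.
 */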
11901 
11902 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11903 {
11904 	struct tg3 *tp = netdev_priv(dev);
11905 
11906 	if (tg3_flag(tp, USE_PHYLIB)) {
11907 		struct phy_device *phydev;
11908 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11909 			return -EAGAIN;
11910 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11911 		return phy_ethtool_gset(phydev, cmd);
11912 	}
11913 
11914 	cmd->supported = (SUPPORTED_Autoneg);
11915 
11916 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11917 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11918 				   SUPPORTED_1000baseT_Full);
11919 
11920 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11921 		cmd->supported |= (SUPPORTED_100baseT_Half |
11922 				  SUPPORTED_100baseT_Full |
11923 				  SUPPORTED_10baseT_Half |
11924 				  SUPPORTED_10baseT_Full |
11925 				  SUPPORTED_TP);
11926 		cmd->port = PORT_TP;
11927 	} else {
11928 		cmd->supported |= SUPPORTED_FIBRE;
11929 		cmd->port = PORT_FIBRE;
11930 	}
11931 
11932 	cmd->advertising = tp->link_config.advertising;
11933 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11934 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11935 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11936 				cmd->advertising |= ADVERTISED_Pause;
11937 			} else {
11938 				cmd->advertising |= ADVERTISED_Pause |
11939 						    ADVERTISED_Asym_Pause;
11940 			}
11941 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11942 			cmd->advertising |= ADVERTISED_Asym_Pause;
11943 		}
11944 	}
11945 	if (netif_running(dev) && tp->link_up) {
11946 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11947 		cmd->duplex = tp->link_config.active_duplex;
11948 		cmd->lp_advertising = tp->link_config.rmt_adv;
11949 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11950 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11951 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11952 			else
11953 				cmd->eth_tp_mdix = ETH_TP_MDI;
11954 		}
11955 	} else {
11956 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11957 		cmd->duplex = DUPLEX_UNKNOWN;
11958 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11959 	}
11960 	cmd->phy_address = tp->phy_addr;
11961 	cmd->transceiver = XCVR_INTERNAL;
11962 	cmd->autoneg = tp->link_config.autoneg;
11963 	cmd->maxtxpkt = 0;
11964 	cmd->maxrxpkt = 0;
11965 	return 0;
11966 }
11967 
11968 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11969 {
11970 	struct tg3 *tp = netdev_priv(dev);
11971 	u32 speed = ethtool_cmd_speed(cmd);
11972 
11973 	if (tg3_flag(tp, USE_PHYLIB)) {
11974 		struct phy_device *phydev;
11975 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11976 			return -EAGAIN;
11977 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11978 		return phy_ethtool_sset(phydev, cmd);
11979 	}
11980 
11981 	if (cmd->autoneg != AUTONEG_ENABLE &&
11982 	    cmd->autoneg != AUTONEG_DISABLE)
11983 		return -EINVAL;
11984 
11985 	if (cmd->autoneg == AUTONEG_DISABLE &&
11986 	    cmd->duplex != DUPLEX_FULL &&
11987 	    cmd->duplex != DUPLEX_HALF)
11988 		return -EINVAL;
11989 
11990 	if (cmd->autoneg == AUTONEG_ENABLE) {
11991 		u32 mask = ADVERTISED_Autoneg |
11992 			   ADVERTISED_Pause |
11993 			   ADVERTISED_Asym_Pause;
11994 
11995 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11996 			mask |= ADVERTISED_1000baseT_Half |
11997 				ADVERTISED_1000baseT_Full;
11998 
11999 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12000 			mask |= ADVERTISED_100baseT_Half |
12001 				ADVERTISED_100baseT_Full |
12002 				ADVERTISED_10baseT_Half |
12003 				ADVERTISED_10baseT_Full |
12004 				ADVERTISED_TP;
12005 		else
12006 			mask |= ADVERTISED_FIBRE;
12007 
12008 		if (cmd->advertising & ~mask)
12009 			return -EINVAL;
12010 
12011 		mask &= (ADVERTISED_1000baseT_Half |
12012 			 ADVERTISED_1000baseT_Full |
12013 			 ADVERTISED_100baseT_Half |
12014 			 ADVERTISED_100baseT_Full |
12015 			 ADVERTISED_10baseT_Half |
12016 			 ADVERTISED_10baseT_Full);
12017 
12018 		cmd->advertising &= mask;
12019 	} else {
12020 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12021 			if (speed != SPEED_1000)
12022 				return -EINVAL;
12023 
12024 			if (cmd->duplex != DUPLEX_FULL)
12025 				return -EINVAL;
12026 		} else {
12027 			if (speed != SPEED_100 &&
12028 			    speed != SPEED_10)
12029 				return -EINVAL;
12030 		}
12031 	}
12032 
12033 	tg3_full_lock(tp, 0);
12034 
12035 	tp->link_config.autoneg = cmd->autoneg;
12036 	if (cmd->autoneg == AUTONEG_ENABLE) {
12037 		tp->link_config.advertising = (cmd->advertising |
12038 					      ADVERTISED_Autoneg);
12039 		tp->link_config.speed = SPEED_UNKNOWN;
12040 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12041 	} else {
12042 		tp->link_config.advertising = 0;
12043 		tp->link_config.speed = speed;
12044 		tp->link_config.duplex = cmd->duplex;
12045 	}
12046 
12047 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12048 
12049 	tg3_warn_mgmt_link_flap(tp);
12050 
12051 	if (netif_running(dev))
12052 		tg3_setup_phy(tp, true);
12053 
12054 	tg3_full_unlock(tp);
12055 
12056 	return 0;
12057 }
12058 
12059 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12060 {
12061 	struct tg3 *tp = netdev_priv(dev);
12062 
12063 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12064 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12065 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12066 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12067 }
12068 
12069 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12070 {
12071 	struct tg3 *tp = netdev_priv(dev);
12072 
12073 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12074 		wol->supported = WAKE_MAGIC;
12075 	else
12076 		wol->supported = 0;
12077 	wol->wolopts = 0;
12078 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12079 		wol->wolopts = WAKE_MAGIC;
12080 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12081 }
12082 
12083 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12084 {
12085 	struct tg3 *tp = netdev_priv(dev);
12086 	struct device *dp = &tp->pdev->dev;
12087 
12088 	if (wol->wolopts & ~WAKE_MAGIC)
12089 		return -EINVAL;
12090 	if ((wol->wolopts & WAKE_MAGIC) &&
12091 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12092 		return -EINVAL;
12093 
12094 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12095 
12096 	spin_lock_bh(&tp->lock);
12097 	if (device_may_wakeup(dp))
12098 		tg3_flag_set(tp, WOL_ENABLE);
12099 	else
12100 		tg3_flag_clear(tp, WOL_ENABLE);
12101 	spin_unlock_bh(&tp->lock);
12102 
12103 	return 0;
12104 }
12105 
12106 static u32 tg3_get_msglevel(struct net_device *dev)
12107 {
12108 	struct tg3 *tp = netdev_priv(dev);
12109 	return tp->msg_enable;
12110 }
12111 
12112 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12113 {
12114 	struct tg3 *tp = netdev_priv(dev);
12115 	tp->msg_enable = value;
12116 }
12117 
12118 static int tg3_nway_reset(struct net_device *dev)
12119 {
12120 	struct tg3 *tp = netdev_priv(dev);
12121 	int r;
12122 
12123 	if (!netif_running(dev))
12124 		return -EAGAIN;
12125 
12126 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12127 		return -EINVAL;
12128 
12129 	tg3_warn_mgmt_link_flap(tp);
12130 
12131 	if (tg3_flag(tp, USE_PHYLIB)) {
12132 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12133 			return -EAGAIN;
12134 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12135 	} else {
12136 		u32 bmcr;
12137 
12138 		spin_lock_bh(&tp->lock);
12139 		r = -EINVAL;
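		/* BMCR is read twice; the first result is discarded and
		 * the second read supplies the value tested below.
		 */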
12140 		tg3_readphy(tp, MII_BMCR, &bmcr);
12141 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12142 		    ((bmcr & BMCR_ANENABLE) ||
12143 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12144 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12145 						   BMCR_ANENABLE);
12146 			r = 0;
12147 		}
12148 		spin_unlock_bh(&tp->lock);
12149 	}
12150 
12151 	return r;
12152 }
12153 
12154 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12155 {
12156 	struct tg3 *tp = netdev_priv(dev);
12157 
12158 	ering->rx_max_pending = tp->rx_std_ring_mask;
12159 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12160 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12161 	else
12162 		ering->rx_jumbo_max_pending = 0;
12163 
12164 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12165 
12166 	ering->rx_pending = tp->rx_pending;
12167 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12168 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12169 	else
12170 		ering->rx_jumbo_pending = 0;
12171 
12172 	ering->tx_pending = tp->napi[0].tx_pending;
12173 }
12174 
12175 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12176 {
12177 	struct tg3 *tp = netdev_priv(dev);
12178 	int i, irq_sync = 0, err = 0;
12179 
12180 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12181 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12182 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12183 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12184 	    (tg3_flag(tp, TSO_BUG) &&
12185 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12186 		return -EINVAL;
12187 
12188 	if (netif_running(dev)) {
12189 		tg3_phy_stop(tp);
12190 		tg3_netif_stop(tp);
12191 		irq_sync = 1;
12192 	}
12193 
12194 	tg3_full_lock(tp, irq_sync);
12195 
12196 	tp->rx_pending = ering->rx_pending;
12197 
12198 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12199 	    tp->rx_pending > 63)
12200 		tp->rx_pending = 63;
12201 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12202 
12203 	for (i = 0; i < tp->irq_max; i++)
12204 		tp->napi[i].tx_pending = ering->tx_pending;
12205 
12206 	if (netif_running(dev)) {
12207 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12208 		err = tg3_restart_hw(tp, false);
12209 		if (!err)
12210 			tg3_netif_start(tp);
12211 	}
12212 
12213 	tg3_full_unlock(tp);
12214 
12215 	if (irq_sync && !err)
12216 		tg3_phy_start(tp);
12217 
12218 	return err;
12219 }
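
/* Example of the tx_pending floor (MAX_SKB_FRAGS assumed to be 17, as
 * on 4K-page systems): "ethtool -G ethX tx 17" fails with -EINVAL
 * because one maximally fragmented skb could otherwise fill the whole
 * ring, and on TSO_BUG chips anything up to 3 * MAX_SKB_FRAGS = 51 is
 * rejected for the same reason.
 */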
12220 
12221 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12222 {
12223 	struct tg3 *tp = netdev_priv(dev);
12224 
12225 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12226 
12227 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12228 		epause->rx_pause = 1;
12229 	else
12230 		epause->rx_pause = 0;
12231 
12232 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12233 		epause->tx_pause = 1;
12234 	else
12235 		epause->tx_pause = 0;
12236 }
12237 
12238 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12239 {
12240 	struct tg3 *tp = netdev_priv(dev);
12241 	int err = 0;
12242 
12243 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12244 		tg3_warn_mgmt_link_flap(tp);
12245 
12246 	if (tg3_flag(tp, USE_PHYLIB)) {
12247 		u32 newadv;
12248 		struct phy_device *phydev;
12249 
12250 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12251 
12252 		if (!(phydev->supported & SUPPORTED_Pause) ||
12253 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12254 		     (epause->rx_pause != epause->tx_pause)))
12255 			return -EINVAL;
12256 
12257 		tp->link_config.flowctrl = 0;
12258 		if (epause->rx_pause) {
12259 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12260 
12261 			if (epause->tx_pause) {
12262 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12263 				newadv = ADVERTISED_Pause;
12264 			} else
12265 				newadv = ADVERTISED_Pause |
12266 					 ADVERTISED_Asym_Pause;
12267 		} else if (epause->tx_pause) {
12268 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12269 			newadv = ADVERTISED_Asym_Pause;
12270 		} else
12271 			newadv = 0;
12272 
12273 		if (epause->autoneg)
12274 			tg3_flag_set(tp, PAUSE_AUTONEG);
12275 		else
12276 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12277 
12278 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12279 			u32 oldadv = phydev->advertising &
12280 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12281 			if (oldadv != newadv) {
12282 				phydev->advertising &=
12283 					~(ADVERTISED_Pause |
12284 					  ADVERTISED_Asym_Pause);
12285 				phydev->advertising |= newadv;
12286 				if (phydev->autoneg) {
12287 					/*
12288 					 * Always renegotiate the link to
12289 					 * inform our link partner of our
12290 					 * flow control settings, even if the
12291 					 * flow control is forced.  Let
12292 					 * tg3_adjust_link() do the final
12293 					 * flow control setup.
12294 					 */
12295 					return phy_start_aneg(phydev);
12296 				}
12297 			}
12298 
12299 			if (!epause->autoneg)
12300 				tg3_setup_flow_control(tp, 0, 0);
12301 		} else {
12302 			tp->link_config.advertising &=
12303 					~(ADVERTISED_Pause |
12304 					  ADVERTISED_Asym_Pause);
12305 			tp->link_config.advertising |= newadv;
12306 		}
12307 	} else {
12308 		int irq_sync = 0;
12309 
12310 		if (netif_running(dev)) {
12311 			tg3_netif_stop(tp);
12312 			irq_sync = 1;
12313 		}
12314 
12315 		tg3_full_lock(tp, irq_sync);
12316 
12317 		if (epause->autoneg)
12318 			tg3_flag_set(tp, PAUSE_AUTONEG);
12319 		else
12320 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12321 		if (epause->rx_pause)
12322 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12323 		else
12324 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12325 		if (epause->tx_pause)
12326 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12327 		else
12328 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12329 
12330 		if (netif_running(dev)) {
12331 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12332 			err = tg3_restart_hw(tp, false);
12333 			if (!err)
12334 				tg3_netif_start(tp);
12335 		}
12336 
12337 		tg3_full_unlock(tp);
12338 	}
12339 
12340 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12341 
12342 	return err;
12343 }
12344 
12345 static int tg3_get_sset_count(struct net_device *dev, int sset)
12346 {
12347 	switch (sset) {
12348 	case ETH_SS_TEST:
12349 		return TG3_NUM_TEST;
12350 	case ETH_SS_STATS:
12351 		return TG3_NUM_STATS;
12352 	default:
12353 		return -EOPNOTSUPP;
12354 	}
12355 }
12356 
12357 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12358 			 u32 *rules __always_unused)
12359 {
12360 	struct tg3 *tp = netdev_priv(dev);
12361 
12362 	if (!tg3_flag(tp, SUPPORT_MSIX))
12363 		return -EOPNOTSUPP;
12364 
12365 	switch (info->cmd) {
12366 	case ETHTOOL_GRXRINGS:
12367 		if (netif_running(tp->dev))
12368 			info->data = tp->rxq_cnt;
12369 		else {
12370 			info->data = num_online_cpus();
12371 			if (info->data > TG3_RSS_MAX_NUM_QS)
12372 				info->data = TG3_RSS_MAX_NUM_QS;
12373 		}
12374 
12375 		/* The first interrupt vector only
12376 		 * handles link interrupts.
12377 		 */
12378 		info->data -= 1;
12379 		return 0;
12380 
12381 	default:
12382 		return -EOPNOTSUPP;
12383 	}
12384 }
12385 
12386 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12387 {
12388 	u32 size = 0;
12389 	struct tg3 *tp = netdev_priv(dev);
12390 
12391 	if (tg3_flag(tp, SUPPORT_MSIX))
12392 		size = TG3_RSS_INDIR_TBL_SIZE;
12393 
12394 	return size;
12395 }
12396 
12397 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12398 {
12399 	struct tg3 *tp = netdev_priv(dev);
12400 	int i;
12401 
12402 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12403 		indir[i] = tp->rss_ind_tbl[i];
12404 
12405 	return 0;
12406 }
12407 
12408 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12409 {
12410 	struct tg3 *tp = netdev_priv(dev);
12411 	size_t i;
12412 
12413 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12414 		tp->rss_ind_tbl[i] = indir[i];
12415 
12416 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12417 		return 0;
12418 
12419 	/* It is legal to write the indirection
12420 	 * table while the device is running.
12421 	 */
12422 	tg3_full_lock(tp, 0);
12423 	tg3_rss_write_indir_tbl(tp);
12424 	tg3_full_unlock(tp);
12425 
12426 	return 0;
12427 }
12428 
12429 static void tg3_get_channels(struct net_device *dev,
12430 			     struct ethtool_channels *channel)
12431 {
12432 	struct tg3 *tp = netdev_priv(dev);
12433 	u32 deflt_qs = netif_get_num_default_rss_queues();
12434 
12435 	channel->max_rx = tp->rxq_max;
12436 	channel->max_tx = tp->txq_max;
12437 
12438 	if (netif_running(dev)) {
12439 		channel->rx_count = tp->rxq_cnt;
12440 		channel->tx_count = tp->txq_cnt;
12441 	} else {
12442 		if (tp->rxq_req)
12443 			channel->rx_count = tp->rxq_req;
12444 		else
12445 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12446 
12447 		if (tp->txq_req)
12448 			channel->tx_count = tp->txq_req;
12449 		else
12450 			channel->tx_count = min(deflt_qs, tp->txq_max);
12451 	}
12452 }
12453 
12454 static int tg3_set_channels(struct net_device *dev,
12455 			    struct ethtool_channels *channel)
12456 {
12457 	struct tg3 *tp = netdev_priv(dev);
12458 
12459 	if (!tg3_flag(tp, SUPPORT_MSIX))
12460 		return -EOPNOTSUPP;
12461 
12462 	if (channel->rx_count > tp->rxq_max ||
12463 	    channel->tx_count > tp->txq_max)
12464 		return -EINVAL;
12465 
12466 	tp->rxq_req = channel->rx_count;
12467 	tp->txq_req = channel->tx_count;
12468 
12469 	if (!netif_running(dev))
12470 		return 0;
12471 
12472 	tg3_stop(tp);
12473 
12474 	tg3_carrier_off(tp);
12475 
12476 	tg3_start(tp, true, false, false);
12477 
12478 	return 0;
12479 }
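
/* Typical trigger for this path (the interface name is an example):
 *
 *	ethtool -L eth0 rx 4 tx 1
 *
 * On a running interface the device is torn down via tg3_stop() and
 * brought back with the new queue counts via tg3_start().
 */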
12480 
12481 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12482 {
12483 	switch (stringset) {
12484 	case ETH_SS_STATS:
12485 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12486 		break;
12487 	case ETH_SS_TEST:
12488 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12489 		break;
12490 	default:
12491 		WARN_ON(1);	/* we need a WARN() */
12492 		break;
12493 	}
12494 }
12495 
12496 static int tg3_set_phys_id(struct net_device *dev,
12497 			    enum ethtool_phys_id_state state)
12498 {
12499 	struct tg3 *tp = netdev_priv(dev);
12500 
12501 	if (!netif_running(tp->dev))
12502 		return -EAGAIN;
12503 
12504 	switch (state) {
12505 	case ETHTOOL_ID_ACTIVE:
12506 		return 1;	/* cycle on/off once per second */
12507 
12508 	case ETHTOOL_ID_ON:
12509 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12510 		     LED_CTRL_1000MBPS_ON |
12511 		     LED_CTRL_100MBPS_ON |
12512 		     LED_CTRL_10MBPS_ON |
12513 		     LED_CTRL_TRAFFIC_OVERRIDE |
12514 		     LED_CTRL_TRAFFIC_BLINK |
12515 		     LED_CTRL_TRAFFIC_LED);
12516 		break;
12517 
12518 	case ETHTOOL_ID_OFF:
12519 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12520 		     LED_CTRL_TRAFFIC_OVERRIDE);
12521 		break;
12522 
12523 	case ETHTOOL_ID_INACTIVE:
12524 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12525 		break;
12526 	}
12527 
12528 	return 0;
12529 }
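
/* Driven by "ethtool -p ethX [seconds]" (the interface name is an
 * example): the ethtool core calls ETHTOOL_ID_ACTIVE once, toggles
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF at the once-per-second rate indicated
 * by the return value above, and finishes with ETHTOOL_ID_INACTIVE so
 * the LEDs return to tp->led_ctrl.
 */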
12530 
12531 static void tg3_get_ethtool_stats(struct net_device *dev,
12532 				   struct ethtool_stats *estats, u64 *tmp_stats)
12533 {
12534 	struct tg3 *tp = netdev_priv(dev);
12535 
12536 	if (tp->hw_stats)
12537 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12538 	else
12539 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12540 }
12541 
12542 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12543 {
12544 	int i;
12545 	__be32 *buf;
12546 	u32 offset = 0, len = 0;
12547 	u32 magic, val;
12548 
12549 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12550 		return NULL;
12551 
12552 	if (magic == TG3_EEPROM_MAGIC) {
12553 		for (offset = TG3_NVM_DIR_START;
12554 		     offset < TG3_NVM_DIR_END;
12555 		     offset += TG3_NVM_DIRENT_SIZE) {
12556 			if (tg3_nvram_read(tp, offset, &val))
12557 				return NULL;
12558 
12559 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12560 			    TG3_NVM_DIRTYPE_EXTVPD)
12561 				break;
12562 		}
12563 
12564 		if (offset != TG3_NVM_DIR_END) {
12565 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12566 			if (tg3_nvram_read(tp, offset + 4, &offset))
12567 				return NULL;
12568 
12569 			offset = tg3_nvram_logical_addr(tp, offset);
12570 		}
12571 	}
12572 
12573 	if (!offset || !len) {
12574 		offset = TG3_NVM_VPD_OFF;
12575 		len = TG3_NVM_VPD_LEN;
12576 	}
12577 
12578 	buf = kmalloc(len, GFP_KERNEL);
12579 	if (buf == NULL)
12580 		return NULL;
12581 
12582 	if (magic == TG3_EEPROM_MAGIC) {
12583 		for (i = 0; i < len; i += 4) {
12584 			/* The data is in little-endian format in NVRAM.
12585 			 * Use the big-endian read routines to preserve
12586 			 * the byte order as it exists in NVRAM.
12587 			 */
12588 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12589 				goto error;
12590 		}
12591 	} else {
12592 		u8 *ptr;
12593 		ssize_t cnt;
12594 		unsigned int pos = 0;
12595 
12596 		ptr = (u8 *)&buf[0];
12597 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12598 			cnt = pci_read_vpd(tp->pdev, pos,
12599 					   len - pos, ptr);
12600 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12601 				cnt = 0;
12602 			else if (cnt < 0)
12603 				goto error;
12604 		}
12605 		if (pos != len)
12606 			goto error;
12607 	}
12608 
12609 	*vpdlen = len;
12610 
12611 	return buf;
12612 
12613 error:
12614 	kfree(buf);
12615 	return NULL;
12616 }
12617 
12618 #define NVRAM_TEST_SIZE 0x100
12619 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12620 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12621 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12622 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12623 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12624 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12625 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12626 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12627 
12628 static int tg3_test_nvram(struct tg3 *tp)
12629 {
12630 	u32 csum, magic, len;
12631 	__be32 *buf;
12632 	int i, j, k, err = 0, size;
12633 
12634 	if (tg3_flag(tp, NO_NVRAM))
12635 		return 0;
12636 
12637 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12638 		return -EIO;
12639 
12640 	if (magic == TG3_EEPROM_MAGIC)
12641 		size = NVRAM_TEST_SIZE;
12642 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12643 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12644 		    TG3_EEPROM_SB_FORMAT_1) {
12645 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12646 			case TG3_EEPROM_SB_REVISION_0:
12647 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12648 				break;
12649 			case TG3_EEPROM_SB_REVISION_2:
12650 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12651 				break;
12652 			case TG3_EEPROM_SB_REVISION_3:
12653 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12654 				break;
12655 			case TG3_EEPROM_SB_REVISION_4:
12656 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12657 				break;
12658 			case TG3_EEPROM_SB_REVISION_5:
12659 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12660 				break;
12661 			case TG3_EEPROM_SB_REVISION_6:
12662 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12663 				break;
12664 			default:
12665 				return -EIO;
12666 			}
12667 		} else
12668 			return 0;
12669 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12670 		size = NVRAM_SELFBOOT_HW_SIZE;
12671 	else
12672 		return -EIO;
12673 
12674 	buf = kmalloc(size, GFP_KERNEL);
12675 	if (buf == NULL)
12676 		return -ENOMEM;
12677 
12678 	err = -EIO;
12679 	for (i = 0, j = 0; i < size; i += 4, j++) {
12680 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12681 		if (err)
12682 			break;
12683 	}
12684 	if (i < size)
12685 		goto out;
12686 
12687 	/* Selfboot format */
12688 	magic = be32_to_cpu(buf[0]);
12689 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12690 	    TG3_EEPROM_MAGIC_FW) {
12691 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12692 
12693 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12694 		    TG3_EEPROM_SB_REVISION_2) {
12695 			/* For rev 2, the csum doesn't include the MBA. */
12696 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12697 				csum8 += buf8[i];
12698 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12699 				csum8 += buf8[i];
12700 		} else {
12701 			for (i = 0; i < size; i++)
12702 				csum8 += buf8[i];
12703 		}
12704 
12705 		if (csum8 == 0) {
12706 			err = 0;
12707 			goto out;
12708 		}
12709 
12710 		err = -EIO;
12711 		goto out;
12712 	}
12713 
12714 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12715 	    TG3_EEPROM_MAGIC_HW) {
12716 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12717 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12718 		u8 *buf8 = (u8 *) buf;
12719 
12720 		/* Separate the parity bits and the data bytes.  */
12721 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12722 			if ((i == 0) || (i == 8)) {
12723 				int l;
12724 				u8 msk;
12725 
12726 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12727 					parity[k++] = buf8[i] & msk;
12728 				i++;
12729 			} else if (i == 16) {
12730 				int l;
12731 				u8 msk;
12732 
12733 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12734 					parity[k++] = buf8[i] & msk;
12735 				i++;
12736 
12737 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12738 					parity[k++] = buf8[i] & msk;
12739 				i++;
12740 			}
12741 			data[j++] = buf8[i];
12742 		}
12743 
12744 		err = -EIO;
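		/* Each data byte together with its stored parity bit must
		 * have odd overall parity: a byte with an odd hweight8()
		 * must have its parity bit clear, an even one must have
		 * it set.
		 */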
12745 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12746 			u8 hw8 = hweight8(data[i]);
12747 
12748 			if ((hw8 & 0x1) && parity[i])
12749 				goto out;
12750 			else if (!(hw8 & 0x1) && !parity[i])
12751 				goto out;
12752 		}
12753 		err = 0;
12754 		goto out;
12755 	}
12756 
12757 	err = -EIO;
12758 
12759 	/* Bootstrap checksum at offset 0x10 */
12760 	csum = calc_crc((unsigned char *) buf, 0x10);
12761 	if (csum != le32_to_cpu(buf[0x10/4]))
12762 		goto out;
12763 
12764 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12765 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12766 	if (csum != le32_to_cpu(buf[0xfc/4]))
12767 		goto out;
12768 
12769 	kfree(buf);
12770 
12771 	buf = tg3_vpd_readblock(tp, &len);
12772 	if (!buf)
12773 		return -ENOMEM;
12774 
12775 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12776 	if (i > 0) {
12777 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12778 		if (j < 0)
12779 			goto out;
12780 
12781 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12782 			goto out;
12783 
12784 		i += PCI_VPD_LRDT_TAG_SIZE;
12785 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12786 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12787 		if (j > 0) {
12788 			u8 csum8 = 0;
12789 
12790 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12791 
12792 			for (i = 0; i <= j; i++)
12793 				csum8 += ((u8 *)buf)[i];
12794 
12795 			if (csum8)
12796 				goto out;
12797 		}
12798 	}
12799 
12800 	err = 0;
12801 
12802 out:
12803 	kfree(buf);
12804 	return err;
12805 }
12806 
12807 #define TG3_SERDES_TIMEOUT_SEC	2
12808 #define TG3_COPPER_TIMEOUT_SEC	6
12809 
12810 static int tg3_test_link(struct tg3 *tp)
12811 {
12812 	int i, max;
12813 
12814 	if (!netif_running(tp->dev))
12815 		return -ENODEV;
12816 
12817 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12818 		max = TG3_SERDES_TIMEOUT_SEC;
12819 	else
12820 		max = TG3_COPPER_TIMEOUT_SEC;
12821 
12822 	for (i = 0; i < max; i++) {
12823 		if (tp->link_up)
12824 			return 0;
12825 
12826 		if (msleep_interruptible(1000))
12827 			break;
12828 	}
12829 
12830 	return -EIO;
12831 }
12832 
12833 /* Only test the commonly used registers */
12834 static int tg3_test_registers(struct tg3 *tp)
12835 {
12836 	int i, is_5705, is_5750;
12837 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12838 	static struct {
12839 		u16 offset;
12840 		u16 flags;
12841 #define TG3_FL_5705	0x1
12842 #define TG3_FL_NOT_5705	0x2
12843 #define TG3_FL_NOT_5788	0x4
12844 #define TG3_FL_NOT_5750	0x8
12845 		u32 read_mask;
12846 		u32 write_mask;
12847 	} reg_tbl[] = {
12848 		/* MAC Control Registers */
12849 		{ MAC_MODE, TG3_FL_NOT_5705,
12850 			0x00000000, 0x00ef6f8c },
12851 		{ MAC_MODE, TG3_FL_5705,
12852 			0x00000000, 0x01ef6b8c },
12853 		{ MAC_STATUS, TG3_FL_NOT_5705,
12854 			0x03800107, 0x00000000 },
12855 		{ MAC_STATUS, TG3_FL_5705,
12856 			0x03800100, 0x00000000 },
12857 		{ MAC_ADDR_0_HIGH, 0x0000,
12858 			0x00000000, 0x0000ffff },
12859 		{ MAC_ADDR_0_LOW, 0x0000,
12860 			0x00000000, 0xffffffff },
12861 		{ MAC_RX_MTU_SIZE, 0x0000,
12862 			0x00000000, 0x0000ffff },
12863 		{ MAC_TX_MODE, 0x0000,
12864 			0x00000000, 0x00000070 },
12865 		{ MAC_TX_LENGTHS, 0x0000,
12866 			0x00000000, 0x00003fff },
12867 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12868 			0x00000000, 0x000007fc },
12869 		{ MAC_RX_MODE, TG3_FL_5705,
12870 			0x00000000, 0x000007dc },
12871 		{ MAC_HASH_REG_0, 0x0000,
12872 			0x00000000, 0xffffffff },
12873 		{ MAC_HASH_REG_1, 0x0000,
12874 			0x00000000, 0xffffffff },
12875 		{ MAC_HASH_REG_2, 0x0000,
12876 			0x00000000, 0xffffffff },
12877 		{ MAC_HASH_REG_3, 0x0000,
12878 			0x00000000, 0xffffffff },
12879 
12880 		/* Receive Data and Receive BD Initiator Control Registers. */
12881 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12882 			0x00000000, 0xffffffff },
12883 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12884 			0x00000000, 0xffffffff },
12885 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12886 			0x00000000, 0x00000003 },
12887 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12888 			0x00000000, 0xffffffff },
12889 		{ RCVDBDI_STD_BD+0, 0x0000,
12890 			0x00000000, 0xffffffff },
12891 		{ RCVDBDI_STD_BD+4, 0x0000,
12892 			0x00000000, 0xffffffff },
12893 		{ RCVDBDI_STD_BD+8, 0x0000,
12894 			0x00000000, 0xffff0002 },
12895 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12896 			0x00000000, 0xffffffff },
12897 
12898 		/* Receive BD Initiator Control Registers. */
12899 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12900 			0x00000000, 0xffffffff },
12901 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12902 			0x00000000, 0x000003ff },
12903 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12904 			0x00000000, 0xffffffff },
12905 
12906 		/* Host Coalescing Control Registers. */
12907 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12908 			0x00000000, 0x00000004 },
12909 		{ HOSTCC_MODE, TG3_FL_5705,
12910 			0x00000000, 0x000000f6 },
12911 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12912 			0x00000000, 0xffffffff },
12913 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12914 			0x00000000, 0x000003ff },
12915 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12916 			0x00000000, 0xffffffff },
12917 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12918 			0x00000000, 0x000003ff },
12919 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12920 			0x00000000, 0xffffffff },
12921 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12922 			0x00000000, 0x000000ff },
12923 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12924 			0x00000000, 0xffffffff },
12925 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12926 			0x00000000, 0x000000ff },
12927 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12928 			0x00000000, 0xffffffff },
12929 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12930 			0x00000000, 0xffffffff },
12931 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12932 			0x00000000, 0xffffffff },
12933 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12934 			0x00000000, 0x000000ff },
12935 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12936 			0x00000000, 0xffffffff },
12937 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12938 			0x00000000, 0x000000ff },
12939 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12940 			0x00000000, 0xffffffff },
12941 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12942 			0x00000000, 0xffffffff },
12943 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12944 			0x00000000, 0xffffffff },
12945 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12946 			0x00000000, 0xffffffff },
12947 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12948 			0x00000000, 0xffffffff },
12949 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12950 			0xffffffff, 0x00000000 },
12951 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12952 			0xffffffff, 0x00000000 },
12953 
12954 		/* Buffer Manager Control Registers. */
12955 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12956 			0x00000000, 0x007fff80 },
12957 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12958 			0x00000000, 0x007fffff },
12959 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12960 			0x00000000, 0x0000003f },
12961 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12962 			0x00000000, 0x000001ff },
12963 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12964 			0x00000000, 0x000001ff },
12965 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12966 			0xffffffff, 0x00000000 },
12967 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12968 			0xffffffff, 0x00000000 },
12969 
12970 		/* Mailbox Registers */
12971 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12972 			0x00000000, 0x000001ff },
12973 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12974 			0x00000000, 0x000001ff },
12975 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12976 			0x00000000, 0x000007ff },
12977 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12978 			0x00000000, 0x000001ff },
12979 
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* end marker */
12981 	};
12982 
12983 	is_5705 = is_5750 = 0;
12984 	if (tg3_flag(tp, 5705_PLUS)) {
12985 		is_5705 = 1;
12986 		if (tg3_flag(tp, 5750_PLUS))
12987 			is_5750 = 1;
12988 	}
12989 
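	/* For each register that applies to this chip, write all zeros and
	 * then all testable bits, verifying that the read-only bits never
	 * change and that the read/write bits take exactly what was written.
	 */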
12990 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12991 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12992 			continue;
12993 
12994 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12995 			continue;
12996 
12997 		if (tg3_flag(tp, IS_5788) &&
12998 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
12999 			continue;
13000 
13001 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13002 			continue;
13003 
13004 		offset = (u32) reg_tbl[i].offset;
13005 		read_mask = reg_tbl[i].read_mask;
13006 		write_mask = reg_tbl[i].write_mask;
13007 
13008 		/* Save the original register content */
13009 		save_val = tr32(offset);
13010 
13011 		/* Determine the read-only value. */
13012 		read_val = save_val & read_mask;
13013 
13014 		/* Write zero to the register, then make sure the read-only bits
13015 		 * are not changed and the read/write bits are all zeros.
13016 		 */
13017 		tw32(offset, 0);
13018 
13019 		val = tr32(offset);
13020 
13021 		/* Test the read-only and read/write bits. */
13022 		if (((val & read_mask) != read_val) || (val & write_mask))
13023 			goto out;
13024 
		/* Write ones to all the bits defined by read_mask and
		 * write_mask, then make sure the read-only bits are not
		 * changed and the read/write bits are all ones.
		 */
13029 		tw32(offset, read_mask | write_mask);
13030 
13031 		val = tr32(offset);
13032 
13033 		/* Test the read-only bits. */
13034 		if ((val & read_mask) != read_val)
13035 			goto out;
13036 
13037 		/* Test the read/write bits. */
13038 		if ((val & write_mask) != write_mask)
13039 			goto out;
13040 
13041 		tw32(offset, save_val);
13042 	}
13043 
13044 	return 0;
13045 
13046 out:
13047 	if (netif_msg_hw(tp))
13048 		netdev_err(tp->dev,
13049 			   "Register test failed at offset %x\n", offset);
13050 	tw32(offset, save_val);
13051 	return -EIO;
13052 }
13053 
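/* Walk the on-chip memory region [offset, offset + len) in 4-byte steps,
 * writing each test pattern and reading it back.  Returns 0 on success or
 * -EIO on the first miscompare.
 */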
13054 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13055 {
13056 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13057 	int i;
13058 	u32 j;
13059 
13060 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13061 		for (j = 0; j < len; j += 4) {
13062 			u32 val;
13063 
13064 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13065 			tg3_read_mem(tp, offset + j, &val);
13066 			if (val != test_pattern[i])
13067 				return -EIO;
13068 		}
13069 	}
13070 	return 0;
13071 }
13072 
13073 static int tg3_test_memory(struct tg3 *tp)
13074 {
13075 	static struct mem_entry {
13076 		u32 offset;
13077 		u32 len;
13078 	} mem_tbl_570x[] = {
13079 		{ 0x00000000, 0x00b50},
13080 		{ 0x00002000, 0x1c000},
13081 		{ 0xffffffff, 0x00000}
13082 	}, mem_tbl_5705[] = {
13083 		{ 0x00000100, 0x0000c},
13084 		{ 0x00000200, 0x00008},
13085 		{ 0x00004000, 0x00800},
13086 		{ 0x00006000, 0x01000},
13087 		{ 0x00008000, 0x02000},
13088 		{ 0x00010000, 0x0e000},
13089 		{ 0xffffffff, 0x00000}
13090 	}, mem_tbl_5755[] = {
13091 		{ 0x00000200, 0x00008},
13092 		{ 0x00004000, 0x00800},
13093 		{ 0x00006000, 0x00800},
13094 		{ 0x00008000, 0x02000},
13095 		{ 0x00010000, 0x0c000},
13096 		{ 0xffffffff, 0x00000}
13097 	}, mem_tbl_5906[] = {
13098 		{ 0x00000200, 0x00008},
13099 		{ 0x00004000, 0x00400},
13100 		{ 0x00006000, 0x00400},
13101 		{ 0x00008000, 0x01000},
13102 		{ 0x00010000, 0x01000},
13103 		{ 0xffffffff, 0x00000}
13104 	}, mem_tbl_5717[] = {
13105 		{ 0x00000200, 0x00008},
13106 		{ 0x00010000, 0x0a000},
13107 		{ 0x00020000, 0x13c00},
13108 		{ 0xffffffff, 0x00000}
13109 	}, mem_tbl_57765[] = {
13110 		{ 0x00000200, 0x00008},
13111 		{ 0x00004000, 0x00800},
13112 		{ 0x00006000, 0x09800},
13113 		{ 0x00010000, 0x0a000},
13114 		{ 0xffffffff, 0x00000}
13115 	};
13116 	struct mem_entry *mem_tbl;
13117 	int err = 0;
13118 	int i;
13119 
13120 	if (tg3_flag(tp, 5717_PLUS))
13121 		mem_tbl = mem_tbl_5717;
13122 	else if (tg3_flag(tp, 57765_CLASS) ||
13123 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13124 		mem_tbl = mem_tbl_57765;
13125 	else if (tg3_flag(tp, 5755_PLUS))
13126 		mem_tbl = mem_tbl_5755;
13127 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13128 		mem_tbl = mem_tbl_5906;
13129 	else if (tg3_flag(tp, 5705_PLUS))
13130 		mem_tbl = mem_tbl_5705;
13131 	else
13132 		mem_tbl = mem_tbl_570x;
13133 
13134 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13135 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13136 		if (err)
13137 			break;
13138 	}
13139 
13140 	return err;
13141 }
13142 
13143 #define TG3_TSO_MSS		500
13144 
13145 #define TG3_TSO_IP_HDR_LEN	20
13146 #define TG3_TSO_TCP_HDR_LEN	20
13147 #define TG3_TSO_TCP_OPT_LEN	12
13148 
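/* Canned EtherType + IPv4 + TCP header used to build the TSO loopback test
 * frame.  It decodes as EtherType 0x0800, a 20-byte IPv4 header (DF set,
 * TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2, tot_len filled in at
 * runtime), and a 32-byte TCP header (data offset 8 words, ACK set,
 * NOP/NOP/timestamp options).
 */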
13149 static const u8 tg3_tso_header[] = {
13150 0x08, 0x00,
13151 0x45, 0x00, 0x00, 0x00,
13152 0x00, 0x00, 0x40, 0x00,
13153 0x40, 0x06, 0x00, 0x00,
13154 0x0a, 0x00, 0x00, 0x01,
13155 0x0a, 0x00, 0x00, 0x02,
13156 0x0d, 0x00, 0xe0, 0x00,
13157 0x00, 0x00, 0x01, 0x00,
13158 0x00, 0x00, 0x02, 0x00,
13159 0x80, 0x10, 0x10, 0x00,
13160 0x14, 0x09, 0x00, 0x00,
13161 0x01, 0x01, 0x08, 0x0a,
13162 0x11, 0x11, 0x11, 0x11,
13163 0x11, 0x11, 0x11, 0x11,
13164 };
13165 
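/* Transmit a single test frame (or a small TSO burst) through the
 * currently configured loopback path, poll the tx consumer and rx
 * producer indices for completion, and verify the received payload
 * byte-for-byte.  Returns 0 on success or -EIO on any mismatch or
 * timeout.
 */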
13166 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13167 {
13168 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13169 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13170 	u32 budget;
13171 	struct sk_buff *skb;
13172 	u8 *tx_data, *rx_data;
13173 	dma_addr_t map;
13174 	int num_pkts, tx_len, rx_len, i, err;
13175 	struct tg3_rx_buffer_desc *desc;
13176 	struct tg3_napi *tnapi, *rnapi;
13177 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13178 
13179 	tnapi = &tp->napi[0];
13180 	rnapi = &tp->napi[0];
13181 	if (tp->irq_cnt > 1) {
13182 		if (tg3_flag(tp, ENABLE_RSS))
13183 			rnapi = &tp->napi[1];
13184 		if (tg3_flag(tp, ENABLE_TSS))
13185 			tnapi = &tp->napi[1];
13186 	}
13187 	coal_now = tnapi->coal_now | rnapi->coal_now;
13188 
13189 	err = -EIO;
13190 
13191 	tx_len = pktsz;
13192 	skb = netdev_alloc_skb(tp->dev, tx_len);
13193 	if (!skb)
13194 		return -ENOMEM;
13195 
13196 	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);	/* zero src addr + ethertype */
13199 
13200 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13201 
13202 	if (tso_loopback) {
13203 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13204 
13205 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13206 			      TG3_TSO_TCP_OPT_LEN;
13207 
13208 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13209 		       sizeof(tg3_tso_header));
13210 		mss = TG3_TSO_MSS;
13211 
13212 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13213 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13214 
13215 		/* Set the total length field in the IP header */
13216 		iph->tot_len = htons((u16)(mss + hdr_len));
13217 
13218 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13219 			      TXD_FLAG_CPU_POST_DMA);
13220 
13221 		if (tg3_flag(tp, HW_TSO_1) ||
13222 		    tg3_flag(tp, HW_TSO_2) ||
13223 		    tg3_flag(tp, HW_TSO_3)) {
13224 			struct tcphdr *th;
13225 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13226 			th = (struct tcphdr *)&tx_data[val];
13227 			th->check = 0;
13228 		} else
13229 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13230 
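		/* Fold the header length into the mss and base_flags TX BD
		 * fields; the bit layout differs between TSO hardware
		 * generations.
		 */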
13231 		if (tg3_flag(tp, HW_TSO_3)) {
13232 			mss |= (hdr_len & 0xc) << 12;
13233 			if (hdr_len & 0x10)
13234 				base_flags |= 0x00000010;
13235 			base_flags |= (hdr_len & 0x3e0) << 5;
13236 		} else if (tg3_flag(tp, HW_TSO_2))
13237 			mss |= hdr_len << 9;
13238 		else if (tg3_flag(tp, HW_TSO_1) ||
13239 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13240 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13241 		} else {
13242 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13243 		}
13244 
13245 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13246 	} else {
13247 		num_pkts = 1;
13248 		data_off = ETH_HLEN;
13249 
13250 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13251 		    tx_len > VLAN_ETH_FRAME_LEN)
13252 			base_flags |= TXD_FLAG_JMB_PKT;
13253 	}
13254 
13255 	for (i = data_off; i < tx_len; i++)
13256 		tx_data[i] = (u8) (i & 0xff);
13257 
13258 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13259 	if (pci_dma_mapping_error(tp->pdev, map)) {
13260 		dev_kfree_skb(skb);
13261 		return -EIO;
13262 	}
13263 
13264 	val = tnapi->tx_prod;
13265 	tnapi->tx_buffers[val].skb = skb;
13266 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13267 
13268 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13269 	       rnapi->coal_now);
13270 
13271 	udelay(10);
13272 
13273 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13274 
13275 	budget = tg3_tx_avail(tnapi);
13276 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13277 			    base_flags | TXD_FLAG_END, mss, 0)) {
13278 		tnapi->tx_buffers[val].skb = NULL;
13279 		dev_kfree_skb(skb);
13280 		return -EIO;
13281 	}
13282 
13283 	tnapi->tx_prod++;
13284 
13285 	/* Sync BD data before updating mailbox */
13286 	wmb();
13287 
13288 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
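	/* Read the mailbox back, which should flush the posted write
	 * before we start polling.
	 */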
13289 	tr32_mailbox(tnapi->prodmbox);
13290 
13291 	udelay(10);
13292 
	/* Poll for up to 350 usec to allow enough time on some
	 * 10/100 Mbps devices.
	 */
13294 	for (i = 0; i < 35; i++) {
13295 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13296 		       coal_now);
13297 
13298 		udelay(10);
13299 
13300 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13301 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13302 		if ((tx_idx == tnapi->tx_prod) &&
13303 		    (rx_idx == (rx_start_idx + num_pkts)))
13304 			break;
13305 	}
13306 
13307 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13308 	dev_kfree_skb(skb);
13309 
13310 	if (tx_idx != tnapi->tx_prod)
13311 		goto out;
13312 
13313 	if (rx_idx != rx_start_idx + num_pkts)
13314 		goto out;
13315 
13316 	val = data_off;
13317 	while (rx_idx != rx_start_idx) {
13318 		desc = &rnapi->rx_rcb[rx_start_idx++];
13319 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13320 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13321 
13322 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13323 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13324 			goto out;
13325 
13326 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13327 			 - ETH_FCS_LEN;
13328 
13329 		if (!tso_loopback) {
13330 			if (rx_len != tx_len)
13331 				goto out;
13332 
13333 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13334 				if (opaque_key != RXD_OPAQUE_RING_STD)
13335 					goto out;
13336 			} else {
13337 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13338 					goto out;
13339 			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >>
			    RXD_TCPCSUM_SHIFT) != 0xffff) {
			goto out;
		}
13345 
13346 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13347 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13348 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13349 					     mapping);
13350 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13351 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13352 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13353 					     mapping);
13354 		} else
13355 			goto out;
13356 
13357 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13358 					    PCI_DMA_FROMDEVICE);
13359 
13360 		rx_data += TG3_RX_OFFSET(tp);
13361 		for (i = data_off; i < rx_len; i++, val++) {
13362 			if (*(rx_data + i) != (u8) (val & 0xff))
13363 				goto out;
13364 		}
13365 	}
13366 
13367 	err = 0;
13368 
13369 	/* tg3_free_rings will unmap and free the rx_data */
13370 out:
13371 	return err;
13372 }
13373 
13374 #define TG3_STD_LOOPBACK_FAILED		1
13375 #define TG3_JMB_LOOPBACK_FAILED		2
13376 #define TG3_TSO_LOOPBACK_FAILED		4
13377 #define TG3_LOOPBACK_FAILED \
13378 	(TG3_STD_LOOPBACK_FAILED | \
13379 	 TG3_JMB_LOOPBACK_FAILED | \
13380 	 TG3_TSO_LOOPBACK_FAILED)
13381 
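/* Run the standard, TSO, and jumbo loopback variants over the MAC,
 * internal PHY, and (optionally) external loopback paths, recording a
 * failure bitmask for each path in data[].
 */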
13382 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13383 {
13384 	int err = -EIO;
13385 	u32 eee_cap;
13386 	u32 jmb_pkt_sz = 9000;
13387 
13388 	if (tp->dma_limit)
13389 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13390 
13391 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13392 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13393 
13394 	if (!netif_running(tp->dev)) {
13395 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13396 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13397 		if (do_extlpbk)
13398 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13399 		goto done;
13400 	}
13401 
13402 	err = tg3_reset_hw(tp, true);
13403 	if (err) {
13404 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13405 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13406 		if (do_extlpbk)
13407 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13408 		goto done;
13409 	}
13410 
13411 	if (tg3_flag(tp, ENABLE_RSS)) {
13412 		int i;
13413 
13414 		/* Reroute all rx packets to the 1st queue */
13415 		for (i = MAC_RSS_INDIR_TBL_0;
13416 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13417 			tw32(i, 0x0);
13418 	}
13419 
	/* HW erratum - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by this
	 * erratum.  Also, the MAC loopback test is deprecated on all
	 * newer ASIC revisions (those with a CPMU).
	 */
13425 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13426 	    !tg3_flag(tp, CPMU_PRESENT)) {
13427 		tg3_mac_loopback(tp, true);
13428 
13429 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13430 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13431 
13432 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13433 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13434 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13435 
13436 		tg3_mac_loopback(tp, false);
13437 	}
13438 
13439 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13440 	    !tg3_flag(tp, USE_PHYLIB)) {
13441 		int i;
13442 
13443 		tg3_phy_lpbk_set(tp, 0, false);
13444 
13445 		/* Wait for link */
13446 		for (i = 0; i < 100; i++) {
13447 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13448 				break;
13449 			mdelay(1);
13450 		}
13451 
13452 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13453 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13454 		if (tg3_flag(tp, TSO_CAPABLE) &&
13455 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13456 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13457 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13458 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13459 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13460 
13461 		if (do_extlpbk) {
13462 			tg3_phy_lpbk_set(tp, 0, true);
13463 
13464 			/* All link indications report up, but the hardware
13465 			 * isn't really ready for about 20 msec.  Double it
13466 			 * to be sure.
13467 			 */
13468 			mdelay(40);
13469 
13470 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13471 				data[TG3_EXT_LOOPB_TEST] |=
13472 							TG3_STD_LOOPBACK_FAILED;
13473 			if (tg3_flag(tp, TSO_CAPABLE) &&
13474 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13475 				data[TG3_EXT_LOOPB_TEST] |=
13476 							TG3_TSO_LOOPBACK_FAILED;
13477 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13478 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13479 				data[TG3_EXT_LOOPB_TEST] |=
13480 							TG3_JMB_LOOPBACK_FAILED;
13481 		}
13482 
13483 		/* Re-enable gphy autopowerdown. */
13484 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13485 			tg3_phy_toggle_apd(tp, true);
13486 	}
13487 
13488 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13489 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13490 
13491 done:
13492 	tp->phy_flags |= eee_cap;
13493 
13494 	return err;
13495 }
13496 
13497 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13498 			  u64 *data)
13499 {
13500 	struct tg3 *tp = netdev_priv(dev);
13501 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13502 
13503 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13504 		if (tg3_power_up(tp)) {
13505 			etest->flags |= ETH_TEST_FL_FAILED;
			/* Each byte of every u64 is set to 1, so all
			 * test results read back as nonzero (failed).
			 */
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13507 			return;
13508 		}
13509 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13510 	}
13511 
13512 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13513 
13514 	if (tg3_test_nvram(tp) != 0) {
13515 		etest->flags |= ETH_TEST_FL_FAILED;
13516 		data[TG3_NVRAM_TEST] = 1;
13517 	}
13518 	if (!doextlpbk && tg3_test_link(tp)) {
13519 		etest->flags |= ETH_TEST_FL_FAILED;
13520 		data[TG3_LINK_TEST] = 1;
13521 	}
13522 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13523 		int err, err2 = 0, irq_sync = 0;
13524 
13525 		if (netif_running(dev)) {
13526 			tg3_phy_stop(tp);
13527 			tg3_netif_stop(tp);
13528 			irq_sync = 1;
13529 		}
13530 
13531 		tg3_full_lock(tp, irq_sync);
13532 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13533 		err = tg3_nvram_lock(tp);
13534 		tg3_halt_cpu(tp, RX_CPU_BASE);
13535 		if (!tg3_flag(tp, 5705_PLUS))
13536 			tg3_halt_cpu(tp, TX_CPU_BASE);
13537 		if (!err)
13538 			tg3_nvram_unlock(tp);
13539 
13540 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13541 			tg3_phy_reset(tp);
13542 
13543 		if (tg3_test_registers(tp) != 0) {
13544 			etest->flags |= ETH_TEST_FL_FAILED;
13545 			data[TG3_REGISTER_TEST] = 1;
13546 		}
13547 
13548 		if (tg3_test_memory(tp) != 0) {
13549 			etest->flags |= ETH_TEST_FL_FAILED;
13550 			data[TG3_MEMORY_TEST] = 1;
13551 		}
13552 
13553 		if (doextlpbk)
13554 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13555 
13556 		if (tg3_test_loopback(tp, data, doextlpbk))
13557 			etest->flags |= ETH_TEST_FL_FAILED;
13558 
13559 		tg3_full_unlock(tp);
13560 
13561 		if (tg3_test_interrupt(tp) != 0) {
13562 			etest->flags |= ETH_TEST_FL_FAILED;
13563 			data[TG3_INTERRUPT_TEST] = 1;
13564 		}
13565 
13566 		tg3_full_lock(tp, 0);
13567 
13568 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13569 		if (netif_running(dev)) {
13570 			tg3_flag_set(tp, INIT_COMPLETE);
13571 			err2 = tg3_restart_hw(tp, true);
13572 			if (!err2)
13573 				tg3_netif_start(tp);
13574 		}
13575 
13576 		tg3_full_unlock(tp);
13577 
13578 		if (irq_sync && !err2)
13579 			tg3_phy_start(tp);
13580 	}
13581 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13582 		tg3_power_down_prepare(tp);
13584 }
13585 
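/* SIOCSHWTSTAMP handler: validate the requested tx/rx hardware
 * timestamping modes and program TG3_RX_PTP_CTL to match.
 */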
13586 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13587 			      struct ifreq *ifr, int cmd)
13588 {
13589 	struct tg3 *tp = netdev_priv(dev);
13590 	struct hwtstamp_config stmpconf;
13591 
13592 	if (!tg3_flag(tp, PTP_CAPABLE))
13593 		return -EINVAL;
13594 
13595 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13596 		return -EFAULT;
13597 
	if (stmpconf.flags)	/* reserved for future extensions */
		return -EINVAL;
13600 
13601 	switch (stmpconf.tx_type) {
13602 	case HWTSTAMP_TX_ON:
13603 		tg3_flag_set(tp, TX_TSTAMP_EN);
13604 		break;
13605 	case HWTSTAMP_TX_OFF:
13606 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13607 		break;
13608 	default:
13609 		return -ERANGE;
13610 	}
13611 
13612 	switch (stmpconf.rx_filter) {
13613 	case HWTSTAMP_FILTER_NONE:
13614 		tp->rxptpctl = 0;
13615 		break;
13616 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13617 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13618 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13619 		break;
13620 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13621 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13622 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13623 		break;
13624 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13625 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13626 			       TG3_RX_PTP_CTL_DELAY_REQ;
13627 		break;
13628 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13629 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13630 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13631 		break;
13632 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13633 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13634 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13635 		break;
13636 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13637 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13638 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13639 		break;
13640 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13641 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13642 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13643 		break;
13644 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13645 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13646 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13647 		break;
13648 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13649 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13650 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13651 		break;
13652 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13653 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13654 			       TG3_RX_PTP_CTL_DELAY_REQ;
13655 		break;
13656 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13657 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13658 			       TG3_RX_PTP_CTL_DELAY_REQ;
13659 		break;
13660 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13661 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13662 			       TG3_RX_PTP_CTL_DELAY_REQ;
13663 		break;
13664 	default:
13665 		return -ERANGE;
13666 	}
13667 
13668 	if (netif_running(dev) && tp->rxptpctl)
13669 		tw32(TG3_RX_PTP_CTL,
13670 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13671 
13672 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13673 		-EFAULT : 0;
13674 }
13675 
13676 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13677 {
13678 	struct mii_ioctl_data *data = if_mii(ifr);
13679 	struct tg3 *tp = netdev_priv(dev);
13680 	int err;
13681 
13682 	if (tg3_flag(tp, USE_PHYLIB)) {
13683 		struct phy_device *phydev;
13684 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13685 			return -EAGAIN;
13686 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13687 		return phy_mii_ioctl(phydev, ifr, cmd);
13688 	}
13689 
13690 	switch (cmd) {
13691 	case SIOCGMIIPHY:
13692 		data->phy_id = tp->phy_addr;
13693 
13694 		/* fallthru */
13695 	case SIOCGMIIREG: {
13696 		u32 mii_regval;
13697 
13698 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13699 			break;			/* We have no PHY */
13700 
13701 		if (!netif_running(dev))
13702 			return -EAGAIN;
13703 
13704 		spin_lock_bh(&tp->lock);
13705 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13706 				    data->reg_num & 0x1f, &mii_regval);
13707 		spin_unlock_bh(&tp->lock);
13708 
13709 		data->val_out = mii_regval;
13710 
13711 		return err;
13712 	}
13713 
13714 	case SIOCSMIIREG:
13715 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13716 			break;			/* We have no PHY */
13717 
13718 		if (!netif_running(dev))
13719 			return -EAGAIN;
13720 
13721 		spin_lock_bh(&tp->lock);
13722 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13723 				     data->reg_num & 0x1f, data->val_in);
13724 		spin_unlock_bh(&tp->lock);
13725 
13726 		return err;
13727 
13728 	case SIOCSHWTSTAMP:
13729 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13730 
13731 	default:
13732 		/* do nothing */
13733 		break;
13734 	}
13735 	return -EOPNOTSUPP;
13736 }
13737 
13738 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13739 {
13740 	struct tg3 *tp = netdev_priv(dev);
13741 
13742 	memcpy(ec, &tp->coal, sizeof(*ec));
13743 	return 0;
13744 }
13745 
13746 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13747 {
13748 	struct tg3 *tp = netdev_priv(dev);
13749 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13750 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13751 
13752 	if (!tg3_flag(tp, 5705_PLUS)) {
13753 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13754 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13755 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13756 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13757 	}
13758 
13759 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13760 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13761 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13762 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13763 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13764 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13765 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13766 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13767 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13768 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13769 		return -EINVAL;
13770 
13771 	/* No rx interrupts will be generated if both are zero */
13772 	if ((ec->rx_coalesce_usecs == 0) &&
13773 	    (ec->rx_max_coalesced_frames == 0))
13774 		return -EINVAL;
13775 
13776 	/* No tx interrupts will be generated if both are zero */
13777 	if ((ec->tx_coalesce_usecs == 0) &&
13778 	    (ec->tx_max_coalesced_frames == 0))
13779 		return -EINVAL;
13780 
13781 	/* Only copy relevant parameters, ignore all others. */
13782 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13783 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13784 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13785 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13786 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13787 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13788 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13789 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13790 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13791 
13792 	if (netif_running(dev)) {
13793 		tg3_full_lock(tp, 0);
13794 		__tg3_set_coalesce(tp, &tp->coal);
13795 		tg3_full_unlock(tp);
13796 	}
13797 	return 0;
13798 }
13799 
13800 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13801 {
13802 	struct tg3 *tp = netdev_priv(dev);
13803 
13804 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13805 		netdev_warn(tp->dev, "Board does not support EEE!\n");
13806 		return -EOPNOTSUPP;
13807 	}
13808 
13809 	if (edata->advertised != tp->eee.advertised) {
13810 		netdev_warn(tp->dev,
13811 			    "Direct manipulation of EEE advertisement is not supported\n");
13812 		return -EINVAL;
13813 	}
13814 
13815 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13816 		netdev_warn(tp->dev,
			    "Maximal Tx LPI timer supported is %#x\n",
13818 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13819 		return -EINVAL;
13820 	}
13821 
13822 	tp->eee = *edata;
13823 
13824 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13825 	tg3_warn_mgmt_link_flap(tp);
13826 
13827 	if (netif_running(tp->dev)) {
13828 		tg3_full_lock(tp, 0);
13829 		tg3_setup_eee(tp);
13830 		tg3_phy_reset(tp);
13831 		tg3_full_unlock(tp);
13832 	}
13833 
13834 	return 0;
13835 }
13836 
13837 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13838 {
13839 	struct tg3 *tp = netdev_priv(dev);
13840 
13841 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13842 		netdev_warn(tp->dev,
13843 			    "Board does not support EEE!\n");
13844 		return -EOPNOTSUPP;
13845 	}
13846 
13847 	*edata = tp->eee;
13848 	return 0;
13849 }
13850 
13851 static const struct ethtool_ops tg3_ethtool_ops = {
13852 	.get_settings		= tg3_get_settings,
13853 	.set_settings		= tg3_set_settings,
13854 	.get_drvinfo		= tg3_get_drvinfo,
13855 	.get_regs_len		= tg3_get_regs_len,
13856 	.get_regs		= tg3_get_regs,
13857 	.get_wol		= tg3_get_wol,
13858 	.set_wol		= tg3_set_wol,
13859 	.get_msglevel		= tg3_get_msglevel,
13860 	.set_msglevel		= tg3_set_msglevel,
13861 	.nway_reset		= tg3_nway_reset,
13862 	.get_link		= ethtool_op_get_link,
13863 	.get_eeprom_len		= tg3_get_eeprom_len,
13864 	.get_eeprom		= tg3_get_eeprom,
13865 	.set_eeprom		= tg3_set_eeprom,
13866 	.get_ringparam		= tg3_get_ringparam,
13867 	.set_ringparam		= tg3_set_ringparam,
13868 	.get_pauseparam		= tg3_get_pauseparam,
13869 	.set_pauseparam		= tg3_set_pauseparam,
13870 	.self_test		= tg3_self_test,
13871 	.get_strings		= tg3_get_strings,
13872 	.set_phys_id		= tg3_set_phys_id,
13873 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13874 	.get_coalesce		= tg3_get_coalesce,
13875 	.set_coalesce		= tg3_set_coalesce,
13876 	.get_sset_count		= tg3_get_sset_count,
13877 	.get_rxnfc		= tg3_get_rxnfc,
13878 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13879 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13880 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13881 	.get_channels		= tg3_get_channels,
13882 	.set_channels		= tg3_set_channels,
13883 	.get_ts_info		= tg3_get_ts_info,
13884 	.get_eee		= tg3_get_eee,
13885 	.set_eee		= tg3_set_eee,
13886 };
13887 
13888 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13889 						struct rtnl_link_stats64 *stats)
13890 {
13891 	struct tg3 *tp = netdev_priv(dev);
13892 
13893 	spin_lock_bh(&tp->lock);
13894 	if (!tp->hw_stats) {
13895 		spin_unlock_bh(&tp->lock);
13896 		return &tp->net_stats_prev;
13897 	}
13898 
13899 	tg3_get_nstats(tp, stats);
13900 	spin_unlock_bh(&tp->lock);
13901 
13902 	return stats;
13903 }
13904 
13905 static void tg3_set_rx_mode(struct net_device *dev)
13906 {
13907 	struct tg3 *tp = netdev_priv(dev);
13908 
13909 	if (!netif_running(dev))
13910 		return;
13911 
13912 	tg3_full_lock(tp, 0);
13913 	__tg3_set_rx_mode(dev);
13914 	tg3_full_unlock(tp);
13915 }
13916 
13917 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13918 			       int new_mtu)
13919 {
13920 	dev->mtu = new_mtu;
13921 
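	/* 5780-class chips cannot use TSO and jumbo frames at the same
	 * time, so toggle TSO_CAPABLE around the feature update as the
	 * MTU crosses ETH_DATA_LEN.
	 */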
13922 	if (new_mtu > ETH_DATA_LEN) {
13923 		if (tg3_flag(tp, 5780_CLASS)) {
13924 			netdev_update_features(dev);
13925 			tg3_flag_clear(tp, TSO_CAPABLE);
13926 		} else {
13927 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13928 		}
13929 	} else {
13930 		if (tg3_flag(tp, 5780_CLASS)) {
13931 			tg3_flag_set(tp, TSO_CAPABLE);
13932 			netdev_update_features(dev);
13933 		}
13934 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13935 	}
13936 }
13937 
13938 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13939 {
13940 	struct tg3 *tp = netdev_priv(dev);
13941 	int err;
13942 	bool reset_phy = false;
13943 
13944 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13945 		return -EINVAL;
13946 
13947 	if (!netif_running(dev)) {
		/* The new MTU will simply be picked up the next time
		 * the device is brought up.
		 */
13951 		tg3_set_mtu(dev, tp, new_mtu);
13952 		return 0;
13953 	}
13954 
13955 	tg3_phy_stop(tp);
13956 
13957 	tg3_netif_stop(tp);
13958 
13959 	tg3_full_lock(tp, 1);
13960 
13961 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13962 
13963 	tg3_set_mtu(dev, tp, new_mtu);
13964 
	/* Reset the PHY, otherwise the read DMA engine will be left in a
	 * mode that breaks all DMA requests up into 256-byte transfers.
	 */
13968 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13969 		reset_phy = true;
13970 
13971 	err = tg3_restart_hw(tp, reset_phy);
13972 
13973 	if (!err)
13974 		tg3_netif_start(tp);
13975 
13976 	tg3_full_unlock(tp);
13977 
13978 	if (!err)
13979 		tg3_phy_start(tp);
13980 
13981 	return err;
13982 }
13983 
13984 static const struct net_device_ops tg3_netdev_ops = {
13985 	.ndo_open		= tg3_open,
13986 	.ndo_stop		= tg3_close,
13987 	.ndo_start_xmit		= tg3_start_xmit,
13988 	.ndo_get_stats64	= tg3_get_stats64,
13989 	.ndo_validate_addr	= eth_validate_addr,
13990 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13991 	.ndo_set_mac_address	= tg3_set_mac_addr,
13992 	.ndo_do_ioctl		= tg3_ioctl,
13993 	.ndo_tx_timeout		= tg3_tx_timeout,
13994 	.ndo_change_mtu		= tg3_change_mtu,
13995 	.ndo_fix_features	= tg3_fix_features,
13996 	.ndo_set_features	= tg3_set_features,
13997 #ifdef CONFIG_NET_POLL_CONTROLLER
13998 	.ndo_poll_controller	= tg3_poll_controller,
13999 #endif
14000 };
14001 
14002 static void tg3_get_eeprom_size(struct tg3 *tp)
14003 {
14004 	u32 cursize, val, magic;
14005 
14006 	tp->nvram_size = EEPROM_CHIP_SIZE;
14007 
14008 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14009 		return;
14010 
14011 	if ((magic != TG3_EEPROM_MAGIC) &&
14012 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14013 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14014 		return;
14015 
	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.  For example, on
	 * a 256-byte part the read at offset 0x100 wraps back to offset 0
	 * and returns the magic value, so cursize == 0x100 is the size.
	 */
14021 	cursize = 0x10;
14022 
14023 	while (cursize < tp->nvram_size) {
14024 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14025 			return;
14026 
14027 		if (val == magic)
14028 			break;
14029 
14030 		cursize <<= 1;
14031 	}
14032 
14033 	tp->nvram_size = cursize;
14034 }
14035 
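/* Determine the NVRAM size.  Images without the standard magic (selfboot
 * format) fall back to the EEPROM sizing probe; otherwise the size, in KB,
 * is taken from the 16-bit field at offset 0xf2 (read as part of the word
 * at 0xf0), defaulting to 512KB when that field is zero.
 */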
14036 static void tg3_get_nvram_size(struct tg3 *tp)
14037 {
14038 	u32 val;
14039 
14040 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14041 		return;
14042 
14043 	/* Selfboot format */
14044 	if (val != TG3_EEPROM_MAGIC) {
14045 		tg3_get_eeprom_size(tp);
14046 		return;
14047 	}
14048 
14049 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14050 		if (val != 0) {
14051 			/* This is confusing.  We want to operate on the
14052 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14053 			 * call will read from NVRAM and byteswap the data
14054 			 * according to the byteswapping settings for all
14055 			 * other register accesses.  This ensures the data we
14056 			 * want will always reside in the lower 16-bits.
14057 			 * However, the data in NVRAM is in LE format, which
14058 			 * means the data from the NVRAM read will always be
14059 			 * opposite the endianness of the CPU.  The 16-bit
14060 			 * byteswap then brings the data to CPU endianness.
14061 			 */
14062 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14063 			return;
14064 		}
14065 	}
14066 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14067 }
14068 
14069 static void tg3_get_nvram_info(struct tg3 *tp)
14070 {
14071 	u32 nvcfg1;
14072 
14073 	nvcfg1 = tr32(NVRAM_CFG1);
14074 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14075 		tg3_flag_set(tp, FLASH);
14076 	} else {
14077 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14078 		tw32(NVRAM_CFG1, nvcfg1);
14079 	}
14080 
14081 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14082 	    tg3_flag(tp, 5780_CLASS)) {
14083 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14084 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14085 			tp->nvram_jedecnum = JEDEC_ATMEL;
14086 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14087 			tg3_flag_set(tp, NVRAM_BUFFERED);
14088 			break;
14089 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14090 			tp->nvram_jedecnum = JEDEC_ATMEL;
14091 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14092 			break;
14093 		case FLASH_VENDOR_ATMEL_EEPROM:
14094 			tp->nvram_jedecnum = JEDEC_ATMEL;
14095 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14096 			tg3_flag_set(tp, NVRAM_BUFFERED);
14097 			break;
14098 		case FLASH_VENDOR_ST:
14099 			tp->nvram_jedecnum = JEDEC_ST;
14100 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14101 			tg3_flag_set(tp, NVRAM_BUFFERED);
14102 			break;
14103 		case FLASH_VENDOR_SAIFUN:
14104 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14105 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14106 			break;
14107 		case FLASH_VENDOR_SST_SMALL:
14108 		case FLASH_VENDOR_SST_LARGE:
14109 			tp->nvram_jedecnum = JEDEC_SST;
14110 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14111 			break;
14112 		}
14113 	} else {
14114 		tp->nvram_jedecnum = JEDEC_ATMEL;
14115 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14116 		tg3_flag_set(tp, NVRAM_BUFFERED);
14117 	}
14118 }
14119 
14120 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14121 {
14122 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14123 	case FLASH_5752PAGE_SIZE_256:
14124 		tp->nvram_pagesize = 256;
14125 		break;
14126 	case FLASH_5752PAGE_SIZE_512:
14127 		tp->nvram_pagesize = 512;
14128 		break;
14129 	case FLASH_5752PAGE_SIZE_1K:
14130 		tp->nvram_pagesize = 1024;
14131 		break;
14132 	case FLASH_5752PAGE_SIZE_2K:
14133 		tp->nvram_pagesize = 2048;
14134 		break;
14135 	case FLASH_5752PAGE_SIZE_4K:
14136 		tp->nvram_pagesize = 4096;
14137 		break;
14138 	case FLASH_5752PAGE_SIZE_264:
14139 		tp->nvram_pagesize = 264;
14140 		break;
14141 	case FLASH_5752PAGE_SIZE_528:
14142 		tp->nvram_pagesize = 528;
14143 		break;
14144 	}
14145 }
14146 
14147 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14148 {
14149 	u32 nvcfg1;
14150 
14151 	nvcfg1 = tr32(NVRAM_CFG1);
14152 
14153 	/* NVRAM protection for TPM */
14154 	if (nvcfg1 & (1 << 27))
14155 		tg3_flag_set(tp, PROTECTED_NVRAM);
14156 
14157 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14158 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14159 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14160 		tp->nvram_jedecnum = JEDEC_ATMEL;
14161 		tg3_flag_set(tp, NVRAM_BUFFERED);
14162 		break;
14163 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14164 		tp->nvram_jedecnum = JEDEC_ATMEL;
14165 		tg3_flag_set(tp, NVRAM_BUFFERED);
14166 		tg3_flag_set(tp, FLASH);
14167 		break;
14168 	case FLASH_5752VENDOR_ST_M45PE10:
14169 	case FLASH_5752VENDOR_ST_M45PE20:
14170 	case FLASH_5752VENDOR_ST_M45PE40:
14171 		tp->nvram_jedecnum = JEDEC_ST;
14172 		tg3_flag_set(tp, NVRAM_BUFFERED);
14173 		tg3_flag_set(tp, FLASH);
14174 		break;
14175 	}
14176 
14177 	if (tg3_flag(tp, FLASH)) {
14178 		tg3_nvram_get_pagesize(tp, nvcfg1);
14179 	} else {
		/* For EEPROMs, set the pagesize to the maximum EEPROM size. */
14181 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14182 
14183 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14184 		tw32(NVRAM_CFG1, nvcfg1);
14185 	}
14186 }
14187 
14188 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14189 {
14190 	u32 nvcfg1, protect = 0;
14191 
14192 	nvcfg1 = tr32(NVRAM_CFG1);
14193 
14194 	/* NVRAM protection for TPM */
14195 	if (nvcfg1 & (1 << 27)) {
14196 		tg3_flag_set(tp, PROTECTED_NVRAM);
14197 		protect = 1;
14198 	}
14199 
14200 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14201 	switch (nvcfg1) {
14202 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14203 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14204 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14205 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14206 		tp->nvram_jedecnum = JEDEC_ATMEL;
14207 		tg3_flag_set(tp, NVRAM_BUFFERED);
14208 		tg3_flag_set(tp, FLASH);
14209 		tp->nvram_pagesize = 264;
14210 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14211 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14212 			tp->nvram_size = (protect ? 0x3e200 :
14213 					  TG3_NVRAM_SIZE_512KB);
14214 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14215 			tp->nvram_size = (protect ? 0x1f200 :
14216 					  TG3_NVRAM_SIZE_256KB);
14217 		else
14218 			tp->nvram_size = (protect ? 0x1f200 :
14219 					  TG3_NVRAM_SIZE_128KB);
14220 		break;
14221 	case FLASH_5752VENDOR_ST_M45PE10:
14222 	case FLASH_5752VENDOR_ST_M45PE20:
14223 	case FLASH_5752VENDOR_ST_M45PE40:
14224 		tp->nvram_jedecnum = JEDEC_ST;
14225 		tg3_flag_set(tp, NVRAM_BUFFERED);
14226 		tg3_flag_set(tp, FLASH);
14227 		tp->nvram_pagesize = 256;
14228 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14229 			tp->nvram_size = (protect ?
14230 					  TG3_NVRAM_SIZE_64KB :
14231 					  TG3_NVRAM_SIZE_128KB);
14232 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14233 			tp->nvram_size = (protect ?
14234 					  TG3_NVRAM_SIZE_64KB :
14235 					  TG3_NVRAM_SIZE_256KB);
14236 		else
14237 			tp->nvram_size = (protect ?
14238 					  TG3_NVRAM_SIZE_128KB :
14239 					  TG3_NVRAM_SIZE_512KB);
14240 		break;
14241 	}
14242 }
14243 
14244 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14245 {
14246 	u32 nvcfg1;
14247 
14248 	nvcfg1 = tr32(NVRAM_CFG1);
14249 
14250 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14251 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14252 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14253 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14254 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14255 		tp->nvram_jedecnum = JEDEC_ATMEL;
14256 		tg3_flag_set(tp, NVRAM_BUFFERED);
14257 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14258 
14259 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14260 		tw32(NVRAM_CFG1, nvcfg1);
14261 		break;
14262 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14263 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14264 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14265 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14266 		tp->nvram_jedecnum = JEDEC_ATMEL;
14267 		tg3_flag_set(tp, NVRAM_BUFFERED);
14268 		tg3_flag_set(tp, FLASH);
14269 		tp->nvram_pagesize = 264;
14270 		break;
14271 	case FLASH_5752VENDOR_ST_M45PE10:
14272 	case FLASH_5752VENDOR_ST_M45PE20:
14273 	case FLASH_5752VENDOR_ST_M45PE40:
14274 		tp->nvram_jedecnum = JEDEC_ST;
14275 		tg3_flag_set(tp, NVRAM_BUFFERED);
14276 		tg3_flag_set(tp, FLASH);
14277 		tp->nvram_pagesize = 256;
14278 		break;
14279 	}
14280 }
14281 
14282 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14283 {
14284 	u32 nvcfg1, protect = 0;
14285 
14286 	nvcfg1 = tr32(NVRAM_CFG1);
14287 
14288 	/* NVRAM protection for TPM */
14289 	if (nvcfg1 & (1 << 27)) {
14290 		tg3_flag_set(tp, PROTECTED_NVRAM);
14291 		protect = 1;
14292 	}
14293 
14294 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14295 	switch (nvcfg1) {
14296 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14297 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14298 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14299 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14300 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14301 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14302 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14303 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14304 		tp->nvram_jedecnum = JEDEC_ATMEL;
14305 		tg3_flag_set(tp, NVRAM_BUFFERED);
14306 		tg3_flag_set(tp, FLASH);
14307 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14308 		tp->nvram_pagesize = 256;
14309 		break;
14310 	case FLASH_5761VENDOR_ST_A_M45PE20:
14311 	case FLASH_5761VENDOR_ST_A_M45PE40:
14312 	case FLASH_5761VENDOR_ST_A_M45PE80:
14313 	case FLASH_5761VENDOR_ST_A_M45PE16:
14314 	case FLASH_5761VENDOR_ST_M_M45PE20:
14315 	case FLASH_5761VENDOR_ST_M_M45PE40:
14316 	case FLASH_5761VENDOR_ST_M_M45PE80:
14317 	case FLASH_5761VENDOR_ST_M_M45PE16:
14318 		tp->nvram_jedecnum = JEDEC_ST;
14319 		tg3_flag_set(tp, NVRAM_BUFFERED);
14320 		tg3_flag_set(tp, FLASH);
14321 		tp->nvram_pagesize = 256;
14322 		break;
14323 	}
14324 
14325 	if (protect) {
14326 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14327 	} else {
14328 		switch (nvcfg1) {
14329 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14330 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14331 		case FLASH_5761VENDOR_ST_A_M45PE16:
14332 		case FLASH_5761VENDOR_ST_M_M45PE16:
14333 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14334 			break;
14335 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14336 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14337 		case FLASH_5761VENDOR_ST_A_M45PE80:
14338 		case FLASH_5761VENDOR_ST_M_M45PE80:
14339 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14340 			break;
14341 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14342 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14343 		case FLASH_5761VENDOR_ST_A_M45PE40:
14344 		case FLASH_5761VENDOR_ST_M_M45PE40:
14345 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14346 			break;
14347 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14348 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14349 		case FLASH_5761VENDOR_ST_A_M45PE20:
14350 		case FLASH_5761VENDOR_ST_M_M45PE20:
14351 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14352 			break;
14353 		}
14354 	}
14355 }
14356 
14357 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14358 {
14359 	tp->nvram_jedecnum = JEDEC_ATMEL;
14360 	tg3_flag_set(tp, NVRAM_BUFFERED);
14361 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14362 }
14363 
14364 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14365 {
14366 	u32 nvcfg1;
14367 
14368 	nvcfg1 = tr32(NVRAM_CFG1);
14369 
14370 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14371 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14372 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14373 		tp->nvram_jedecnum = JEDEC_ATMEL;
14374 		tg3_flag_set(tp, NVRAM_BUFFERED);
14375 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14376 
14377 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14378 		tw32(NVRAM_CFG1, nvcfg1);
14379 		return;
14380 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14381 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14382 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14383 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14384 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14385 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14386 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14387 		tp->nvram_jedecnum = JEDEC_ATMEL;
14388 		tg3_flag_set(tp, NVRAM_BUFFERED);
14389 		tg3_flag_set(tp, FLASH);
14390 
14391 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14392 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14393 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14394 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14395 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14396 			break;
14397 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14398 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14399 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14400 			break;
14401 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14402 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14403 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14404 			break;
14405 		}
14406 		break;
14407 	case FLASH_5752VENDOR_ST_M45PE10:
14408 	case FLASH_5752VENDOR_ST_M45PE20:
14409 	case FLASH_5752VENDOR_ST_M45PE40:
14410 		tp->nvram_jedecnum = JEDEC_ST;
14411 		tg3_flag_set(tp, NVRAM_BUFFERED);
14412 		tg3_flag_set(tp, FLASH);
14413 
14414 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14415 		case FLASH_5752VENDOR_ST_M45PE10:
14416 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14417 			break;
14418 		case FLASH_5752VENDOR_ST_M45PE20:
14419 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14420 			break;
14421 		case FLASH_5752VENDOR_ST_M45PE40:
14422 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14423 			break;
14424 		}
14425 		break;
14426 	default:
14427 		tg3_flag_set(tp, NO_NVRAM);
14428 		return;
14429 	}
14430 
14431 	tg3_nvram_get_pagesize(tp, nvcfg1);
14432 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14433 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14434 }
14435 
14437 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14438 {
14439 	u32 nvcfg1;
14440 
14441 	nvcfg1 = tr32(NVRAM_CFG1);
14442 
14443 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14444 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14445 	case FLASH_5717VENDOR_MICRO_EEPROM:
14446 		tp->nvram_jedecnum = JEDEC_ATMEL;
14447 		tg3_flag_set(tp, NVRAM_BUFFERED);
14448 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14449 
14450 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14451 		tw32(NVRAM_CFG1, nvcfg1);
14452 		return;
14453 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14454 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14455 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14456 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14457 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14458 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14459 	case FLASH_5717VENDOR_ATMEL_45USPT:
14460 		tp->nvram_jedecnum = JEDEC_ATMEL;
14461 		tg3_flag_set(tp, NVRAM_BUFFERED);
14462 		tg3_flag_set(tp, FLASH);
14463 
14464 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14465 		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
14467 			break;
14468 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14469 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14470 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14471 			break;
14472 		default:
14473 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14474 			break;
14475 		}
14476 		break;
14477 	case FLASH_5717VENDOR_ST_M_M25PE10:
14478 	case FLASH_5717VENDOR_ST_A_M25PE10:
14479 	case FLASH_5717VENDOR_ST_M_M45PE10:
14480 	case FLASH_5717VENDOR_ST_A_M45PE10:
14481 	case FLASH_5717VENDOR_ST_M_M25PE20:
14482 	case FLASH_5717VENDOR_ST_A_M25PE20:
14483 	case FLASH_5717VENDOR_ST_M_M45PE20:
14484 	case FLASH_5717VENDOR_ST_A_M45PE20:
14485 	case FLASH_5717VENDOR_ST_25USPT:
14486 	case FLASH_5717VENDOR_ST_45USPT:
14487 		tp->nvram_jedecnum = JEDEC_ST;
14488 		tg3_flag_set(tp, NVRAM_BUFFERED);
14489 		tg3_flag_set(tp, FLASH);
14490 
14491 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14492 		case FLASH_5717VENDOR_ST_M_M25PE20:
14493 		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
14495 			break;
14496 		case FLASH_5717VENDOR_ST_A_M25PE20:
14497 		case FLASH_5717VENDOR_ST_A_M45PE20:
14498 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14499 			break;
14500 		default:
14501 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14502 			break;
14503 		}
14504 		break;
14505 	default:
14506 		tg3_flag_set(tp, NO_NVRAM);
14507 		return;
14508 	}
14509 
14510 	tg3_nvram_get_pagesize(tp, nvcfg1);
14511 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14512 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14513 }
14514 
14515 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14516 {
14517 	u32 nvcfg1, nvmpinstrp;
14518 
14519 	nvcfg1 = tr32(NVRAM_CFG1);
14520 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14521 
14522 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14523 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14524 			tg3_flag_set(tp, NO_NVRAM);
14525 			return;
14526 		}
14527 
14528 		switch (nvmpinstrp) {
14529 		case FLASH_5762_EEPROM_HD:
14530 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14531 			break;
14532 		case FLASH_5762_EEPROM_LD:
14533 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14534 			break;
14535 		case FLASH_5720VENDOR_M_ST_M45PE20:
14536 			/* This pinstrap supports multiple sizes, so force it
14537 			 * to read the actual size from location 0xf0.
14538 			 */
14539 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14540 			break;
14541 		}
14542 	}
14543 
14544 	switch (nvmpinstrp) {
14545 	case FLASH_5720_EEPROM_HD:
14546 	case FLASH_5720_EEPROM_LD:
14547 		tp->nvram_jedecnum = JEDEC_ATMEL;
14548 		tg3_flag_set(tp, NVRAM_BUFFERED);
14549 
14550 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14551 		tw32(NVRAM_CFG1, nvcfg1);
14552 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14553 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14554 		else
14555 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14556 		return;
14557 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14558 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14559 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14560 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14561 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14562 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14563 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14564 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14565 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14566 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14567 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14568 	case FLASH_5720VENDOR_ATMEL_45USPT:
14569 		tp->nvram_jedecnum = JEDEC_ATMEL;
14570 		tg3_flag_set(tp, NVRAM_BUFFERED);
14571 		tg3_flag_set(tp, FLASH);
14572 
14573 		switch (nvmpinstrp) {
14574 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14575 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14576 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14577 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14578 			break;
14579 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14580 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14581 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14582 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14583 			break;
14584 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14585 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14586 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14587 			break;
14588 		default:
14589 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14590 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14591 			break;
14592 		}
14593 		break;
14594 	case FLASH_5720VENDOR_M_ST_M25PE10:
14595 	case FLASH_5720VENDOR_M_ST_M45PE10:
14596 	case FLASH_5720VENDOR_A_ST_M25PE10:
14597 	case FLASH_5720VENDOR_A_ST_M45PE10:
14598 	case FLASH_5720VENDOR_M_ST_M25PE20:
14599 	case FLASH_5720VENDOR_M_ST_M45PE20:
14600 	case FLASH_5720VENDOR_A_ST_M25PE20:
14601 	case FLASH_5720VENDOR_A_ST_M45PE20:
14602 	case FLASH_5720VENDOR_M_ST_M25PE40:
14603 	case FLASH_5720VENDOR_M_ST_M45PE40:
14604 	case FLASH_5720VENDOR_A_ST_M25PE40:
14605 	case FLASH_5720VENDOR_A_ST_M45PE40:
14606 	case FLASH_5720VENDOR_M_ST_M25PE80:
14607 	case FLASH_5720VENDOR_M_ST_M45PE80:
14608 	case FLASH_5720VENDOR_A_ST_M25PE80:
14609 	case FLASH_5720VENDOR_A_ST_M45PE80:
14610 	case FLASH_5720VENDOR_ST_25USPT:
14611 	case FLASH_5720VENDOR_ST_45USPT:
14612 		tp->nvram_jedecnum = JEDEC_ST;
14613 		tg3_flag_set(tp, NVRAM_BUFFERED);
14614 		tg3_flag_set(tp, FLASH);
14615 
14616 		switch (nvmpinstrp) {
14617 		case FLASH_5720VENDOR_M_ST_M25PE20:
14618 		case FLASH_5720VENDOR_M_ST_M45PE20:
14619 		case FLASH_5720VENDOR_A_ST_M25PE20:
14620 		case FLASH_5720VENDOR_A_ST_M45PE20:
14621 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14622 			break;
14623 		case FLASH_5720VENDOR_M_ST_M25PE40:
14624 		case FLASH_5720VENDOR_M_ST_M45PE40:
14625 		case FLASH_5720VENDOR_A_ST_M25PE40:
14626 		case FLASH_5720VENDOR_A_ST_M45PE40:
14627 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14628 			break;
14629 		case FLASH_5720VENDOR_M_ST_M25PE80:
14630 		case FLASH_5720VENDOR_M_ST_M45PE80:
14631 		case FLASH_5720VENDOR_A_ST_M25PE80:
14632 		case FLASH_5720VENDOR_A_ST_M45PE80:
14633 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14634 			break;
14635 		default:
14636 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14637 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14638 			break;
14639 		}
14640 		break;
14641 	default:
14642 		tg3_flag_set(tp, NO_NVRAM);
14643 		return;
14644 	}
14645 
14646 	tg3_nvram_get_pagesize(tp, nvcfg1);
14647 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14648 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14649 
14650 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14651 		u32 val;
14652 
14653 		if (tg3_nvram_read(tp, 0, &val))
14654 			return;
14655 
14656 		if (val != TG3_EEPROM_MAGIC &&
14657 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14658 			tg3_flag_set(tp, NO_NVRAM);
14659 	}
14660 }
14661 
14662 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14663 static void tg3_nvram_init(struct tg3 *tp)
14664 {
14665 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* The SSB Broadcom GigE core has no NVRAM or EEPROM. */
14667 		tg3_flag_clear(tp, NVRAM);
14668 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14669 		tg3_flag_set(tp, NO_NVRAM);
14670 		return;
14671 	}
14672 
14673 	tw32_f(GRC_EEPROM_ADDR,
14674 	     (EEPROM_ADDR_FSM_RESET |
14675 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14676 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14677 
14678 	msleep(1);
14679 
14680 	/* Enable seeprom accesses. */
14681 	tw32_f(GRC_LOCAL_CTRL,
14682 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14683 	udelay(100);
14684 
14685 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14686 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14687 		tg3_flag_set(tp, NVRAM);
14688 
14689 		if (tg3_nvram_lock(tp)) {
14690 			netdev_warn(tp->dev,
14691 				    "Cannot get nvram lock, %s failed\n",
14692 				    __func__);
14693 			return;
14694 		}
14695 		tg3_enable_nvram_access(tp);
14696 
14697 		tp->nvram_size = 0;
14698 
14699 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14700 			tg3_get_5752_nvram_info(tp);
14701 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14702 			tg3_get_5755_nvram_info(tp);
14703 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14704 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14705 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14706 			tg3_get_5787_nvram_info(tp);
14707 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14708 			tg3_get_5761_nvram_info(tp);
14709 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14710 			tg3_get_5906_nvram_info(tp);
14711 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14712 			 tg3_flag(tp, 57765_CLASS))
14713 			tg3_get_57780_nvram_info(tp);
14714 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14715 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14716 			tg3_get_5717_nvram_info(tp);
14717 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14718 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14719 			tg3_get_5720_nvram_info(tp);
14720 		else
14721 			tg3_get_nvram_info(tp);
14722 
14723 		if (tp->nvram_size == 0)
14724 			tg3_get_nvram_size(tp);
14725 
14726 		tg3_disable_nvram_access(tp);
14727 		tg3_nvram_unlock(tp);
14728 
14729 	} else {
14730 		tg3_flag_clear(tp, NVRAM);
14731 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14732 
14733 		tg3_get_eeprom_size(tp);
14734 	}
14735 }
14736 
14737 struct subsys_tbl_ent {
14738 	u16 subsys_vendor, subsys_devid;
14739 	u32 phy_id;
14740 };
14741 
14742 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14743 	/* Broadcom boards. */
14744 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14745 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14746 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14747 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14748 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14749 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14750 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14751 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14752 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14753 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14754 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14755 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14756 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14757 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14758 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14759 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14760 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14761 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14762 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14763 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14764 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14765 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14766 
14767 	/* 3com boards. */
14768 	{ TG3PCI_SUBVENDOR_ID_3COM,
14769 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14770 	{ TG3PCI_SUBVENDOR_ID_3COM,
14771 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14772 	{ TG3PCI_SUBVENDOR_ID_3COM,
14773 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14774 	{ TG3PCI_SUBVENDOR_ID_3COM,
14775 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14776 	{ TG3PCI_SUBVENDOR_ID_3COM,
14777 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14778 
14779 	/* DELL boards. */
14780 	{ TG3PCI_SUBVENDOR_ID_DELL,
14781 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14782 	{ TG3PCI_SUBVENDOR_ID_DELL,
14783 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14784 	{ TG3PCI_SUBVENDOR_ID_DELL,
14785 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14786 	{ TG3PCI_SUBVENDOR_ID_DELL,
14787 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14788 
14789 	/* Compaq boards. */
14790 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14791 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14792 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14793 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14794 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14795 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14796 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14797 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14798 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14799 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14800 
14801 	/* IBM boards. */
14802 	{ TG3PCI_SUBVENDOR_ID_IBM,
14803 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14804 };
14805 
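/* Match this device's PCI subsystem vendor/device IDs against the
 * static board table above; returns NULL for unknown boards.
 */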
14806 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14807 {
14808 	int i;
14809 
14810 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14811 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14812 		     tp->pdev->subsystem_vendor) &&
14813 		    (subsys_id_to_phy_id[i].subsys_devid ==
14814 		     tp->pdev->subsystem_device))
14815 			return &subsys_id_to_phy_id[i];
14816 	}
14817 	return NULL;
14818 }
14819 
14820 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14821 {
14822 	u32 val;
14823 
14824 	tp->phy_id = TG3_PHY_ID_INVALID;
14825 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14826 
14827 	/* Assume an onboard, WOL-capable device by default.  */
14828 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14829 	tg3_flag_set(tp, WOL_CAP);
14830 
14831 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14832 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14833 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14834 			tg3_flag_set(tp, IS_NIC);
14835 		}
14836 		val = tr32(VCPU_CFGSHDW);
14837 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14838 			tg3_flag_set(tp, ASPM_WORKAROUND);
14839 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14840 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14841 			tg3_flag_set(tp, WOL_ENABLE);
14842 			device_set_wakeup_enable(&tp->pdev->dev, true);
14843 		}
14844 		goto done;
14845 	}
14846 
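	/* The bootcode leaves a signature word in NIC SRAM once it has
	 * populated the config words; without it, the defaults set
	 * above are kept.
	 */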
14847 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14848 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14849 		u32 nic_cfg, led_cfg;
14850 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14851 		int eeprom_phy_serdes = 0;
14852 
14853 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14854 		tp->nic_sram_data_cfg = nic_cfg;
14855 
14856 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14857 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14858 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14859 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14860 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14861 		    (ver > 0) && (ver < 0x100))
14862 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14863 
14864 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14865 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14866 
14867 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14868 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14869 			eeprom_phy_serdes = 1;
14870 
14871 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14872 		if (nic_phy_id != 0) {
14873 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14874 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14875 
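			/* Assemble the tg3 phy ID with the same layout
			 * that tg3_phy_probe() builds from MII_PHYSID1/2.
			 */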
14876 			eeprom_phy_id  = (id1 >> 16) << 10;
14877 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14878 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14879 		} else
14880 			eeprom_phy_id = 0;
14881 
14882 		tp->phy_id = eeprom_phy_id;
14883 		if (eeprom_phy_serdes) {
14884 			if (!tg3_flag(tp, 5705_PLUS))
14885 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14886 			else
14887 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14888 		}
14889 
14890 		if (tg3_flag(tp, 5750_PLUS))
14891 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14892 				    SHASTA_EXT_LED_MODE_MASK);
14893 		else
14894 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14895 
14896 		switch (led_cfg) {
14897 		default:
14898 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14899 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14900 			break;
14901 
14902 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14903 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14904 			break;
14905 
14906 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14907 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14908 
14909 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14910 			 * read, as some older 5700/5701 bootcode reports 0.
14911 			 */
14912 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14913 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14914 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14915 
14916 			break;
14917 
14918 		case SHASTA_EXT_LED_SHARED:
14919 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14920 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14921 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14922 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14923 						 LED_CTRL_MODE_PHY_2);
14924 			break;
14925 
14926 		case SHASTA_EXT_LED_MAC:
14927 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14928 			break;
14929 
14930 		case SHASTA_EXT_LED_COMBO:
14931 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14932 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14933 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14934 						 LED_CTRL_MODE_PHY_2);
14935 			break;
14936 
14937 		}
14938 
14939 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14940 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14941 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14942 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14943 
14944 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14945 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14946 
14947 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14948 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14949 			if ((tp->pdev->subsystem_vendor ==
14950 			     PCI_VENDOR_ID_ARIMA) &&
14951 			    (tp->pdev->subsystem_device == 0x205a ||
14952 			     tp->pdev->subsystem_device == 0x2063))
14953 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14954 		} else {
14955 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14956 			tg3_flag_set(tp, IS_NIC);
14957 		}
14958 
14959 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14960 			tg3_flag_set(tp, ENABLE_ASF);
14961 			if (tg3_flag(tp, 5750_PLUS))
14962 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14963 		}
14964 
14965 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14966 		    tg3_flag(tp, 5750_PLUS))
14967 			tg3_flag_set(tp, ENABLE_APE);
14968 
14969 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14970 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14971 			tg3_flag_clear(tp, WOL_CAP);
14972 
14973 		if (tg3_flag(tp, WOL_CAP) &&
14974 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14975 			tg3_flag_set(tp, WOL_ENABLE);
14976 			device_set_wakeup_enable(&tp->pdev->dev, true);
14977 		}
14978 
14979 		if (cfg2 & (1 << 17))
14980 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14981 
14982 		/* Serdes signal pre-emphasis in register 0x590 is set by
14983 		 * the bootcode if bit 18 is set. */
14984 		if (cfg2 & (1 << 18))
14985 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14986 
14987 		if ((tg3_flag(tp, 57765_PLUS) ||
14988 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14989 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14990 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14991 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14992 
14993 		if (tg3_flag(tp, PCI_EXPRESS)) {
14994 			u32 cfg3;
14995 
14996 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14997 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14998 			    !tg3_flag(tp, 57765_PLUS) &&
14999 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15000 				tg3_flag_set(tp, ASPM_WORKAROUND);
15001 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15002 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15003 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15004 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15005 		}
15006 
15007 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15008 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15009 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15010 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15011 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15012 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15013 	}
15014 done:
15015 	if (tg3_flag(tp, WOL_CAP))
15016 		device_set_wakeup_enable(&tp->pdev->dev,
15017 					 tg3_flag(tp, WOL_ENABLE));
15018 	else
15019 		device_set_wakeup_capable(&tp->pdev->dev, false);
15020 }
15021 
15022 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15023 {
15024 	int i, err;
15025 	u32 val2, off = offset * 8;
15026 
15027 	err = tg3_nvram_lock(tp);
15028 	if (err)
15029 		return err;
15030 
15031 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15032 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15033 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15034 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15035 	udelay(10);
15036 
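	/* Poll for up to 1 ms (100 x 10 us) for the OTP read to complete. */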
15037 	for (i = 0; i < 100; i++) {
15038 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15039 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15040 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15041 			break;
15042 		}
15043 		udelay(10);
15044 	}
15045 
15046 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15047 
15048 	tg3_nvram_unlock(tp);
15049 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15050 		return 0;
15051 
15052 	return -EBUSY;
15053 }
15054 
15055 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15056 {
15057 	int i;
15058 	u32 val;
15059 
15060 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15061 	tw32(OTP_CTRL, cmd);
15062 
15063 	/* Wait for up to 1 ms for the command to execute. */
15064 	for (i = 0; i < 100; i++) {
15065 		val = tr32(OTP_STATUS);
15066 		if (val & OTP_STATUS_CMD_DONE)
15067 			break;
15068 		udelay(10);
15069 	}
15070 
15071 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15072 }
15073 
15074 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15075  * configuration is a 32-bit value that straddles the alignment boundary.
15076  * We do two 32-bit reads and then shift and merge the results.
15077  */
15078 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15079 {
15080 	u32 bhalf_otp, thalf_otp;
15081 
15082 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15083 
15084 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15085 		return 0;
15086 
15087 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15088 
15089 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15090 		return 0;
15091 
15092 	thalf_otp = tr32(OTP_READ_DATA);
15093 
15094 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15095 
15096 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15097 		return 0;
15098 
15099 	bhalf_otp = tr32(OTP_READ_DATA);
15100 
15101 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15102 }
15103 
15104 static void tg3_phy_init_link_config(struct tg3 *tp)
15105 {
15106 	u32 adv = ADVERTISED_Autoneg;
15107 
15108 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15109 		adv |= ADVERTISED_1000baseT_Half |
15110 		       ADVERTISED_1000baseT_Full;
15111 
15112 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15113 		adv |= ADVERTISED_100baseT_Half |
15114 		       ADVERTISED_100baseT_Full |
15115 		       ADVERTISED_10baseT_Half |
15116 		       ADVERTISED_10baseT_Full |
15117 		       ADVERTISED_TP;
15118 	else
15119 		adv |= ADVERTISED_FIBRE;
15120 
15121 	tp->link_config.advertising = adv;
15122 	tp->link_config.speed = SPEED_UNKNOWN;
15123 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15124 	tp->link_config.autoneg = AUTONEG_ENABLE;
15125 	tp->link_config.active_speed = SPEED_UNKNOWN;
15126 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15127 
15128 	tp->old_link = -1;
15129 }
15130 
15131 static int tg3_phy_probe(struct tg3 *tp)
15132 {
15133 	u32 hw_phy_id_1, hw_phy_id_2;
15134 	u32 hw_phy_id, hw_phy_id_masked;
15135 	int err;
15136 
15137 	/* Flow control autonegotiation is the default behavior */
15138 	tg3_flag_set(tp, PAUSE_AUTONEG);
15139 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15140 
15141 	if (tg3_flag(tp, ENABLE_APE)) {
15142 		switch (tp->pci_fn) {
15143 		case 0:
15144 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15145 			break;
15146 		case 1:
15147 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15148 			break;
15149 		case 2:
15150 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15151 			break;
15152 		case 3:
15153 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15154 			break;
15155 		}
15156 	}
15157 
15158 	if (!tg3_flag(tp, ENABLE_ASF) &&
15159 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15160 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15161 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15162 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15163 
15164 	if (tg3_flag(tp, USE_PHYLIB))
15165 		return tg3_phy_init(tp);
15166 
15167 	/* Reading the PHY ID register can conflict with ASF
15168 	 * firmware access to the PHY hardware.
15169 	 */
15170 	err = 0;
15171 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15172 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15173 	} else {
15174 		/* Now read the physical PHY_ID from the chip and verify
15175 		 * that it is sane.  If it doesn't look good, we fall back
15176 		 * to the PHY_ID found in the eeprom area or, failing
15177 		 * that, to the hard-coded subsystem-ID table.
15178 		 */
15179 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15180 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15181 
15182 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15183 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15184 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15185 
15186 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15187 	}
15188 
15189 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15190 		tp->phy_id = hw_phy_id;
15191 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15192 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15193 		else
15194 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15195 	} else {
15196 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15197 			/* Do nothing, phy ID already set up in
15198 			 * tg3_get_eeprom_hw_cfg().
15199 			 */
15200 		} else {
15201 			struct subsys_tbl_ent *p;
15202 
15203 			/* No eeprom signature?  Try the hardcoded
15204 			 * subsys device table.
15205 			 */
15206 			p = tg3_lookup_by_subsys(tp);
15207 			if (p) {
15208 				tp->phy_id = p->phy_id;
15209 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15210 				/* So far we have seen the IDs 0xbc050cd0,
15211 				 * 0xbc050f80 and 0xbc050c30 on devices
15212 				 * connected to a BCM4785, and there are
15213 				 * probably more. For now, just assume that
15214 				 * the phy is supported when it is connected
15215 				 * to an SSB core.
15216 				 */
15217 				return -ENODEV;
15218 			}
15219 
15220 			if (!tp->phy_id ||
15221 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15222 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15223 		}
15224 	}
15225 
15226 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15227 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15228 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15229 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15230 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15231 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15232 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15233 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15234 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15235 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15236 
15237 		tp->eee.supported = SUPPORTED_100baseT_Full |
15238 				    SUPPORTED_1000baseT_Full;
15239 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15240 				     ADVERTISED_1000baseT_Full;
15241 		tp->eee.eee_enabled = 1;
15242 		tp->eee.tx_lpi_enabled = 1;
15243 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15244 	}
15245 
15246 	tg3_phy_init_link_config(tp);
15247 
15248 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15249 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15250 	    !tg3_flag(tp, ENABLE_APE) &&
15251 	    !tg3_flag(tp, ENABLE_ASF)) {
15252 		u32 bmsr, dummy;
15253 
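		/* BMSR latches link-down events; read it twice so the
		 * second read reflects the current link state.
		 */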
15254 		tg3_readphy(tp, MII_BMSR, &bmsr);
15255 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15256 		    (bmsr & BMSR_LSTATUS))
15257 			goto skip_phy_reset;
15258 
15259 		err = tg3_phy_reset(tp);
15260 		if (err)
15261 			return err;
15262 
15263 		tg3_phy_set_wirespeed(tp);
15264 
15265 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15266 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15267 					    tp->link_config.flowctrl);
15268 
15269 			tg3_writephy(tp, MII_BMCR,
15270 				     BMCR_ANENABLE | BMCR_ANRESTART);
15271 		}
15272 	}
15273 
15274 skip_phy_reset:
15275 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15276 		err = tg3_init_5401phy_dsp(tp);
15277 		if (err)
15278 			return err;
15279 
15280 		err = tg3_init_5401phy_dsp(tp);
15281 	}
15282 
15283 	return err;
15284 }
15285 
15286 static void tg3_read_vpd(struct tg3 *tp)
15287 {
15288 	u8 *vpd_data;
15289 	unsigned int block_end, rosize, len;
15290 	u32 vpdlen;
15291 	int j, i = 0;
15292 
15293 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15294 	if (!vpd_data)
15295 		goto out_no_vpd;
15296 
15297 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15298 	if (i < 0)
15299 		goto out_not_found;
15300 
15301 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15302 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15303 	i += PCI_VPD_LRDT_TAG_SIZE;
15304 
15305 	if (block_end > vpdlen)
15306 		goto out_not_found;
15307 
15308 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15309 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15310 	if (j > 0) {
15311 		len = pci_vpd_info_field_size(&vpd_data[j]);
15312 
15313 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15314 		if (j + len > block_end || len != 4 ||
15315 		    memcmp(&vpd_data[j], "1028", 4))
15316 			goto partno;
15317 
15318 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15319 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15320 		if (j < 0)
15321 			goto partno;
15322 
15323 		len = pci_vpd_info_field_size(&vpd_data[j]);
15324 
15325 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15326 		if (j + len > block_end)
15327 			goto partno;
15328 
15329 		if (len >= sizeof(tp->fw_ver))
15330 			len = sizeof(tp->fw_ver) - 1;
15331 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15332 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15333 			 &vpd_data[j]);
15334 	}
15335 
15336 partno:
15337 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15338 				      PCI_VPD_RO_KEYWORD_PARTNO);
15339 	if (i < 0)
15340 		goto out_not_found;
15341 
15342 	len = pci_vpd_info_field_size(&vpd_data[i]);
15343 
15344 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15345 	if (len > TG3_BPN_SIZE ||
15346 	    (len + i) > vpdlen)
15347 		goto out_not_found;
15348 
15349 	memcpy(tp->board_part_number, &vpd_data[i], len);
15350 
15351 out_not_found:
15352 	kfree(vpd_data);
15353 	if (tp->board_part_number[0])
15354 		return;
15355 
15356 out_no_vpd:
15357 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15358 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15359 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15360 			strcpy(tp->board_part_number, "BCM5717");
15361 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15362 			strcpy(tp->board_part_number, "BCM5718");
15363 		else
15364 			goto nomatch;
15365 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15366 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15367 			strcpy(tp->board_part_number, "BCM57780");
15368 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15369 			strcpy(tp->board_part_number, "BCM57760");
15370 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15371 			strcpy(tp->board_part_number, "BCM57790");
15372 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15373 			strcpy(tp->board_part_number, "BCM57788");
15374 		else
15375 			goto nomatch;
15376 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15377 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15378 			strcpy(tp->board_part_number, "BCM57761");
15379 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15380 			strcpy(tp->board_part_number, "BCM57765");
15381 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15382 			strcpy(tp->board_part_number, "BCM57781");
15383 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15384 			strcpy(tp->board_part_number, "BCM57785");
15385 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15386 			strcpy(tp->board_part_number, "BCM57791");
15387 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15388 			strcpy(tp->board_part_number, "BCM57795");
15389 		else
15390 			goto nomatch;
15391 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15392 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15393 			strcpy(tp->board_part_number, "BCM57762");
15394 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15395 			strcpy(tp->board_part_number, "BCM57766");
15396 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15397 			strcpy(tp->board_part_number, "BCM57782");
15398 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15399 			strcpy(tp->board_part_number, "BCM57786");
15400 		else
15401 			goto nomatch;
15402 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15403 		strcpy(tp->board_part_number, "BCM95906");
15404 	} else {
15405 nomatch:
15406 		strcpy(tp->board_part_number, "none");
15407 	}
15408 }
15409 
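/* A valid firmware image in NVRAM starts with a word whose upper bits
 * read 0x0c000000, followed by a zero word.
 */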
15410 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15411 {
15412 	u32 val;
15413 
15414 	if (tg3_nvram_read(tp, offset, &val) ||
15415 	    (val & 0xfc000000) != 0x0c000000 ||
15416 	    tg3_nvram_read(tp, offset + 4, &val) ||
15417 	    val != 0)
15418 		return 0;
15419 
15420 	return 1;
15421 }
15422 
15423 static void tg3_read_bc_ver(struct tg3 *tp)
15424 {
15425 	u32 val, offset, start, ver_offset;
15426 	int i, dst_off;
15427 	bool newver = false;
15428 
15429 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15430 	    tg3_nvram_read(tp, 0x4, &start))
15431 		return;
15432 
15433 	offset = tg3_nvram_logical_addr(tp, offset);
15434 
15435 	if (tg3_nvram_read(tp, offset, &val))
15436 		return;
15437 
15438 	if ((val & 0xfc000000) == 0x0c000000) {
15439 		if (tg3_nvram_read(tp, offset + 4, &val))
15440 			return;
15441 
15442 		if (val == 0)
15443 			newver = true;
15444 	}
15445 
15446 	dst_off = strlen(tp->fw_ver);
15447 
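	/* Newer bootcode carries a 16-byte version string; older images
	 * only store major/minor numbers at a fixed NVRAM location.
	 */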
15448 	if (newver) {
15449 		if (TG3_VER_SIZE - dst_off < 16 ||
15450 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15451 			return;
15452 
15453 		offset = offset + ver_offset - start;
15454 		for (i = 0; i < 16; i += 4) {
15455 			__be32 v;
15456 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15457 				return;
15458 
15459 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15460 		}
15461 	} else {
15462 		u32 major, minor;
15463 
15464 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15465 			return;
15466 
15467 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15468 			TG3_NVM_BCVER_MAJSFT;
15469 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15470 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15471 			 "v%d.%02d", major, minor);
15472 	}
15473 }
15474 
15475 static void tg3_read_hwsb_ver(struct tg3 *tp)
15476 {
15477 	u32 val, major, minor;
15478 
15479 	/* Use native endian representation */
15480 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15481 		return;
15482 
15483 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15484 		TG3_NVM_HWSB_CFG1_MAJSFT;
15485 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15486 		TG3_NVM_HWSB_CFG1_MINSFT;
15487 
15488 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15489 }
15490 
15491 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15492 {
15493 	u32 offset, major, minor, build;
15494 
15495 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15496 
15497 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15498 		return;
15499 
15500 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15501 	case TG3_EEPROM_SB_REVISION_0:
15502 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15503 		break;
15504 	case TG3_EEPROM_SB_REVISION_2:
15505 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15506 		break;
15507 	case TG3_EEPROM_SB_REVISION_3:
15508 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15509 		break;
15510 	case TG3_EEPROM_SB_REVISION_4:
15511 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15512 		break;
15513 	case TG3_EEPROM_SB_REVISION_5:
15514 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15515 		break;
15516 	case TG3_EEPROM_SB_REVISION_6:
15517 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15518 		break;
15519 	default:
15520 		return;
15521 	}
15522 
15523 	if (tg3_nvram_read(tp, offset, &val))
15524 		return;
15525 
15526 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15527 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15528 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15529 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15530 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15531 
15532 	if (minor > 99 || build > 26)
15533 		return;
15534 
15535 	offset = strlen(tp->fw_ver);
15536 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15537 		 " v%d.%02d", major, minor);
15538 
15539 	if (build > 0) {
15540 		offset = strlen(tp->fw_ver);
15541 		if (offset < TG3_VER_SIZE - 1)
15542 			tp->fw_ver[offset] = 'a' + build - 1;
15543 	}
15544 }
15545 
15546 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15547 {
15548 	u32 val, offset, start;
15549 	int i, vlen;
15550 
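	/* Walk the NVM directory looking for the ASF init entry. */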
15551 	for (offset = TG3_NVM_DIR_START;
15552 	     offset < TG3_NVM_DIR_END;
15553 	     offset += TG3_NVM_DIRENT_SIZE) {
15554 		if (tg3_nvram_read(tp, offset, &val))
15555 			return;
15556 
15557 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15558 			break;
15559 	}
15560 
15561 	if (offset == TG3_NVM_DIR_END)
15562 		return;
15563 
15564 	if (!tg3_flag(tp, 5705_PLUS))
15565 		start = 0x08000000;
15566 	else if (tg3_nvram_read(tp, offset - 4, &start))
15567 		return;
15568 
15569 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15570 	    !tg3_fw_img_is_valid(tp, offset) ||
15571 	    tg3_nvram_read(tp, offset + 8, &val))
15572 		return;
15573 
15574 	offset += val - start;
15575 
15576 	vlen = strlen(tp->fw_ver);
15577 
15578 	tp->fw_ver[vlen++] = ',';
15579 	tp->fw_ver[vlen++] = ' ';
15580 
15581 	for (i = 0; i < 4; i++) {
15582 		__be32 v;
15583 		if (tg3_nvram_read_be32(tp, offset, &v))
15584 			return;
15585 
15586 		offset += sizeof(v);
15587 
15588 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15589 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15590 			break;
15591 		}
15592 
15593 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15594 		vlen += sizeof(v);
15595 	}
15596 }
15597 
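/* Detect an NCSI-capable APE: the firmware must be present, ready,
 * and advertise the NCSI feature bit.
 */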
15598 static void tg3_probe_ncsi(struct tg3 *tp)
15599 {
15600 	u32 apedata;
15601 
15602 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15603 	if (apedata != APE_SEG_SIG_MAGIC)
15604 		return;
15605 
15606 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15607 	if (!(apedata & APE_FW_STATUS_READY))
15608 		return;
15609 
15610 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15611 		tg3_flag_set(tp, APE_HAS_NCSI);
15612 }
15613 
15614 static void tg3_read_dash_ver(struct tg3 *tp)
15615 {
15616 	int vlen;
15617 	u32 apedata;
15618 	char *fwtype;
15619 
15620 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15621 
15622 	if (tg3_flag(tp, APE_HAS_NCSI))
15623 		fwtype = "NCSI";
15624 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15625 		fwtype = "SMASH";
15626 	else
15627 		fwtype = "DASH";
15628 
15629 	vlen = strlen(tp->fw_ver);
15630 
15631 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15632 		 fwtype,
15633 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15634 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15635 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15636 		 (apedata & APE_FW_VERSION_BLDMSK));
15637 }
15638 
15639 static void tg3_read_otp_ver(struct tg3 *tp)
15640 {
15641 	u32 val, val2;
15642 
15643 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15644 		return;
15645 
15646 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15647 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15648 	    TG3_OTP_MAGIC0_VALID(val)) {
15649 		u64 val64 = (u64) val << 32 | val2;
15650 		u32 ver = 0;
15651 		int i, vlen;
15652 
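		/* Scan the low-order bytes of the OTP word; the version
		 * is the last non-zero byte before the first zero byte.
		 */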
15653 		for (i = 0; i < 7; i++) {
15654 			if ((val64 & 0xff) == 0)
15655 				break;
15656 			ver = val64 & 0xff;
15657 			val64 >>= 8;
15658 		}
15659 		vlen = strlen(tp->fw_ver);
15660 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15661 	}
15662 }
15663 
15664 static void tg3_read_fw_ver(struct tg3 *tp)
15665 {
15666 	u32 val;
15667 	bool vpd_vers = false;
15668 
15669 	if (tp->fw_ver[0] != 0)
15670 		vpd_vers = true;
15671 
15672 	if (tg3_flag(tp, NO_NVRAM)) {
15673 		strcat(tp->fw_ver, "sb");
15674 		tg3_read_otp_ver(tp);
15675 		return;
15676 	}
15677 
15678 	if (tg3_nvram_read(tp, 0, &val))
15679 		return;
15680 
15681 	if (val == TG3_EEPROM_MAGIC)
15682 		tg3_read_bc_ver(tp);
15683 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15684 		tg3_read_sb_ver(tp, val);
15685 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15686 		tg3_read_hwsb_ver(tp);
15687 
15688 	if (tg3_flag(tp, ENABLE_ASF)) {
15689 		if (tg3_flag(tp, ENABLE_APE)) {
15690 			tg3_probe_ncsi(tp);
15691 			if (!vpd_vers)
15692 				tg3_read_dash_ver(tp);
15693 		} else if (!vpd_vers) {
15694 			tg3_read_mgmtfw_ver(tp);
15695 		}
15696 	}
15697 
15698 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15699 }
15700 
15701 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15702 {
15703 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15704 		return TG3_RX_RET_MAX_SIZE_5717;
15705 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15706 		return TG3_RX_RET_MAX_SIZE_5700;
15707 	else
15708 		return TG3_RX_RET_MAX_SIZE_5705;
15709 }
15710 
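/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling in
 * tg3_get_invariants().
 */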
15711 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15712 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15713 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15714 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15715 	{ },
15716 };
15717 
15718 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15719 {
15720 	struct pci_dev *peer;
15721 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15722 
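	/* Scan all eight functions in this slot for the sibling port. */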
15723 	for (func = 0; func < 8; func++) {
15724 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15725 		if (peer && peer != tp->pdev)
15726 			break;
15727 		pci_dev_put(peer);
15728 	}
15729 	/* The 5704 can be configured in single-port mode; set peer
15730 	 * to tp->pdev in that case.
15731 	 */
15732 	if (!peer) {
15733 		peer = tp->pdev;
15734 		return peer;
15735 	}
15736 
15737 	/*
15738 	 * We don't need to keep the refcount elevated; there's no way
15739 	 * to remove one half of this device without removing the other.
15740 	 */
15741 	pci_dev_put(peer);
15742 
15743 	return peer;
15744 }
15745 
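/* Decode the chip revision from MISC_HOST_CTRL or, on newer devices,
 * from a product ID register, then derive the chip-class flags.
 */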
15746 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15747 {
15748 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15749 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15750 		u32 reg;
15751 
15752 		/* All devices that use the alternate
15753 		 * ASIC REV location have a CPMU.
15754 		 */
15755 		tg3_flag_set(tp, CPMU_PRESENT);
15756 
15757 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15758 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15759 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15760 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15761 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15762 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15763 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15764 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15765 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15766 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15767 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15768 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15769 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15770 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15771 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15772 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15773 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15774 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15775 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15776 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15777 		else
15778 			reg = TG3PCI_PRODID_ASICREV;
15779 
15780 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15781 	}
15782 
15783 	/* Wrong chip ID in 5752 A0. This code can be removed later
15784 	 * as A0 is not in production.
15785 	 */
15786 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15787 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15788 
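	/* 5717 C0 is handled as a 5720 A0 throughout the driver. */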
15789 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15790 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15791 
15792 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15793 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15794 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15795 		tg3_flag_set(tp, 5717_PLUS);
15796 
15797 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15798 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15799 		tg3_flag_set(tp, 57765_CLASS);
15800 
15801 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15802 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15803 		tg3_flag_set(tp, 57765_PLUS);
15804 
15805 	/* Intentionally exclude ASIC_REV_5906 */
15806 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15807 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15808 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15809 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15810 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15811 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15812 	    tg3_flag(tp, 57765_PLUS))
15813 		tg3_flag_set(tp, 5755_PLUS);
15814 
15815 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15816 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15817 		tg3_flag_set(tp, 5780_CLASS);
15818 
15819 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15820 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15821 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15822 	    tg3_flag(tp, 5755_PLUS) ||
15823 	    tg3_flag(tp, 5780_CLASS))
15824 		tg3_flag_set(tp, 5750_PLUS);
15825 
15826 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15827 	    tg3_flag(tp, 5750_PLUS))
15828 		tg3_flag_set(tp, 5705_PLUS);
15829 }
15830 
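/* A device is 10/100-only if the 5703 board ID straps say so, if the
 * phy is a FET, or if the PCI table entry carries the 10_100_ONLY
 * driver_data flag (qualified on 5705 by the 5705_10_100 flag).
 */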
15831 static bool tg3_10_100_only_device(struct tg3 *tp,
15832 				   const struct pci_device_id *ent)
15833 {
15834 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15835 
15836 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15837 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15838 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15839 		return true;
15840 
15841 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15842 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15843 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15844 				return true;
15845 		} else {
15846 			return true;
15847 		}
15848 	}
15849 
15850 	return false;
15851 }
15852 
15853 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15854 {
15855 	u32 misc_ctrl_reg;
15856 	u32 pci_state_reg, grc_misc_cfg;
15857 	u32 val;
15858 	u16 pci_cmd;
15859 	int err;
15860 
15861 	/* Force memory write invalidate off.  If we leave it on,
15862 	 * then on 5700_BX chips we have to enable a workaround.
15863 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15864 	 * to match the cacheline size.  The Broadcom driver has this
15865 	 * workaround but turns MWI off all the time, so it never uses
15866 	 * it.  This seems to suggest that the workaround is insufficient.
15867 	 */
15868 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15869 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15870 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15871 
15872 	/* Important! -- Make sure register accesses are byteswapped
15873 	 * correctly.  Also, for those chips that require it, make
15874 	 * sure that indirect register accesses are enabled before
15875 	 * the first operation.
15876 	 */
15877 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15878 			      &misc_ctrl_reg);
15879 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15880 			       MISC_HOST_CTRL_CHIPREV);
15881 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15882 			       tp->misc_host_ctrl);
15883 
15884 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15885 
15886 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15887 	 * we need to disable memory and use config. cycles
15888 	 * only to access all registers. The 5702/03 chips
15889 	 * can mistakenly decode the special cycles from the
15890 	 * ICH chipsets as memory write cycles, causing corruption
15891 	 * of register and memory space. Only certain ICH bridges
15892 	 * will drive special cycles with non-zero data during the
15893 	 * address phase which can fall within the 5703's address
15894 	 * range. This is not an ICH bug as the PCI spec allows
15895 	 * non-zero address during special cycles. However, only
15896 	 * these ICH bridges are known to drive non-zero addresses
15897 	 * during special cycles.
15898 	 *
15899 	 * Since special cycles do not cross PCI bridges, we only
15900 	 * enable this workaround if the 5703 is on the secondary
15901 	 * bus of these ICH bridges.
15902 	 */
15903 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15904 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15905 		static struct tg3_dev_id {
15906 			u32	vendor;
15907 			u32	device;
15908 			u32	rev;
15909 		} ich_chipsets[] = {
15910 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15911 			  PCI_ANY_ID },
15912 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15913 			  PCI_ANY_ID },
15914 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15915 			  0xa },
15916 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15917 			  PCI_ANY_ID },
15918 			{ },
15919 		};
15920 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15921 		struct pci_dev *bridge = NULL;
15922 
15923 		while (pci_id->vendor != 0) {
15924 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15925 						bridge);
15926 			if (!bridge) {
15927 				pci_id++;
15928 				continue;
15929 			}
15930 			if (pci_id->rev != PCI_ANY_ID) {
15931 				if (bridge->revision > pci_id->rev)
15932 					continue;
15933 			}
15934 			if (bridge->subordinate &&
15935 			    (bridge->subordinate->number ==
15936 			     tp->pdev->bus->number)) {
15937 				tg3_flag_set(tp, ICH_WORKAROUND);
15938 				pci_dev_put(bridge);
15939 				break;
15940 			}
15941 		}
15942 	}
15943 
15944 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15945 		static struct tg3_dev_id {
15946 			u32	vendor;
15947 			u32	device;
15948 		} bridge_chipsets[] = {
15949 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15950 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15951 			{ },
15952 		};
15953 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15954 		struct pci_dev *bridge = NULL;
15955 
15956 		while (pci_id->vendor != 0) {
15957 			bridge = pci_get_device(pci_id->vendor,
15958 						pci_id->device,
15959 						bridge);
15960 			if (!bridge) {
15961 				pci_id++;
15962 				continue;
15963 			}
15964 			if (bridge->subordinate &&
15965 			    (bridge->subordinate->number <=
15966 			     tp->pdev->bus->number) &&
15967 			    (bridge->subordinate->busn_res.end >=
15968 			     tp->pdev->bus->number)) {
15969 				tg3_flag_set(tp, 5701_DMA_BUG);
15970 				pci_dev_put(bridge);
15971 				break;
15972 			}
15973 		}
15974 	}
15975 
15976 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
15977 	 * DMA addresses > 40-bit. This bridge may have other additional
15978 	 * DMA addresses > 40-bit. This bridge may have additional
15979 	 * Any tg3 device found behind the bridge will also need the 40-bit
15980 	 * DMA workaround.
15981 	 */
15982 	if (tg3_flag(tp, 5780_CLASS)) {
15983 		tg3_flag_set(tp, 40BIT_DMA_BUG);
15984 		tp->msi_cap = tp->pdev->msi_cap;
15985 	} else {
15986 		struct pci_dev *bridge = NULL;
15987 
15988 		do {
15989 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15990 						PCI_DEVICE_ID_SERVERWORKS_EPB,
15991 						bridge);
15992 			if (bridge && bridge->subordinate &&
15993 			    (bridge->subordinate->number <=
15994 			     tp->pdev->bus->number) &&
15995 			    (bridge->subordinate->busn_res.end >=
15996 			     tp->pdev->bus->number)) {
15997 				tg3_flag_set(tp, 40BIT_DMA_BUG);
15998 				pci_dev_put(bridge);
15999 				break;
16000 			}
16001 		} while (bridge);
16002 	}
16003 
16004 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16005 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16006 		tp->pdev_peer = tg3_find_peer(tp);
16007 
16008 	/* Determine TSO capabilities */
16009 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16010 		; /* Do nothing. HW bug. */
16011 	else if (tg3_flag(tp, 57765_PLUS))
16012 		tg3_flag_set(tp, HW_TSO_3);
16013 	else if (tg3_flag(tp, 5755_PLUS) ||
16014 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16015 		tg3_flag_set(tp, HW_TSO_2);
16016 	else if (tg3_flag(tp, 5750_PLUS)) {
16017 		tg3_flag_set(tp, HW_TSO_1);
16018 		tg3_flag_set(tp, TSO_BUG);
16019 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16020 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16021 			tg3_flag_clear(tp, TSO_BUG);
16022 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16023 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16024 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16025 		tg3_flag_set(tp, FW_TSO);
16026 		tg3_flag_set(tp, TSO_BUG);
16027 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16028 			tp->fw_needed = FIRMWARE_TG3TSO5;
16029 		else
16030 			tp->fw_needed = FIRMWARE_TG3TSO;
16031 	}
16032 
16033 	/* Selectively allow TSO based on operating conditions */
16034 	if (tg3_flag(tp, HW_TSO_1) ||
16035 	    tg3_flag(tp, HW_TSO_2) ||
16036 	    tg3_flag(tp, HW_TSO_3) ||
16037 	    tg3_flag(tp, FW_TSO)) {
16038 		/* For firmware TSO, assume ASF is disabled.
16039 		 * We'll disable TSO later if we discover ASF
16040 		 * is enabled in tg3_get_eeprom_hw_cfg().
16041 		 */
16042 		tg3_flag_set(tp, TSO_CAPABLE);
16043 	} else {
16044 		tg3_flag_clear(tp, TSO_CAPABLE);
16045 		tg3_flag_clear(tp, TSO_BUG);
16046 		tp->fw_needed = NULL;
16047 	}
16048 
16049 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16050 		tp->fw_needed = FIRMWARE_TG3;
16051 
16052 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16053 		tp->fw_needed = FIRMWARE_TG357766;
16054 
16055 	tp->irq_max = 1;
16056 
16057 	if (tg3_flag(tp, 5750_PLUS)) {
16058 		tg3_flag_set(tp, SUPPORT_MSI);
16059 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16060 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16061 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16062 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16063 		     tp->pdev_peer == tp->pdev))
16064 			tg3_flag_clear(tp, SUPPORT_MSI);
16065 
16066 		if (tg3_flag(tp, 5755_PLUS) ||
16067 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16068 			tg3_flag_set(tp, 1SHOT_MSI);
16069 		}
16070 
16071 		if (tg3_flag(tp, 57765_PLUS)) {
16072 			tg3_flag_set(tp, SUPPORT_MSIX);
16073 			tp->irq_max = TG3_IRQ_MAX_VECS;
16074 		}
16075 	}
16076 
16077 	tp->txq_max = 1;
16078 	tp->rxq_max = 1;
16079 	if (tp->irq_max > 1) {
16080 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16081 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16082 
16083 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16084 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16085 			tp->txq_max = tp->irq_max - 1;
16086 	}
16087 
16088 	if (tg3_flag(tp, 5755_PLUS) ||
16089 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16090 		tg3_flag_set(tp, SHORT_DMA_BUG);
16091 
16092 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16093 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16094 
16095 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16096 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16097 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16098 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16099 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16100 
16101 	if (tg3_flag(tp, 57765_PLUS) &&
16102 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16103 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16104 
16105 	if (!tg3_flag(tp, 5705_PLUS) ||
16106 	    tg3_flag(tp, 5780_CLASS) ||
16107 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16108 		tg3_flag_set(tp, JUMBO_CAPABLE);
16109 
16110 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16111 			      &pci_state_reg);
16112 
16113 	if (pci_is_pcie(tp->pdev)) {
16114 		u16 lnkctl;
16115 
16116 		tg3_flag_set(tp, PCI_EXPRESS);
16117 
16118 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16119 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16120 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16121 				tg3_flag_clear(tp, HW_TSO_2);
16122 				tg3_flag_clear(tp, TSO_CAPABLE);
16123 			}
16124 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16125 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16126 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16127 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16128 				tg3_flag_set(tp, CLKREQ_BUG);
16129 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16130 			tg3_flag_set(tp, L1PLLPD_EN);
16131 		}
16132 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16133 		/* BCM5785 devices are effectively PCIe devices, and should
16134 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16135 		 * section.
16136 		 */
16137 		tg3_flag_set(tp, PCI_EXPRESS);
16138 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16139 		   tg3_flag(tp, 5780_CLASS)) {
16140 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16141 		if (!tp->pcix_cap) {
16142 			dev_err(&tp->pdev->dev,
16143 				"Cannot find PCI-X capability, aborting\n");
16144 			return -EIO;
16145 		}
16146 
16147 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16148 			tg3_flag_set(tp, PCIX_MODE);
16149 	}
16150 
16151 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16152 	 * reordering to the mailbox registers done by the host
16153 	 * controller can cause major trouble.  We read back from
16154 	 * every mailbox register write to force the writes to be
16155 	 * posted to the chip in order.
16156 	 */
16157 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16158 	    !tg3_flag(tp, PCI_EXPRESS))
16159 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16160 
16161 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16162 			     &tp->pci_cacheline_sz);
16163 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16164 			     &tp->pci_lat_timer);
16165 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16166 	    tp->pci_lat_timer < 64) {
16167 		tp->pci_lat_timer = 64;
16168 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16169 				      tp->pci_lat_timer);
16170 	}
16171 
16172 	/* Important! -- It is critical that the PCI-X hw workaround
16173 	 * situation is decided before the first MMIO register access.
16174 	 */
16175 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16176 		/* 5700 BX chips need to have their TX producer index
16177 		 * mailboxes written twice to work around a bug.
16178 		 */
16179 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16180 
16181 		/* If we are in PCI-X mode, enable the register write workaround.
16182 		 *
16183 		 * The workaround is to use indirect register accesses
16184 		 * for all chip writes not to mailbox registers.
16185 		 */
16186 		if (tg3_flag(tp, PCIX_MODE)) {
16187 			u32 pm_reg;
16188 
16189 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16190 
16191 			/* The chip can have its power management PCI config
16192 			 * space registers clobbered due to this bug.
16193 			 * So explicitly force the chip into D0 here.
16194 			 */
16195 			pci_read_config_dword(tp->pdev,
16196 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16197 					      &pm_reg);
16198 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16199 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16200 			pci_write_config_dword(tp->pdev,
16201 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16202 					       pm_reg);
16203 
16204 			/* Also, force SERR#/PERR# in PCI command. */
16205 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16206 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16207 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16208 		}
16209 	}
16210 
16211 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16212 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16213 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16214 		tg3_flag_set(tp, PCI_32BIT);
16215 
16216 	/* Chip-specific fixup from Broadcom driver */
16217 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16218 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16219 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16220 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16221 	}
16222 
16223 	/* Default fast path register access methods */
16224 	tp->read32 = tg3_read32;
16225 	tp->write32 = tg3_write32;
16226 	tp->read32_mbox = tg3_read32;
16227 	tp->write32_mbox = tg3_write32;
16228 	tp->write32_tx_mbox = tg3_write32;
16229 	tp->write32_rx_mbox = tg3_write32;
16230 
16231 	/* Various workaround register access methods */
16232 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16233 		tp->write32 = tg3_write_indirect_reg32;
16234 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16235 		 (tg3_flag(tp, PCI_EXPRESS) &&
16236 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16237 		/*
16238 		 * Back to back register writes can cause problems on these
16239 		 * chips; the workaround is to read back all reg writes
16240 		 * except those to mailbox regs.
16241 		 *
16242 		 * See tg3_write_indirect_reg32().
16243 		 */
16244 		tp->write32 = tg3_write_flush_reg32;
16245 	}
16246 
16247 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16248 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16249 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16250 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16251 	}
16252 
16253 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16254 		tp->read32 = tg3_read_indirect_reg32;
16255 		tp->write32 = tg3_write_indirect_reg32;
16256 		tp->read32_mbox = tg3_read_indirect_mbox;
16257 		tp->write32_mbox = tg3_write_indirect_mbox;
16258 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16259 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16260 
16261 		iounmap(tp->regs);
16262 		tp->regs = NULL;
16263 
16264 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16265 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16266 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16267 	}
16268 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16269 		tp->read32_mbox = tg3_read32_mbox_5906;
16270 		tp->write32_mbox = tg3_write32_mbox_5906;
16271 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16272 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16273 	}
16274 
16275 	if (tp->write32 == tg3_write_indirect_reg32 ||
16276 	    (tg3_flag(tp, PCIX_MODE) &&
16277 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16278 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16279 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16280 
16281 	/* The memory arbiter has to be enabled in order for SRAM accesses
16282 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16283 	 * sure it is enabled, but other entities such as system netboot
16284 	 * code might disable it.
16285 	 */
16286 	val = tr32(MEMARB_MODE);
16287 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16288 
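	/* Work out which PCI function this port is; multi-function
	 * chips report it differently depending on the bus mode.
	 */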
16289 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16290 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16291 	    tg3_flag(tp, 5780_CLASS)) {
16292 		if (tg3_flag(tp, PCIX_MODE)) {
16293 			pci_read_config_dword(tp->pdev,
16294 					      tp->pcix_cap + PCI_X_STATUS,
16295 					      &val);
16296 			tp->pci_fn = val & 0x7;
16297 		}
16298 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16299 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16300 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16301 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16302 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16303 			val = tr32(TG3_CPMU_STATUS);
16304 
16305 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16306 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16307 		else
16308 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16309 				     TG3_CPMU_STATUS_FSHFT_5719;
16310 	}
16311 
16312 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16313 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16314 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16315 	}
16316 
16317 	/* Get eeprom hw config before calling tg3_set_power_state().
16318 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16319 	 * determined before calling tg3_set_power_state() so that
16320 	 * we know whether or not to switch out of Vaux power.
16321 	 * When the flag is set, it means that GPIO1 is used for eeprom
16322 	 * write protect and also implies that it is a LOM where GPIOs
16323 	 * are not used to switch power.
16324 	 */
16325 	tg3_get_eeprom_hw_cfg(tp);
16326 
16327 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16328 		tg3_flag_clear(tp, TSO_CAPABLE);
16329 		tg3_flag_clear(tp, TSO_BUG);
16330 		tp->fw_needed = NULL;
16331 	}
16332 
16333 	if (tg3_flag(tp, ENABLE_APE)) {
16334 		/* Allow reads and writes to the
16335 		 * APE register and memory space.
16336 		 */
16337 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16338 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16339 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16340 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16341 				       pci_state_reg);
16342 
16343 		tg3_ape_lock_init(tp);
16344 	}
16345 
16346 	/* Set up tp->grc_local_ctrl before calling
16347 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16348 	 * will bring 5700's external PHY out of reset.
16349 	 * It is also used as eeprom write protect on LOMs.
16350 	 */
16351 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16352 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16353 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16354 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16355 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16356 	/* Unused GPIO3 must be driven as output on 5752 because there
16357 	 * are no pull-up resistors on unused GPIO pins.
16358 	 */
16359 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16360 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16361 
16362 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16363 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16364 	    tg3_flag(tp, 57765_CLASS))
16365 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16366 
16367 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16368 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16369 		/* Turn off the debug UART. */
16370 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16371 		if (tg3_flag(tp, IS_NIC))
16372 			/* Keep VMain power. */
16373 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16374 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16375 	}
16376 
16377 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16378 		tp->grc_local_ctrl |=
16379 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16380 
16381 	/* Switch out of Vaux if it is a NIC */
16382 	tg3_pwrsrc_switch_to_vmain(tp);
16383 
16384 	/* Derive initial jumbo mode from MTU assigned in
16385 	 * ether_setup() via the alloc_etherdev() call
16386 	 */
16387 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16388 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16389 
16390 	/* Determine WakeOnLan speed to use. */
16391 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16392 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16393 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16394 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16395 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16396 	} else {
16397 		tg3_flag_set(tp, WOL_SPEED_100MB);
16398 	}
16399 
16400 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16401 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16402 
	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16404 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16405 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16406 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16407 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16408 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16409 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16410 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16411 
16412 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16413 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16414 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16415 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16416 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16417 
16418 	if (tg3_flag(tp, 5705_PLUS) &&
16419 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16420 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16421 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16422 	    !tg3_flag(tp, 57765_PLUS)) {
16423 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16424 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16425 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16426 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16427 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16428 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16429 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16430 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16431 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16432 		} else
16433 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16434 	}
16435 
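	/* Non-AX 5784 parts keep PHY configuration in OTP; a read of
	 * zero means nothing was programmed, so use the default.
	 */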
16436 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16437 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16438 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16439 		if (tp->phy_otp == 0)
16440 			tp->phy_otp = TG3_OTP_DEFAULT;
16441 	}
16442 
16443 	if (tg3_flag(tp, CPMU_PRESENT))
16444 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16445 	else
16446 		tp->mi_mode = MAC_MI_MODE_BASE;
16447 
16448 	tp->coalesce_mode = 0;
16449 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16450 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16451 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16452 
16453 	/* Set these bits to enable statistics workaround. */
16454 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16455 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16456 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16457 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16458 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16459 	}
16460 
16461 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16462 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16463 		tg3_flag_set(tp, USE_PHYLIB);
16464 
16465 	err = tg3_mdio_init(tp);
16466 	if (err)
16467 		return err;
16468 
16469 	/* Initialize data/descriptor byte/word swapping. */
16470 	val = tr32(GRC_MODE);
16471 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16472 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16473 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16474 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16475 			GRC_MODE_B2HRX_ENABLE |
16476 			GRC_MODE_HTX2B_ENABLE |
16477 			GRC_MODE_HOST_STACKUP);
16478 	else
16479 		val &= GRC_MODE_HOST_STACKUP;
16480 
16481 	tw32(GRC_MODE, val | tp->grc_mode);
16482 
16483 	tg3_switch_clocks(tp);
16484 
16485 	/* Clear this out for sanity. */
16486 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16487 
16488 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16489 			      &pci_state_reg);
16490 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16491 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16492 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16493 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16494 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16495 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16496 			void __iomem *sram_base;
16497 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * values come back wrong, force-enable the PCIX
			 * workaround.
			 */
16502 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16503 
16504 			writel(0x00000000, sram_base);
16505 			writel(0x00000000, sram_base + 4);
16506 			writel(0xffffffff, sram_base + 4);
16507 			if (readl(sram_base) != 0x00000000)
16508 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16509 		}
16510 	}
16511 
16512 	udelay(50);
16513 	tg3_nvram_init(tp);
16514 
16515 	/* If the device has an NVRAM, no need to load patch firmware */
16516 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16517 	    !tg3_flag(tp, NO_NVRAM))
16518 		tp->fw_needed = NULL;
16519 
16520 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16521 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16522 
16523 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16524 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16525 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16526 		tg3_flag_set(tp, IS_5788);
16527 
16528 	if (!tg3_flag(tp, IS_5788) &&
16529 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16530 		tg3_flag_set(tp, TAGGED_STATUS);
16531 	if (tg3_flag(tp, TAGGED_STATUS)) {
16532 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16533 				      HOSTCC_MODE_CLRTICK_TXBD);
16534 
16535 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16536 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16537 				       tp->misc_host_ctrl);
16538 	}
16539 
16540 	/* Preserve the APE MAC_MODE bits */
16541 	if (tg3_flag(tp, ENABLE_APE))
16542 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16543 	else
16544 		tp->mac_mode = 0;
16545 
16546 	if (tg3_10_100_only_device(tp, ent))
16547 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16548 
16549 	err = tg3_phy_probe(tp);
16550 	if (err) {
16551 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16552 		/* ... but do not return immediately ... */
16553 		tg3_mdio_fini(tp);
16554 	}
16555 
16556 	tg3_read_vpd(tp);
16557 	tg3_read_fw_ver(tp);
16558 
16559 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16560 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16561 	} else {
16562 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16563 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16564 		else
16565 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16566 	}
16567 
16568 	/* 5700 {AX,BX} chips have a broken status block link
16569 	 * change bit implementation, so we must use the
16570 	 * status register in those cases.
16571 	 */
16572 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16573 		tg3_flag_set(tp, USE_LINKCHG_REG);
16574 	else
16575 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16576 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
16581 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16582 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16583 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16584 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16585 		tg3_flag_set(tp, USE_LINKCHG_REG);
16586 	}
16587 
16588 	/* For all SERDES we poll the MAC status register. */
16589 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16590 		tg3_flag_set(tp, POLL_SERDES);
16591 	else
16592 		tg3_flag_clear(tp, POLL_SERDES);
16593 
16594 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16595 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
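	/* The 5701 in PCI-X mode cannot DMA to 2-byte-aligned buffers
	 * (a known erratum), so the NET_IP_ALIGN offset must be dropped
	 * there; without efficient unaligned access, every packet is
	 * then forced through the copy path to realign the IP header.
	 */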
16596 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16597 	    tg3_flag(tp, PCIX_MODE)) {
16598 		tp->rx_offset = NET_SKB_PAD;
16599 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16600 		tp->rx_copy_thresh = ~(u16)0;
16601 #endif
16602 	}
16603 
16604 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16605 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16606 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16607 
16608 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16609 
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
16613 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16614 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16615 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16616 		tp->rx_std_max_post = 8;
16617 
16618 	if (tg3_flag(tp, ASPM_WORKAROUND))
16619 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16620 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16621 
16622 	return err;
16623 }
16624 
16625 #ifdef CONFIG_SPARC
16626 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16627 {
16628 	struct net_device *dev = tp->dev;
16629 	struct pci_dev *pdev = tp->pdev;
16630 	struct device_node *dp = pci_device_to_OF_node(pdev);
16631 	const unsigned char *addr;
16632 	int len;
16633 
16634 	addr = of_get_property(dp, "local-mac-address", &len);
16635 	if (addr && len == 6) {
16636 		memcpy(dev->dev_addr, addr, 6);
16637 		return 0;
16638 	}
16639 	return -ENODEV;
16640 }
16641 
16642 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16643 {
16644 	struct net_device *dev = tp->dev;
16645 
16646 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16647 	return 0;
16648 }
16649 #endif
16650 
16651 static int tg3_get_device_address(struct tg3 *tp)
16652 {
16653 	struct net_device *dev = tp->dev;
16654 	u32 hi, lo, mac_offset;
16655 	int addr_ok = 0;
16656 	int err;
16657 
16658 #ifdef CONFIG_SPARC
16659 	if (!tg3_get_macaddr_sparc(tp))
16660 		return 0;
16661 #endif
16662 
16663 	if (tg3_flag(tp, IS_SSB_CORE)) {
16664 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16665 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16666 			return 0;
16667 	}
16668 
16669 	mac_offset = 0x7c;
16670 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16671 	    tg3_flag(tp, 5780_CLASS)) {
16672 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16673 			mac_offset = 0xcc;
16674 		if (tg3_nvram_lock(tp))
16675 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16676 		else
16677 			tg3_nvram_unlock(tp);
16678 	} else if (tg3_flag(tp, 5717_PLUS)) {
16679 		if (tp->pci_fn & 1)
16680 			mac_offset = 0xcc;
16681 		if (tp->pci_fn > 1)
16682 			mac_offset += 0x18c;
16683 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16684 		mac_offset = 0x10;
16685 
16686 	/* First try to get it from MAC address mailbox. */
16687 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
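	/* 0x484b ("HK" in ASCII) in the upper 16 bits of the high word
	 * appears to be the bootcode's "address valid" signature.
	 */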
16688 	if ((hi >> 16) == 0x484b) {
16689 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16690 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16691 
16692 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16693 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16694 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16695 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16696 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16697 
16698 		/* Some old bootcode may report a 0 MAC address in SRAM */
16699 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16700 	}
16701 	if (!addr_ok) {
16702 		/* Next, try NVRAM. */
16703 		if (!tg3_flag(tp, NO_NVRAM) &&
16704 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16705 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16706 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16707 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16708 		}
16709 		/* Finally just fetch it out of the MAC control regs. */
16710 		else {
16711 			hi = tr32(MAC_ADDR_0_HIGH);
16712 			lo = tr32(MAC_ADDR_0_LOW);
16713 
16714 			dev->dev_addr[5] = lo & 0xff;
16715 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16716 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16717 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16718 			dev->dev_addr[1] = hi & 0xff;
16719 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16720 		}
16721 	}
16722 
16723 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16724 #ifdef CONFIG_SPARC
16725 		if (!tg3_get_default_macaddr_sparc(tp))
16726 			return 0;
16727 #endif
16728 		return -EINVAL;
16729 	}
16730 	return 0;
16731 }
16732 
16733 #define BOUNDARY_SINGLE_CACHELINE	1
16734 #define BOUNDARY_MULTI_CACHELINE	2
16735 
16736 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16737 {
16738 	int cacheline_size;
16739 	u8 byte;
16740 	int goal;
16741 
16742 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16743 	if (byte == 0)
16744 		cacheline_size = 1024;
16745 	else
16746 		cacheline_size = (int) byte * 4;
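	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, e.g. a value
	 * of 0x10 means 16 * 4 = 64 bytes.  Zero (i.e. not programmed)
	 * falls back to the 1024-byte maximum.
	 */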
16747 
16748 	/* On 5703 and later chips, the boundary bits have no
16749 	 * effect.
16750 	 */
16751 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16752 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16753 	    !tg3_flag(tp, PCI_EXPRESS))
16754 		goto out;
16755 
16756 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16757 	goal = BOUNDARY_MULTI_CACHELINE;
16758 #else
16759 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16760 	goal = BOUNDARY_SINGLE_CACHELINE;
16761 #else
16762 	goal = 0;
16763 #endif
16764 #endif
16765 
16766 	if (tg3_flag(tp, 57765_PLUS)) {
16767 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16768 		goto out;
16769 	}
16770 
16771 	if (!goal)
16772 		goto out;
16773 
16774 	/* PCI controllers on most RISC systems tend to disconnect
16775 	 * when a device tries to burst across a cache-line boundary.
16776 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16777 	 *
16778 	 * Unfortunately, for PCI-E there are only limited
16779 	 * write-side controls for this, and thus for reads
16780 	 * we will still get the disconnects.  We'll also waste
16781 	 * these PCI cycles for both read and write for chips
16782 	 * other than 5700 and 5701 which do not implement the
16783 	 * boundary bits.
16784 	 */
16785 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16786 		switch (cacheline_size) {
16787 		case 16:
16788 		case 32:
16789 		case 64:
16790 		case 128:
16791 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16792 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16793 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16794 			} else {
16795 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16796 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16797 			}
16798 			break;
16799 
16800 		case 256:
16801 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16802 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16803 			break;
16804 
16805 		default:
16806 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16807 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16808 			break;
16809 		}
16810 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16811 		switch (cacheline_size) {
16812 		case 16:
16813 		case 32:
16814 		case 64:
16815 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16816 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16817 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16818 				break;
16819 			}
16820 			/* fallthrough */
16821 		case 128:
16822 		default:
16823 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16824 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16825 			break;
16826 		}
16827 	} else {
16828 		switch (cacheline_size) {
16829 		case 16:
16830 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16831 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16832 					DMA_RWCTRL_WRITE_BNDRY_16);
16833 				break;
16834 			}
16835 			/* fallthrough */
16836 		case 32:
16837 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16838 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16839 					DMA_RWCTRL_WRITE_BNDRY_32);
16840 				break;
16841 			}
16842 			/* fallthrough */
16843 		case 64:
16844 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16845 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16846 					DMA_RWCTRL_WRITE_BNDRY_64);
16847 				break;
16848 			}
16849 			/* fallthrough */
16850 		case 128:
16851 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16852 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16853 					DMA_RWCTRL_WRITE_BNDRY_128);
16854 				break;
16855 			}
16856 			/* fallthrough */
16857 		case 256:
16858 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16859 				DMA_RWCTRL_WRITE_BNDRY_256);
16860 			break;
16861 		case 512:
16862 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16863 				DMA_RWCTRL_WRITE_BNDRY_512);
16864 			break;
16865 		case 1024:
16866 		default:
16867 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16868 				DMA_RWCTRL_WRITE_BNDRY_1024);
16869 			break;
16870 		}
16871 	}
16872 
16873 out:
16874 	return val;
16875 }
16876 
16877 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16878 			   int size, bool to_device)
16879 {
16880 	struct tg3_internal_buffer_desc test_desc;
16881 	u32 sram_dma_descs;
16882 	int i, ret;
16883 
16884 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16885 
16886 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16887 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16888 	tw32(RDMAC_STATUS, 0);
16889 	tw32(WDMAC_STATUS, 0);
16890 
16891 	tw32(BUFMGR_MODE, 0);
16892 	tw32(FTQ_RESET, 0);
16893 
16894 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16895 	test_desc.addr_lo = buf_dma & 0xffffffff;
16896 	test_desc.nic_mbuf = 0x00002100;
16897 	test_desc.len = size;
16898 
16899 	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an
	 * initial scan.
16903 	 *
16904 	 * Broadcom tells me:
16905 	 *   ...the DMA engine is connected to the GRC block and a DMA
16906 	 *   reset may affect the GRC block in some unpredictable way...
16907 	 *   The behavior of resets to individual blocks has not been tested.
16908 	 *
16909 	 * Broadcom noted the GRC reset will also reset all sub-components.
16910 	 */
16911 	if (to_device) {
16912 		test_desc.cqid_sqid = (13 << 8) | 2;
16913 
16914 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16915 		udelay(40);
16916 	} else {
16917 		test_desc.cqid_sqid = (16 << 8) | 7;
16918 
16919 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16920 		udelay(40);
16921 	}
16922 	test_desc.flags = 0x00000005;
16923 
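	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config space memory window:
	 * TG3PCI_MEM_WIN_BASE_ADDR selects the SRAM address and
	 * TG3PCI_MEM_WIN_DATA carries the data.
	 */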
16924 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16925 		u32 val;
16926 
16927 		val = *(((u32 *)&test_desc) + i);
16928 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16929 				       sram_dma_descs + (i * sizeof(u32)));
16930 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16931 	}
16932 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16933 
16934 	if (to_device)
16935 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16936 	else
16937 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16938 
16939 	ret = -ENODEV;
16940 	for (i = 0; i < 40; i++) {
16941 		u32 val;
16942 
16943 		if (to_device)
16944 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16945 		else
16946 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16947 		if ((val & 0xffff) == sram_dma_descs) {
16948 			ret = 0;
16949 			break;
16950 		}
16951 
16952 		udelay(100);
16953 	}
16954 
16955 	return ret;
16956 }
16957 
16958 #define TEST_BUFFER_SIZE	0x2000
16959 
16960 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16961 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16962 	{ },
16963 };
16964 
16965 static int tg3_test_dma(struct tg3 *tp)
16966 {
16967 	dma_addr_t buf_dma;
16968 	u32 *buf, saved_dma_rwctrl;
16969 	int ret = 0;
16970 
16971 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16972 				 &buf_dma, GFP_KERNEL);
16973 	if (!buf) {
16974 		ret = -ENOMEM;
16975 		goto out_nofree;
16976 	}
16977 
16978 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16979 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16980 
16981 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16982 
16983 	if (tg3_flag(tp, 57765_PLUS))
16984 		goto out;
16985 
16986 	if (tg3_flag(tp, PCI_EXPRESS)) {
16987 		/* DMA read watermark not used on PCIE */
16988 		tp->dma_rwctrl |= 0x00180000;
16989 	} else if (!tg3_flag(tp, PCIX_MODE)) {
16990 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16991 		    tg3_asic_rev(tp) == ASIC_REV_5750)
16992 			tp->dma_rwctrl |= 0x003f0000;
16993 		else
16994 			tp->dma_rwctrl |= 0x003f000f;
16995 	} else {
16996 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16997 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
16998 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16999 			u32 read_water = 0x7;
17000 
17001 			/* If the 5704 is behind the EPB bridge, we can
17002 			 * do the less restrictive ONE_DMA workaround for
17003 			 * better performance.
17004 			 */
17005 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17006 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17007 				tp->dma_rwctrl |= 0x8000;
17008 			else if (ccval == 0x6 || ccval == 0x7)
17009 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17010 
17011 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17012 				read_water = 4;
17013 			/* Set bit 23 to enable PCIX hw bug fix */
17014 			tp->dma_rwctrl |=
17015 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17016 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17017 				(1 << 23);
17018 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17019 			/* 5780 always in PCIX mode */
17020 			tp->dma_rwctrl |= 0x00144000;
17021 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17022 			/* 5714 always in PCIX mode */
17023 			tp->dma_rwctrl |= 0x00148000;
17024 		} else {
17025 			tp->dma_rwctrl |= 0x001b000f;
17026 		}
17027 	}
17028 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17029 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17030 
17031 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17032 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17033 		tp->dma_rwctrl &= 0xfffffff0;
17034 
17035 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17036 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17037 		/* Remove this if it causes problems for some boards. */
17038 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17039 
17040 		/* On 5700/5701 chips, we need to set this bit.
17041 		 * Otherwise the chip will issue cacheline transactions
17042 		 * to streamable DMA memory with not all the byte
17043 		 * enables turned on.  This is an error on several
17044 		 * RISC PCI controllers, in particular sparc64.
17045 		 *
17046 		 * On 5703/5704 chips, this bit has been reassigned
17047 		 * a different meaning.  In particular, it is used
17048 		 * on those chips to enable a PCI-X workaround.
17049 		 */
17050 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17051 	}
17052 
17053 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17054 
17055 #if 0
17056 	/* Unneeded, already done by tg3_get_invariants.  */
17057 	tg3_switch_clocks(tp);
17058 #endif
17059 
17060 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17061 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17062 		goto out;
17063 
17064 	/* It is best to perform DMA test with maximum write burst size
17065 	 * to expose the 5700/5701 write DMA bug.
17066 	 */
17067 	saved_dma_rwctrl = tp->dma_rwctrl;
17068 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17069 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17070 
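	/* DMA an incrementing pattern to the chip and back, verifying
	 * each word.  On a mismatch, drop to the conservative 16-byte
	 * write boundary and retry; a mismatch at that setting is fatal.
	 */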
17071 	while (1) {
17072 		u32 *p = buf, i;
17073 
17074 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17075 			p[i] = i;
17076 
17077 		/* Send the buffer to the chip. */
17078 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17079 		if (ret) {
17080 			dev_err(&tp->pdev->dev,
17081 				"%s: Buffer write failed. err = %d\n",
17082 				__func__, ret);
17083 			break;
17084 		}
17085 
17086 #if 0
17087 		/* validate data reached card RAM correctly. */
17088 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17089 			u32 val;
17090 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%u != %u)\n", __func__,
					le32_to_cpu(val), i);
17095 				/* ret = -ENODEV here? */
17096 			}
17097 			p[i] = 0;
17098 		}
17099 #endif
17100 		/* Now read it back. */
17101 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17102 		if (ret) {
17103 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17104 				"err = %d\n", __func__, ret);
17105 			break;
17106 		}
17107 
17108 		/* Verify it. */
17109 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17110 			if (p[i] == i)
17111 				continue;
17112 
17113 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17114 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17115 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17116 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17117 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17118 				break;
17119 			} else {
17120 				dev_err(&tp->pdev->dev,
17121 					"%s: Buffer corrupted on read back! "
17122 					"(%d != %d)\n", __func__, p[i], i);
17123 				ret = -ENODEV;
17124 				goto out;
17125 			}
17126 		}
17127 
17128 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17129 			/* Success. */
17130 			ret = 0;
17131 			break;
17132 		}
17133 	}
17134 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17135 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17136 		/* DMA test passed without adjusting DMA boundary,
17137 		 * now look for chipsets that are known to expose the
17138 		 * DMA bug without failing the test.
17139 		 */
17140 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17141 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17142 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17143 		} else {
17144 			/* Safe to use the calculated DMA boundary. */
17145 			tp->dma_rwctrl = saved_dma_rwctrl;
17146 		}
17147 
17148 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17149 	}
17150 
17151 out:
17152 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17153 out_nofree:
17154 	return ret;
17155 }
17156 
17157 static void tg3_init_bufmgr_config(struct tg3 *tp)
17158 {
17159 	if (tg3_flag(tp, 57765_PLUS)) {
17160 		tp->bufmgr_config.mbuf_read_dma_low_water =
17161 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17162 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17163 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17164 		tp->bufmgr_config.mbuf_high_water =
17165 			DEFAULT_MB_HIGH_WATER_57765;
17166 
17167 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17168 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17169 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17170 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17171 		tp->bufmgr_config.mbuf_high_water_jumbo =
17172 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17173 	} else if (tg3_flag(tp, 5705_PLUS)) {
17174 		tp->bufmgr_config.mbuf_read_dma_low_water =
17175 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17176 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17177 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17178 		tp->bufmgr_config.mbuf_high_water =
17179 			DEFAULT_MB_HIGH_WATER_5705;
17180 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17181 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17182 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17183 			tp->bufmgr_config.mbuf_high_water =
17184 				DEFAULT_MB_HIGH_WATER_5906;
17185 		}
17186 
17187 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17188 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17189 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17190 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17191 		tp->bufmgr_config.mbuf_high_water_jumbo =
17192 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17193 	} else {
17194 		tp->bufmgr_config.mbuf_read_dma_low_water =
17195 			DEFAULT_MB_RDMA_LOW_WATER;
17196 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17197 			DEFAULT_MB_MACRX_LOW_WATER;
17198 		tp->bufmgr_config.mbuf_high_water =
17199 			DEFAULT_MB_HIGH_WATER;
17200 
17201 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17202 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17203 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17204 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17205 		tp->bufmgr_config.mbuf_high_water_jumbo =
17206 			DEFAULT_MB_HIGH_WATER_JUMBO;
17207 	}
17208 
17209 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17210 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17211 }
17212 
17213 static char *tg3_phy_string(struct tg3 *tp)
17214 {
17215 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17216 	case TG3_PHY_ID_BCM5400:	return "5400";
17217 	case TG3_PHY_ID_BCM5401:	return "5401";
17218 	case TG3_PHY_ID_BCM5411:	return "5411";
17219 	case TG3_PHY_ID_BCM5701:	return "5701";
17220 	case TG3_PHY_ID_BCM5703:	return "5703";
17221 	case TG3_PHY_ID_BCM5704:	return "5704";
17222 	case TG3_PHY_ID_BCM5705:	return "5705";
17223 	case TG3_PHY_ID_BCM5750:	return "5750";
17224 	case TG3_PHY_ID_BCM5752:	return "5752";
17225 	case TG3_PHY_ID_BCM5714:	return "5714";
17226 	case TG3_PHY_ID_BCM5780:	return "5780";
17227 	case TG3_PHY_ID_BCM5755:	return "5755";
17228 	case TG3_PHY_ID_BCM5787:	return "5787";
17229 	case TG3_PHY_ID_BCM5784:	return "5784";
17230 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17231 	case TG3_PHY_ID_BCM5906:	return "5906";
17232 	case TG3_PHY_ID_BCM5761:	return "5761";
17233 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17234 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17235 	case TG3_PHY_ID_BCM57765:	return "57765";
17236 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17237 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17238 	case TG3_PHY_ID_BCM5762:	return "5762C";
17239 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17240 	case 0:			return "serdes";
17241 	default:		return "unknown";
17242 	}
17243 }
17244 
17245 static char *tg3_bus_string(struct tg3 *tp, char *str)
17246 {
17247 	if (tg3_flag(tp, PCI_EXPRESS)) {
17248 		strcpy(str, "PCI Express");
17249 		return str;
17250 	} else if (tg3_flag(tp, PCIX_MODE)) {
17251 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17252 
17253 		strcpy(str, "PCIX:");
17254 
17255 		if ((clock_ctrl == 7) ||
17256 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17257 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17258 			strcat(str, "133MHz");
17259 		else if (clock_ctrl == 0)
17260 			strcat(str, "33MHz");
17261 		else if (clock_ctrl == 2)
17262 			strcat(str, "50MHz");
17263 		else if (clock_ctrl == 4)
17264 			strcat(str, "66MHz");
17265 		else if (clock_ctrl == 6)
17266 			strcat(str, "100MHz");
17267 	} else {
17268 		strcpy(str, "PCI:");
17269 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17270 			strcat(str, "66MHz");
17271 		else
17272 			strcat(str, "33MHz");
17273 	}
17274 	if (tg3_flag(tp, PCI_32BIT))
17275 		strcat(str, ":32-bit");
17276 	else
17277 		strcat(str, ":64-bit");
17278 	return str;
17279 }
17280 
17281 static void tg3_init_coal(struct tg3 *tp)
17282 {
17283 	struct ethtool_coalesce *ec = &tp->coal;
17284 
17285 	memset(ec, 0, sizeof(*ec));
17286 	ec->cmd = ETHTOOL_GCOALESCE;
17287 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17288 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17289 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17290 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17291 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17292 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17293 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17294 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17295 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17296 
17297 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17298 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17299 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17300 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17301 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17302 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17303 	}
17304 
17305 	if (tg3_flag(tp, 5705_PLUS)) {
17306 		ec->rx_coalesce_usecs_irq = 0;
17307 		ec->tx_coalesce_usecs_irq = 0;
17308 		ec->stats_block_coalesce_usecs = 0;
17309 	}
17310 }
17311 
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
17314 {
17315 	struct net_device *dev;
17316 	struct tg3 *tp;
17317 	int i, err;
17318 	u32 sndmbx, rcvmbx, intmbx;
17319 	char str[40];
17320 	u64 dma_mask, persist_dma_mask;
17321 	netdev_features_t features = 0;
17322 
17323 	printk_once(KERN_INFO "%s\n", version);
17324 
17325 	err = pci_enable_device(pdev);
17326 	if (err) {
17327 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17328 		return err;
17329 	}
17330 
17331 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17332 	if (err) {
17333 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17334 		goto err_out_disable_pdev;
17335 	}
17336 
17337 	pci_set_master(pdev);
17338 
17339 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17340 	if (!dev) {
17341 		err = -ENOMEM;
17342 		goto err_out_free_res;
17343 	}
17344 
17345 	SET_NETDEV_DEV(dev, &pdev->dev);
17346 
17347 	tp = netdev_priv(dev);
17348 	tp->pdev = pdev;
17349 	tp->dev = dev;
17350 	tp->rx_mode = TG3_DEF_RX_MODE;
17351 	tp->tx_mode = TG3_DEF_TX_MODE;
17352 	tp->irq_sync = 1;
17353 
17354 	if (tg3_debug > 0)
17355 		tp->msg_enable = tg3_debug;
17356 	else
17357 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17358 
17359 	if (pdev_is_ssb_gige_core(pdev)) {
17360 		tg3_flag_set(tp, IS_SSB_CORE);
17361 		if (ssb_gige_must_flush_posted_writes(pdev))
17362 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17363 		if (ssb_gige_one_dma_at_once(pdev))
17364 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17365 		if (ssb_gige_have_roboswitch(pdev))
17366 			tg3_flag_set(tp, ROBOSWITCH);
17367 		if (ssb_gige_is_rgmii(pdev))
17368 			tg3_flag_set(tp, RGMII_MODE);
17369 	}
17370 
17371 	/* The word/byte swap controls here control register access byte
17372 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17373 	 * setting below.
17374 	 */
17375 	tp->misc_host_ctrl =
17376 		MISC_HOST_CTRL_MASK_PCI_INT |
17377 		MISC_HOST_CTRL_WORD_SWAP |
17378 		MISC_HOST_CTRL_INDIR_ACCESS |
17379 		MISC_HOST_CTRL_PCISTATE_RW;
17380 
17381 	/* The NONFRM (non-frame) byte/word swap controls take effect
17382 	 * on descriptor entries, anything which isn't packet data.
17383 	 *
17384 	 * The StrongARM chips on the board (one for tx, one for rx)
17385 	 * are running in big-endian mode.
17386 	 */
17387 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17388 			GRC_MODE_WSWAP_NONFRM_DATA);
17389 #ifdef __BIG_ENDIAN
17390 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17391 #endif
17392 	spin_lock_init(&tp->lock);
17393 	spin_lock_init(&tp->indirect_lock);
17394 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17395 
17396 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17397 	if (!tp->regs) {
17398 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17399 		err = -ENOMEM;
17400 		goto err_out_free_dev;
17401 	}
17402 
17403 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17404 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17405 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17406 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17407 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17408 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17409 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17410 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17411 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17412 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17413 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17414 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17415 		tg3_flag_set(tp, ENABLE_APE);
17416 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17417 		if (!tp->aperegs) {
17418 			dev_err(&pdev->dev,
17419 				"Cannot map APE registers, aborting\n");
17420 			err = -ENOMEM;
17421 			goto err_out_iounmap;
17422 		}
17423 	}
17424 
17425 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17426 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17427 
17428 	dev->ethtool_ops = &tg3_ethtool_ops;
17429 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17430 	dev->netdev_ops = &tg3_netdev_ops;
17431 	dev->irq = pdev->irq;
17432 
17433 	err = tg3_get_invariants(tp, ent);
17434 	if (err) {
17435 		dev_err(&pdev->dev,
17436 			"Problem fetching invariants of chip, aborting\n");
17437 		goto err_out_apeunmap;
17438 	}
17439 
17440 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17441 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17442 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17443 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17444 	 * do DMA address check in tg3_start_xmit().
17445 	 */
17446 	if (tg3_flag(tp, IS_5788))
17447 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17448 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17449 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17450 #ifdef CONFIG_HIGHMEM
17451 		dma_mask = DMA_BIT_MASK(64);
17452 #endif
17453 	} else
17454 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17455 
17456 	/* Configure DMA attributes. */
17457 	if (dma_mask > DMA_BIT_MASK(32)) {
17458 		err = pci_set_dma_mask(pdev, dma_mask);
17459 		if (!err) {
17460 			features |= NETIF_F_HIGHDMA;
17461 			err = pci_set_consistent_dma_mask(pdev,
17462 							  persist_dma_mask);
17463 			if (err < 0) {
17464 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17465 					"DMA for consistent allocations\n");
17466 				goto err_out_apeunmap;
17467 			}
17468 		}
17469 	}
17470 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17471 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17472 		if (err) {
17473 			dev_err(&pdev->dev,
17474 				"No usable DMA configuration, aborting\n");
17475 			goto err_out_apeunmap;
17476 		}
17477 	}
17478 
17479 	tg3_init_bufmgr_config(tp);
17480 
17481 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17482 
17483 	/* 5700 B0 chips do not support checksumming correctly due
17484 	 * to hardware bugs.
17485 	 */
17486 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17487 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17488 
17489 		if (tg3_flag(tp, 5755_PLUS))
17490 			features |= NETIF_F_IPV6_CSUM;
17491 	}
17492 
17493 	/* TSO is on by default on chips that support hardware TSO.
17494 	 * Firmware TSO on older chips gives lower performance, so it
17495 	 * is off by default, but can be enabled using ethtool.
17496 	 */
17497 	if ((tg3_flag(tp, HW_TSO_1) ||
17498 	     tg3_flag(tp, HW_TSO_2) ||
17499 	     tg3_flag(tp, HW_TSO_3)) &&
17500 	    (features & NETIF_F_IP_CSUM))
17501 		features |= NETIF_F_TSO;
17502 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17503 		if (features & NETIF_F_IPV6_CSUM)
17504 			features |= NETIF_F_TSO6;
17505 		if (tg3_flag(tp, HW_TSO_3) ||
17506 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17507 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17508 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17509 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17510 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17511 			features |= NETIF_F_TSO_ECN;
17512 	}
17513 
17514 	dev->features |= features;
17515 	dev->vlan_features |= features;
17516 
17517 	/*
17518 	 * Add loopback capability only for a subset of devices that support
17519 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17520 	 * loopback for the remaining devices.
17521 	 */
17522 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17523 	    !tg3_flag(tp, CPMU_PRESENT))
17524 		/* Add the loopback capability */
17525 		features |= NETIF_F_LOOPBACK;
17526 
17527 	dev->hw_features |= features;
17528 
17529 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17530 	    !tg3_flag(tp, TSO_CAPABLE) &&
17531 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17532 		tg3_flag_set(tp, MAX_RXPEND_64);
17533 		tp->rx_pending = 63;
17534 	}
17535 
17536 	err = tg3_get_device_address(tp);
17537 	if (err) {
17538 		dev_err(&pdev->dev,
17539 			"Could not obtain valid ethernet address, aborting\n");
17540 		goto err_out_apeunmap;
17541 	}
17542 
17543 	/*
17544 	 * Reset chip in case UNDI or EFI driver did not shutdown
17545 	 * DMA self test will enable WDMAC and we'll see (spurious)
17546 	 * pending DMA on the PCI bus at that point.
17547 	 */
17548 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17549 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17550 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17551 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17552 	}
17553 
17554 	err = tg3_test_dma(tp);
17555 	if (err) {
17556 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17557 		goto err_out_apeunmap;
17558 	}
17559 
17560 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17561 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17562 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17563 	for (i = 0; i < tp->irq_max; i++) {
17564 		struct tg3_napi *tnapi = &tp->napi[i];
17565 
17566 		tnapi->tp = tp;
17567 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17568 
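		/* Interrupt mailboxes for vectors 0-4 are the classic
		 * 64-bit registers spaced 8 bytes apart; the additional
		 * MSI-X vectors appear to use 4-byte mailboxes, hence
		 * the two strides below.
		 */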
17569 		tnapi->int_mbox = intmbx;
17570 		if (i <= 4)
17571 			intmbx += 0x8;
17572 		else
17573 			intmbx += 0x4;
17574 
17575 		tnapi->consmbox = rcvmbx;
17576 		tnapi->prodmbox = sndmbx;
17577 
17578 		if (i)
17579 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17580 		else
17581 			tnapi->coal_now = HOSTCC_MODE_NOW;
17582 
17583 		if (!tg3_flag(tp, SUPPORT_MSIX))
17584 			break;
17585 
17586 		/*
17587 		 * If we support MSIX, we'll be using RSS.  If we're using
17588 		 * RSS, the first vector only handles link interrupts and the
17589 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17590 		 * mailbox values for the next iteration.  The values we setup
17591 		 * above are still useful for the single vectored mode.
17592 		 */
17593 		if (!i)
17594 			continue;
17595 
17596 		rcvmbx += 0x8;
17597 
17598 		if (sndmbx & 0x4)
17599 			sndmbx -= 0x4;
17600 		else
17601 			sndmbx += 0xc;
17602 	}
17603 
17604 	tg3_init_coal(tp);
17605 
17606 	pci_set_drvdata(pdev, dev);
17607 
17608 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17609 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17610 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17611 		tg3_flag_set(tp, PTP_CAPABLE);
17612 
17613 	tg3_timer_init(tp);
17614 
17615 	tg3_carrier_off(tp);
17616 
17617 	err = register_netdev(dev);
17618 	if (err) {
17619 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17620 		goto err_out_apeunmap;
17621 	}
17622 
17623 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17624 		    tp->board_part_number,
17625 		    tg3_chip_rev_id(tp),
17626 		    tg3_bus_string(tp, str),
17627 		    dev->dev_addr);
17628 
17629 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17630 		struct phy_device *phydev;
17631 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17632 		netdev_info(dev,
17633 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17634 			    phydev->drv->name, dev_name(&phydev->dev));
17635 	} else {
17636 		char *ethtype;
17637 
17638 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17639 			ethtype = "10/100Base-TX";
17640 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17641 			ethtype = "1000Base-SX";
17642 		else
17643 			ethtype = "10/100/1000Base-T";
17644 
17645 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17646 			    "(WireSpeed[%d], EEE[%d])\n",
17647 			    tg3_phy_string(tp), ethtype,
17648 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17649 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17650 	}
17651 
17652 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17653 		    (dev->features & NETIF_F_RXCSUM) != 0,
17654 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17655 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17656 		    tg3_flag(tp, ENABLE_ASF) != 0,
17657 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17658 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17659 		    tp->dma_rwctrl,
17660 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17661 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17662 
17663 	pci_save_state(pdev);
17664 
17665 	return 0;
17666 
17667 err_out_apeunmap:
17668 	if (tp->aperegs) {
17669 		iounmap(tp->aperegs);
17670 		tp->aperegs = NULL;
17671 	}
17672 
17673 err_out_iounmap:
17674 	if (tp->regs) {
17675 		iounmap(tp->regs);
17676 		tp->regs = NULL;
17677 	}
17678 
17679 err_out_free_dev:
17680 	free_netdev(dev);
17681 
17682 err_out_free_res:
17683 	pci_release_regions(pdev);
17684 
17685 err_out_disable_pdev:
17686 	if (pci_is_enabled(pdev))
17687 		pci_disable_device(pdev);
17688 	pci_set_drvdata(pdev, NULL);
17689 	return err;
17690 }
17691 
17692 static void tg3_remove_one(struct pci_dev *pdev)
17693 {
17694 	struct net_device *dev = pci_get_drvdata(pdev);
17695 
17696 	if (dev) {
17697 		struct tg3 *tp = netdev_priv(dev);
17698 
17699 		release_firmware(tp->fw);
17700 
17701 		tg3_reset_task_cancel(tp);
17702 
17703 		if (tg3_flag(tp, USE_PHYLIB)) {
17704 			tg3_phy_fini(tp);
17705 			tg3_mdio_fini(tp);
17706 		}
17707 
17708 		unregister_netdev(dev);
17709 		if (tp->aperegs) {
17710 			iounmap(tp->aperegs);
17711 			tp->aperegs = NULL;
17712 		}
17713 		if (tp->regs) {
17714 			iounmap(tp->regs);
17715 			tp->regs = NULL;
17716 		}
17717 		free_netdev(dev);
17718 		pci_release_regions(pdev);
17719 		pci_disable_device(pdev);
17720 		pci_set_drvdata(pdev, NULL);
17721 	}
17722 }
17723 
17724 #ifdef CONFIG_PM_SLEEP
17725 static int tg3_suspend(struct device *device)
17726 {
17727 	struct pci_dev *pdev = to_pci_dev(device);
17728 	struct net_device *dev = pci_get_drvdata(pdev);
17729 	struct tg3 *tp = netdev_priv(dev);
17730 	int err;
17731 
17732 	if (!netif_running(dev))
17733 		return 0;
17734 
17735 	tg3_reset_task_cancel(tp);
17736 	tg3_phy_stop(tp);
17737 	tg3_netif_stop(tp);
17738 
17739 	tg3_timer_stop(tp);
17740 
17741 	tg3_full_lock(tp, 1);
17742 	tg3_disable_ints(tp);
17743 	tg3_full_unlock(tp);
17744 
17745 	netif_device_detach(dev);
17746 
17747 	tg3_full_lock(tp, 0);
17748 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17749 	tg3_flag_clear(tp, INIT_COMPLETE);
17750 	tg3_full_unlock(tp);
17751 
17752 	err = tg3_power_down_prepare(tp);
17753 	if (err) {
17754 		int err2;
17755 
17756 		tg3_full_lock(tp, 0);
17757 
17758 		tg3_flag_set(tp, INIT_COMPLETE);
17759 		err2 = tg3_restart_hw(tp, true);
17760 		if (err2)
17761 			goto out;
17762 
17763 		tg3_timer_start(tp);
17764 
17765 		netif_device_attach(dev);
17766 		tg3_netif_start(tp);
17767 
17768 out:
17769 		tg3_full_unlock(tp);
17770 
17771 		if (!err2)
17772 			tg3_phy_start(tp);
17773 	}
17774 
17775 	return err;
17776 }
17777 
17778 static int tg3_resume(struct device *device)
17779 {
17780 	struct pci_dev *pdev = to_pci_dev(device);
17781 	struct net_device *dev = pci_get_drvdata(pdev);
17782 	struct tg3 *tp = netdev_priv(dev);
17783 	int err;
17784 
17785 	if (!netif_running(dev))
17786 		return 0;
17787 
17788 	netif_device_attach(dev);
17789 
17790 	tg3_full_lock(tp, 0);
17791 
17792 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17793 
17794 	tg3_flag_set(tp, INIT_COMPLETE);
17795 	err = tg3_restart_hw(tp,
17796 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17797 	if (err)
17798 		goto out;
17799 
17800 	tg3_timer_start(tp);
17801 
17802 	tg3_netif_start(tp);
17803 
17804 out:
17805 	tg3_full_unlock(tp);
17806 
17807 	if (!err)
17808 		tg3_phy_start(tp);
17809 
17810 	return err;
17811 }
17812 #endif /* CONFIG_PM_SLEEP */
17813 
17814 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17815 
17816 static void tg3_shutdown(struct pci_dev *pdev)
17817 {
17818 	struct net_device *dev = pci_get_drvdata(pdev);
17819 	struct tg3 *tp = netdev_priv(dev);
17820 
17821 	rtnl_lock();
17822 	netif_device_detach(dev);
17823 
17824 	if (netif_running(dev))
17825 		dev_close(dev);
17826 
17827 	if (system_state == SYSTEM_POWER_OFF)
17828 		tg3_power_down(tp);
17829 
17830 	rtnl_unlock();
17831 }
17832 
17833 /**
17834  * tg3_io_error_detected - called when PCI error is detected
17835  * @pdev: Pointer to PCI device
17836  * @state: The current pci connection state
17837  *
17838  * This function is called after a PCI bus error affecting
17839  * this device has been detected.
17840  */
17841 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17842 					      pci_channel_state_t state)
17843 {
17844 	struct net_device *netdev = pci_get_drvdata(pdev);
17845 	struct tg3 *tp = netdev_priv(netdev);
17846 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17847 
17848 	netdev_info(netdev, "PCI I/O error detected\n");
17849 
17850 	rtnl_lock();
17851 
17852 	/* We probably don't have netdev yet */
17853 	if (!netdev || !netif_running(netdev))
17854 		goto done;
17855 
17856 	tg3_phy_stop(tp);
17857 
17858 	tg3_netif_stop(tp);
17859 
17860 	tg3_timer_stop(tp);
17861 
17862 	/* Want to make sure that the reset task doesn't run */
17863 	tg3_reset_task_cancel(tp);
17864 
17865 	netif_device_detach(netdev);
17866 
17867 	/* Clean up software state, even if MMIO is blocked */
17868 	tg3_full_lock(tp, 0);
17869 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17870 	tg3_full_unlock(tp);
17871 
17872 done:
17873 	if (state == pci_channel_io_perm_failure) {
17874 		if (netdev) {
17875 			tg3_napi_enable(tp);
17876 			dev_close(netdev);
17877 		}
17878 		err = PCI_ERS_RESULT_DISCONNECT;
17879 	} else {
17880 		pci_disable_device(pdev);
17881 	}
17882 
17883 	rtnl_unlock();
17884 
17885 	return err;
17886 }
17887 
17888 /**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
17896  */
17897 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17898 {
17899 	struct net_device *netdev = pci_get_drvdata(pdev);
17900 	struct tg3 *tp = netdev_priv(netdev);
17901 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17902 	int err;
17903 
17904 	rtnl_lock();
17905 
17906 	if (pci_enable_device(pdev)) {
17907 		dev_err(&pdev->dev,
17908 			"Cannot re-enable PCI device after reset.\n");
17909 		goto done;
17910 	}
17911 
17912 	pci_set_master(pdev);
17913 	pci_restore_state(pdev);
17914 	pci_save_state(pdev);
17915 
17916 	if (!netdev || !netif_running(netdev)) {
17917 		rc = PCI_ERS_RESULT_RECOVERED;
17918 		goto done;
17919 	}
17920 
17921 	err = tg3_power_up(tp);
17922 	if (err)
17923 		goto done;
17924 
17925 	rc = PCI_ERS_RESULT_RECOVERED;
17926 
17927 done:
17928 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17929 		tg3_napi_enable(tp);
17930 		dev_close(netdev);
17931 	}
17932 	rtnl_unlock();
17933 
17934 	return rc;
17935 }
17936 
17937 /**
17938  * tg3_io_resume - called when traffic can start flowing again.
17939  * @pdev: Pointer to PCI device
17940  *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
17943  */
17944 static void tg3_io_resume(struct pci_dev *pdev)
17945 {
17946 	struct net_device *netdev = pci_get_drvdata(pdev);
17947 	struct tg3 *tp = netdev_priv(netdev);
17948 	int err;
17949 
17950 	rtnl_lock();
17951 
17952 	if (!netif_running(netdev))
17953 		goto done;
17954 
17955 	tg3_full_lock(tp, 0);
17956 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17957 	tg3_flag_set(tp, INIT_COMPLETE);
17958 	err = tg3_restart_hw(tp, true);
17959 	if (err) {
17960 		tg3_full_unlock(tp);
17961 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17962 		goto done;
17963 	}
17964 
17965 	netif_device_attach(netdev);
17966 
17967 	tg3_timer_start(tp);
17968 
17969 	tg3_netif_start(tp);
17970 
17971 	tg3_full_unlock(tp);
17972 
17973 	tg3_phy_start(tp);
17974 
17975 done:
17976 	rtnl_unlock();
17977 }
17978 
17979 static const struct pci_error_handlers tg3_err_handler = {
17980 	.error_detected	= tg3_io_error_detected,
17981 	.slot_reset	= tg3_io_slot_reset,
17982 	.resume		= tg3_io_resume
17983 };
17984 
17985 static struct pci_driver tg3_driver = {
17986 	.name		= DRV_MODULE_NAME,
17987 	.id_table	= tg3_pci_tbl,
17988 	.probe		= tg3_init_one,
17989 	.remove		= tg3_remove_one,
17990 	.err_handler	= &tg3_err_handler,
17991 	.driver.pm	= &tg3_pm_ops,
17992 	.shutdown	= tg3_shutdown,
17993 };
17994 
17995 module_pci_driver(tg3_driver);
17996