1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 
51 #include <net/checksum.h>
52 #include <net/ip.h>
53 
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57 
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60 
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65 
66 #define BAR_0	0
67 #define BAR_2	2
68 
69 #include "tg3.h"
70 
71 /* Functions & macros to verify TG3_FLAGS types */
72 
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	return test_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	set_bit(flag, bits);
81 }
82 
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	clear_bit(flag, bits);
86 }
87 
88 #define tg3_flag(tp, flag)				\
89 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)				\
91 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)			\
93 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94 
95 #define DRV_MODULE_NAME		"tg3"
96 #define TG3_MAJ_NUM			3
97 #define TG3_MIN_NUM			132
98 #define DRV_MODULE_VERSION	\
99 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE	"May 21, 2013"
101 
102 #define RESET_KIND_SHUTDOWN	0
103 #define RESET_KIND_INIT		1
104 #define RESET_KIND_SUSPEND	2
105 
106 #define TG3_DEF_RX_MODE		0
107 #define TG3_DEF_TX_MODE		0
108 #define TG3_DEF_MSG_ENABLE	  \
109 	(NETIF_MSG_DRV		| \
110 	 NETIF_MSG_PROBE	| \
111 	 NETIF_MSG_LINK		| \
112 	 NETIF_MSG_TIMER	| \
113 	 NETIF_MSG_IFDOWN	| \
114 	 NETIF_MSG_IFUP		| \
115 	 NETIF_MSG_RX_ERR	| \
116 	 NETIF_MSG_TX_ERR)
117 
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119 
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123 
124 #define TG3_TX_TIMEOUT			(5 * HZ)
125 
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU			60
128 #define TG3_MAX_MTU(tp)	\
129 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING		200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
143 
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150 
151 #define TG3_TX_RING_SIZE		512
152 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153 
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161 				 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
163 
164 #define TG3_DMA_BYTE_ENAB		64
165 
166 #define TG3_RX_STD_DMA_SZ		1536
167 #define TG3_RX_JMB_DMA_SZ		9046
168 
169 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170 
171 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD		256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194 #else
195 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196 #endif
197 
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202 #endif
203 
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K		2048
207 #define TG3_TX_BD_DMA_MAX_4K		4096
208 
209 #define TG3_RAW_IP_ALIGN 2
210 
211 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
212 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 
214 #define FIRMWARE_TG3		"tigon/tg3.bin"
215 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
218 
219 static char version[] =
220 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221 
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 
230 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
236 
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 			TG3_DRV_DATA_FLAG_5705_10_100},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 			PCI_VENDOR_ID_LENOVO,
288 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 	{}
349 };
350 
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352 
353 static const struct {
354 	const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 	{ "rx_octets" },
357 	{ "rx_fragments" },
358 	{ "rx_ucast_packets" },
359 	{ "rx_mcast_packets" },
360 	{ "rx_bcast_packets" },
361 	{ "rx_fcs_errors" },
362 	{ "rx_align_errors" },
363 	{ "rx_xon_pause_rcvd" },
364 	{ "rx_xoff_pause_rcvd" },
365 	{ "rx_mac_ctrl_rcvd" },
366 	{ "rx_xoff_entered" },
367 	{ "rx_frame_too_long_errors" },
368 	{ "rx_jabbers" },
369 	{ "rx_undersize_packets" },
370 	{ "rx_in_length_errors" },
371 	{ "rx_out_length_errors" },
372 	{ "rx_64_or_less_octet_packets" },
373 	{ "rx_65_to_127_octet_packets" },
374 	{ "rx_128_to_255_octet_packets" },
375 	{ "rx_256_to_511_octet_packets" },
376 	{ "rx_512_to_1023_octet_packets" },
377 	{ "rx_1024_to_1522_octet_packets" },
378 	{ "rx_1523_to_2047_octet_packets" },
379 	{ "rx_2048_to_4095_octet_packets" },
380 	{ "rx_4096_to_8191_octet_packets" },
381 	{ "rx_8192_to_9022_octet_packets" },
382 
383 	{ "tx_octets" },
384 	{ "tx_collisions" },
385 
386 	{ "tx_xon_sent" },
387 	{ "tx_xoff_sent" },
388 	{ "tx_flow_control" },
389 	{ "tx_mac_errors" },
390 	{ "tx_single_collisions" },
391 	{ "tx_mult_collisions" },
392 	{ "tx_deferred" },
393 	{ "tx_excessive_collisions" },
394 	{ "tx_late_collisions" },
395 	{ "tx_collide_2times" },
396 	{ "tx_collide_3times" },
397 	{ "tx_collide_4times" },
398 	{ "tx_collide_5times" },
399 	{ "tx_collide_6times" },
400 	{ "tx_collide_7times" },
401 	{ "tx_collide_8times" },
402 	{ "tx_collide_9times" },
403 	{ "tx_collide_10times" },
404 	{ "tx_collide_11times" },
405 	{ "tx_collide_12times" },
406 	{ "tx_collide_13times" },
407 	{ "tx_collide_14times" },
408 	{ "tx_collide_15times" },
409 	{ "tx_ucast_packets" },
410 	{ "tx_mcast_packets" },
411 	{ "tx_bcast_packets" },
412 	{ "tx_carrier_sense_errors" },
413 	{ "tx_discards" },
414 	{ "tx_errors" },
415 
416 	{ "dma_writeq_full" },
417 	{ "dma_write_prioq_full" },
418 	{ "rxbds_empty" },
419 	{ "rx_discards" },
420 	{ "rx_errors" },
421 	{ "rx_threshold_hit" },
422 
423 	{ "dma_readq_full" },
424 	{ "dma_read_prioq_full" },
425 	{ "tx_comp_queue_full" },
426 
427 	{ "ring_set_send_prod_index" },
428 	{ "ring_status_update" },
429 	{ "nic_irqs" },
430 	{ "nic_avoided_irqs" },
431 	{ "nic_tx_threshold_hit" },
432 
433 	{ "mbuf_lwm_thresh_hit" },
434 };
435 
436 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST		0
438 #define TG3_LINK_TEST		1
439 #define TG3_REGISTER_TEST	2
440 #define TG3_MEMORY_TEST		3
441 #define TG3_MAC_LOOPB_TEST	4
442 #define TG3_PHY_LOOPB_TEST	5
443 #define TG3_EXT_LOOPB_TEST	6
444 #define TG3_INTERRUPT_TEST	7
445 
446 
447 static const struct {
448 	const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
451 	[TG3_LINK_TEST]		= { "link test         (online) " },
452 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
453 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
454 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
455 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
456 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
457 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
458 };
459 
460 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
461 
462 
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 	writel(val, tp->regs + off);
466 }
467 
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 	return readl(tp->regs + off);
471 }
472 
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 	writel(val, tp->aperegs + off);
476 }
477 
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 	return readl(tp->aperegs + off);
481 }
482 
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 	unsigned long flags;
486 
487 	spin_lock_irqsave(&tp->indirect_lock, flags);
488 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492 
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 	writel(val, tp->regs + off);
496 	readl(tp->regs + off);
497 }
498 
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 	unsigned long flags;
502 	u32 val;
503 
504 	spin_lock_irqsave(&tp->indirect_lock, flags);
505 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 	return val;
509 }
510 
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 	unsigned long flags;
514 
515 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 				       TG3_64BIT_REG_LOW, val);
518 		return;
519 	}
520 	if (off == TG3_RX_STD_PROD_IDX_REG) {
521 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 				       TG3_64BIT_REG_LOW, val);
523 		return;
524 	}
525 
526 	spin_lock_irqsave(&tp->indirect_lock, flags);
527 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
530 
531 	/* In indirect mode when disabling interrupts, we also need
532 	 * to clear the interrupt bit in the GRC local ctrl register.
533 	 */
534 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 	    (val == 0x1)) {
536 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 	}
539 }
540 
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 	unsigned long flags;
544 	u32 val;
545 
546 	spin_lock_irqsave(&tp->indirect_lock, flags);
547 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 	return val;
551 }
552 
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 		/* Non-posted methods */
562 		tp->write32(tp, off, val);
563 	else {
564 		/* Posted method */
565 		tg3_write32(tp, off, val);
566 		if (usec_wait)
567 			udelay(usec_wait);
568 		tp->read32(tp, off);
569 	}
570 	/* Wait again after the read for the posted method to guarantee that
571 	 * the wait time is met.
572 	 */
573 	if (usec_wait)
574 		udelay(usec_wait);
575 }
576 
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 	tp->write32_mbox(tp, off, val);
580 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 	     !tg3_flag(tp, ICH_WORKAROUND)))
583 		tp->read32_mbox(tp, off);
584 }
585 
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 	void __iomem *mbox = tp->regs + off;
589 	writel(val, mbox);
590 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 		writel(val, mbox);
592 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
594 		readl(mbox);
595 }
596 
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 	return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601 
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 	writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606 
607 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
612 
613 #define tw32(reg, val)			tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)			tp->read32(tp, reg)
617 
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 	unsigned long flags;
621 
622 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 		return;
625 
626 	spin_lock_irqsave(&tp->indirect_lock, flags);
627 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630 
631 		/* Always leave this as zero. */
632 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 	} else {
634 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
636 
637 		/* Always leave this as zero. */
638 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 	}
640 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642 
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 	unsigned long flags;
646 
647 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 		*val = 0;
650 		return;
651 	}
652 
653 	spin_lock_irqsave(&tp->indirect_lock, flags);
654 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657 
658 		/* Always leave this as zero. */
659 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 	} else {
661 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 		*val = tr32(TG3PCI_MEM_WIN_DATA);
663 
664 		/* Always leave this as zero. */
665 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 	}
667 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
669 
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 	int i;
673 	u32 regbase, bit;
674 
675 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 		regbase = TG3_APE_LOCK_GRANT;
677 	else
678 		regbase = TG3_APE_PER_LOCK_GRANT;
679 
680 	/* Make sure the driver hasn't any stale locks. */
681 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 		switch (i) {
683 		case TG3_APE_LOCK_PHY0:
684 		case TG3_APE_LOCK_PHY1:
685 		case TG3_APE_LOCK_PHY2:
686 		case TG3_APE_LOCK_PHY3:
687 			bit = APE_LOCK_GRANT_DRIVER;
688 			break;
689 		default:
690 			if (!tp->pci_fn)
691 				bit = APE_LOCK_GRANT_DRIVER;
692 			else
693 				bit = 1 << tp->pci_fn;
694 		}
695 		tg3_ape_write32(tp, regbase + 4 * i, bit);
696 	}
697 
698 }
699 
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 	int i, off;
703 	int ret = 0;
704 	u32 status, req, gnt, bit;
705 
706 	if (!tg3_flag(tp, ENABLE_APE))
707 		return 0;
708 
709 	switch (locknum) {
710 	case TG3_APE_LOCK_GPIO:
711 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 			return 0;
713 	case TG3_APE_LOCK_GRC:
714 	case TG3_APE_LOCK_MEM:
715 		if (!tp->pci_fn)
716 			bit = APE_LOCK_REQ_DRIVER;
717 		else
718 			bit = 1 << tp->pci_fn;
719 		break;
720 	case TG3_APE_LOCK_PHY0:
721 	case TG3_APE_LOCK_PHY1:
722 	case TG3_APE_LOCK_PHY2:
723 	case TG3_APE_LOCK_PHY3:
724 		bit = APE_LOCK_REQ_DRIVER;
725 		break;
726 	default:
727 		return -EINVAL;
728 	}
729 
730 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 		req = TG3_APE_LOCK_REQ;
732 		gnt = TG3_APE_LOCK_GRANT;
733 	} else {
734 		req = TG3_APE_PER_LOCK_REQ;
735 		gnt = TG3_APE_PER_LOCK_GRANT;
736 	}
737 
738 	off = 4 * locknum;
739 
740 	tg3_ape_write32(tp, req + off, bit);
741 
742 	/* Wait for up to 1 millisecond to acquire lock. */
743 	for (i = 0; i < 100; i++) {
744 		status = tg3_ape_read32(tp, gnt + off);
745 		if (status == bit)
746 			break;
747 		udelay(10);
748 	}
749 
750 	if (status != bit) {
751 		/* Revoke the lock request. */
752 		tg3_ape_write32(tp, gnt + off, bit);
753 		ret = -EBUSY;
754 	}
755 
756 	return ret;
757 }
758 
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
760 {
761 	u32 gnt, bit;
762 
763 	if (!tg3_flag(tp, ENABLE_APE))
764 		return;
765 
766 	switch (locknum) {
767 	case TG3_APE_LOCK_GPIO:
768 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
769 			return;
770 	case TG3_APE_LOCK_GRC:
771 	case TG3_APE_LOCK_MEM:
772 		if (!tp->pci_fn)
773 			bit = APE_LOCK_GRANT_DRIVER;
774 		else
775 			bit = 1 << tp->pci_fn;
776 		break;
777 	case TG3_APE_LOCK_PHY0:
778 	case TG3_APE_LOCK_PHY1:
779 	case TG3_APE_LOCK_PHY2:
780 	case TG3_APE_LOCK_PHY3:
781 		bit = APE_LOCK_GRANT_DRIVER;
782 		break;
783 	default:
784 		return;
785 	}
786 
787 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 		gnt = TG3_APE_LOCK_GRANT;
789 	else
790 		gnt = TG3_APE_PER_LOCK_GRANT;
791 
792 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
793 }
794 
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
796 {
797 	u32 apedata;
798 
799 	while (timeout_us) {
800 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801 			return -EBUSY;
802 
803 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805 			break;
806 
807 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808 
809 		udelay(10);
810 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811 	}
812 
813 	return timeout_us ? 0 : -EBUSY;
814 }
815 
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818 	u32 i, apedata;
819 
820 	for (i = 0; i < timeout_us / 10; i++) {
821 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822 
823 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824 			break;
825 
826 		udelay(10);
827 	}
828 
829 	return i == timeout_us / 10;
830 }
831 
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
833 				   u32 len)
834 {
835 	int err;
836 	u32 i, bufoff, msgoff, maxlen, apedata;
837 
838 	if (!tg3_flag(tp, APE_HAS_NCSI))
839 		return 0;
840 
841 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 	if (apedata != APE_SEG_SIG_MAGIC)
843 		return -ENODEV;
844 
845 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 	if (!(apedata & APE_FW_STATUS_READY))
847 		return -EAGAIN;
848 
849 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
850 		 TG3_APE_SHMEM_BASE;
851 	msgoff = bufoff + 2 * sizeof(u32);
852 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
853 
854 	while (len) {
855 		u32 length;
856 
857 		/* Cap xfer sizes to scratchpad limits. */
858 		length = (len > maxlen) ? maxlen : len;
859 		len -= length;
860 
861 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 		if (!(apedata & APE_FW_STATUS_READY))
863 			return -EAGAIN;
864 
865 		/* Wait for up to 1 msec for APE to service previous event. */
866 		err = tg3_ape_event_lock(tp, 1000);
867 		if (err)
868 			return err;
869 
870 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 			  APE_EVENT_STATUS_SCRTCHPD_READ |
872 			  APE_EVENT_STATUS_EVENT_PENDING;
873 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
874 
875 		tg3_ape_write32(tp, bufoff, base_off);
876 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
877 
878 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
880 
881 		base_off += length;
882 
883 		if (tg3_ape_wait_for_event(tp, 30000))
884 			return -EAGAIN;
885 
886 		for (i = 0; length; i += 4, length -= 4) {
887 			u32 val = tg3_ape_read32(tp, msgoff + i);
888 			memcpy(data, &val, sizeof(u32));
889 			data++;
890 		}
891 	}
892 
893 	return 0;
894 }
895 
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
897 {
898 	int err;
899 	u32 apedata;
900 
901 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 	if (apedata != APE_SEG_SIG_MAGIC)
903 		return -EAGAIN;
904 
905 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 	if (!(apedata & APE_FW_STATUS_READY))
907 		return -EAGAIN;
908 
909 	/* Wait for up to 1 millisecond for APE to service previous event. */
910 	err = tg3_ape_event_lock(tp, 1000);
911 	if (err)
912 		return err;
913 
914 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 			event | APE_EVENT_STATUS_EVENT_PENDING);
916 
917 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
919 
920 	return 0;
921 }
922 
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
924 {
925 	u32 event;
926 	u32 apedata;
927 
928 	if (!tg3_flag(tp, ENABLE_APE))
929 		return;
930 
931 	switch (kind) {
932 	case RESET_KIND_INIT:
933 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 				APE_HOST_SEG_SIG_MAGIC);
935 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 				APE_HOST_SEG_LEN_MAGIC);
937 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 				APE_HOST_BEHAV_NO_PHYLOCK);
943 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 				    TG3_APE_HOST_DRVR_STATE_START);
945 
946 		event = APE_EVENT_STATUS_STATE_START;
947 		break;
948 	case RESET_KIND_SHUTDOWN:
949 		/* With the interface we are currently using,
950 		 * APE does not track driver state.  Wiping
951 		 * out the HOST SEGMENT SIGNATURE forces
952 		 * the APE to assume OS absent status.
953 		 */
954 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
955 
956 		if (device_may_wakeup(&tp->pdev->dev) &&
957 		    tg3_flag(tp, WOL_ENABLE)) {
958 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 					    TG3_APE_HOST_WOL_SPEED_AUTO);
960 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
961 		} else
962 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
963 
964 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
965 
966 		event = APE_EVENT_STATUS_STATE_UNLOAD;
967 		break;
968 	case RESET_KIND_SUSPEND:
969 		event = APE_EVENT_STATUS_STATE_SUSPEND;
970 		break;
971 	default:
972 		return;
973 	}
974 
975 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976 
977 	tg3_ape_send_event(tp, event);
978 }
979 
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982 	int i;
983 
984 	tw32(TG3PCI_MISC_HOST_CTRL,
985 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 	for (i = 0; i < tp->irq_max; i++)
987 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989 
990 static void tg3_enable_ints(struct tg3 *tp)
991 {
992 	int i;
993 
994 	tp->irq_sync = 0;
995 	wmb();
996 
997 	tw32(TG3PCI_MISC_HOST_CTRL,
998 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999 
1000 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 	for (i = 0; i < tp->irq_cnt; i++) {
1002 		struct tg3_napi *tnapi = &tp->napi[i];
1003 
1004 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 		if (tg3_flag(tp, 1SHOT_MSI))
1006 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007 
1008 		tp->coal_now |= tnapi->coal_now;
1009 	}
1010 
1011 	/* Force an initial interrupt */
1012 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 	else
1016 		tw32(HOSTCC_MODE, tp->coal_now);
1017 
1018 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1019 }
1020 
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023 	struct tg3 *tp = tnapi->tp;
1024 	struct tg3_hw_status *sblk = tnapi->hw_status;
1025 	unsigned int work_exists = 0;
1026 
1027 	/* check for phy events */
1028 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 		if (sblk->status & SD_STATUS_LINK_CHG)
1030 			work_exists = 1;
1031 	}
1032 
1033 	/* check for TX work to do */
1034 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 		work_exists = 1;
1036 
1037 	/* check for RX work to do */
1038 	if (tnapi->rx_rcb_prod_idx &&
1039 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040 		work_exists = 1;
1041 
1042 	return work_exists;
1043 }
1044 
1045 /* tg3_int_reenable
1046  *  similar to tg3_enable_ints, but it accurately determines whether there
1047  *  is new work pending and can return without flushing the PIO write
1048  *  which reenables interrupts
1049  */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052 	struct tg3 *tp = tnapi->tp;
1053 
1054 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055 	mmiowb();
1056 
1057 	/* When doing tagged status, this work check is unnecessary.
1058 	 * The last_tag we write above tells the chip which piece of
1059 	 * work we've completed.
1060 	 */
1061 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065 
1066 static void tg3_switch_clocks(struct tg3 *tp)
1067 {
1068 	u32 clock_ctrl;
1069 	u32 orig_clock_ctrl;
1070 
1071 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1072 		return;
1073 
1074 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075 
1076 	orig_clock_ctrl = clock_ctrl;
1077 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 		       CLOCK_CTRL_CLKRUN_OENABLE |
1079 		       0x1f);
1080 	tp->pci_clock_ctrl = clock_ctrl;
1081 
1082 	if (tg3_flag(tp, 5705_PLUS)) {
1083 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 		}
1087 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 			    clock_ctrl |
1090 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 			    40);
1092 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1094 			    40);
1095 	}
1096 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1097 }
1098 
1099 #define PHY_BUSY_LOOPS	5000
1100 
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1102 			 u32 *val)
1103 {
1104 	u32 frame_val;
1105 	unsigned int loops;
1106 	int ret;
1107 
1108 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 		tw32_f(MAC_MI_MODE,
1110 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 		udelay(80);
1112 	}
1113 
1114 	tg3_ape_lock(tp, tp->phy_ape_lock);
1115 
1116 	*val = 0x0;
1117 
1118 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 		      MI_COM_PHY_ADDR_MASK);
1120 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 		      MI_COM_REG_ADDR_MASK);
1122 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123 
1124 	tw32_f(MAC_MI_COM, frame_val);
1125 
1126 	loops = PHY_BUSY_LOOPS;
1127 	while (loops != 0) {
1128 		udelay(10);
1129 		frame_val = tr32(MAC_MI_COM);
1130 
1131 		if ((frame_val & MI_COM_BUSY) == 0) {
1132 			udelay(5);
1133 			frame_val = tr32(MAC_MI_COM);
1134 			break;
1135 		}
1136 		loops -= 1;
1137 	}
1138 
1139 	ret = -EBUSY;
1140 	if (loops != 0) {
1141 		*val = frame_val & MI_COM_DATA_MASK;
1142 		ret = 0;
1143 	}
1144 
1145 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 		udelay(80);
1148 	}
1149 
1150 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1151 
1152 	return ret;
1153 }
1154 
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
1159 
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1161 			  u32 val)
1162 {
1163 	u32 frame_val;
1164 	unsigned int loops;
1165 	int ret;
1166 
1167 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1169 		return 0;
1170 
1171 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 		tw32_f(MAC_MI_MODE,
1173 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 		udelay(80);
1175 	}
1176 
1177 	tg3_ape_lock(tp, tp->phy_ape_lock);
1178 
1179 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 		      MI_COM_PHY_ADDR_MASK);
1181 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 		      MI_COM_REG_ADDR_MASK);
1183 	frame_val |= (val & MI_COM_DATA_MASK);
1184 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185 
1186 	tw32_f(MAC_MI_COM, frame_val);
1187 
1188 	loops = PHY_BUSY_LOOPS;
1189 	while (loops != 0) {
1190 		udelay(10);
1191 		frame_val = tr32(MAC_MI_COM);
1192 		if ((frame_val & MI_COM_BUSY) == 0) {
1193 			udelay(5);
1194 			frame_val = tr32(MAC_MI_COM);
1195 			break;
1196 		}
1197 		loops -= 1;
1198 	}
1199 
1200 	ret = -EBUSY;
1201 	if (loops != 0)
1202 		ret = 0;
1203 
1204 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 		udelay(80);
1207 	}
1208 
1209 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1210 
1211 	return ret;
1212 }
1213 
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
1218 
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221 	int err;
1222 
1223 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 	if (err)
1225 		goto done;
1226 
1227 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 	if (err)
1229 		goto done;
1230 
1231 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 	if (err)
1234 		goto done;
1235 
1236 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237 
1238 done:
1239 	return err;
1240 }
1241 
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244 	int err;
1245 
1246 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 	if (err)
1248 		goto done;
1249 
1250 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 	if (err)
1252 		goto done;
1253 
1254 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 	if (err)
1257 		goto done;
1258 
1259 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260 
1261 done:
1262 	return err;
1263 }
1264 
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267 	int err;
1268 
1269 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 	if (!err)
1271 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272 
1273 	return err;
1274 }
1275 
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278 	int err;
1279 
1280 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 	if (!err)
1282 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283 
1284 	return err;
1285 }
1286 
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289 	int err;
1290 
1291 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 	if (!err)
1295 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296 
1297 	return err;
1298 }
1299 
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 		set |= MII_TG3_AUXCTL_MISC_WREN;
1304 
1305 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307 
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310 	u32 val;
1311 	int err;
1312 
1313 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314 
1315 	if (err)
1316 		return err;
1317 	if (enable)
1318 
1319 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 	else
1321 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322 
1323 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325 
1326 	return err;
1327 }
1328 
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331 	u32 phy_control;
1332 	int limit, err;
1333 
1334 	/* OK, reset it, and poll the BMCR_RESET bit until it
1335 	 * clears or we time out.
1336 	 */
1337 	phy_control = BMCR_RESET;
1338 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 	if (err != 0)
1340 		return -EBUSY;
1341 
1342 	limit = 5000;
1343 	while (limit--) {
1344 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 		if (err != 0)
1346 			return -EBUSY;
1347 
1348 		if ((phy_control & BMCR_RESET) == 0) {
1349 			udelay(40);
1350 			break;
1351 		}
1352 		udelay(10);
1353 	}
1354 	if (limit < 0)
1355 		return -EBUSY;
1356 
1357 	return 0;
1358 }
1359 
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362 	struct tg3 *tp = bp->priv;
1363 	u32 val;
1364 
1365 	spin_lock_bh(&tp->lock);
1366 
1367 	if (tg3_readphy(tp, reg, &val))
1368 		val = -EIO;
1369 
1370 	spin_unlock_bh(&tp->lock);
1371 
1372 	return val;
1373 }
1374 
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377 	struct tg3 *tp = bp->priv;
1378 	u32 ret = 0;
1379 
1380 	spin_lock_bh(&tp->lock);
1381 
1382 	if (tg3_writephy(tp, reg, val))
1383 		ret = -EIO;
1384 
1385 	spin_unlock_bh(&tp->lock);
1386 
1387 	return ret;
1388 }
1389 
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392 	return 0;
1393 }
1394 
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397 	u32 val;
1398 	struct phy_device *phydev;
1399 
1400 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 	case PHY_ID_BCM50610:
1403 	case PHY_ID_BCM50610M:
1404 		val = MAC_PHYCFG2_50610_LED_MODES;
1405 		break;
1406 	case PHY_ID_BCMAC131:
1407 		val = MAC_PHYCFG2_AC131_LED_MODES;
1408 		break;
1409 	case PHY_ID_RTL8211C:
1410 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 		break;
1412 	case PHY_ID_RTL8201E:
1413 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414 		break;
1415 	default:
1416 		return;
1417 	}
1418 
1419 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 		tw32(MAC_PHYCFG2, val);
1421 
1422 		val = tr32(MAC_PHYCFG1);
1423 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 		tw32(MAC_PHYCFG1, val);
1427 
1428 		return;
1429 	}
1430 
1431 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1434 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1435 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1436 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1437 		       MAC_PHYCFG2_INBAND_ENABLE;
1438 
1439 	tw32(MAC_PHYCFG2, val);
1440 
1441 	val = tr32(MAC_PHYCFG1);
1442 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 	}
1450 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 	tw32(MAC_PHYCFG1, val);
1453 
1454 	val = tr32(MAC_EXT_RGMII_MODE);
1455 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 		 MAC_RGMII_MODE_RX_QUALITY |
1457 		 MAC_RGMII_MODE_RX_ACTIVITY |
1458 		 MAC_RGMII_MODE_RX_ENG_DET |
1459 		 MAC_RGMII_MODE_TX_ENABLE |
1460 		 MAC_RGMII_MODE_TX_LOWPWR |
1461 		 MAC_RGMII_MODE_TX_RESET);
1462 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 			val |= MAC_RGMII_MODE_RX_INT_B |
1465 			       MAC_RGMII_MODE_RX_QUALITY |
1466 			       MAC_RGMII_MODE_RX_ACTIVITY |
1467 			       MAC_RGMII_MODE_RX_ENG_DET;
1468 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 			val |= MAC_RGMII_MODE_TX_ENABLE |
1470 			       MAC_RGMII_MODE_TX_LOWPWR |
1471 			       MAC_RGMII_MODE_TX_RESET;
1472 	}
1473 	tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475 
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1480 	udelay(80);
1481 
1482 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1484 		tg3_mdio_config_5785(tp);
1485 }
1486 
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489 	int i;
1490 	u32 reg;
1491 	struct phy_device *phydev;
1492 
1493 	if (tg3_flag(tp, 5717_PLUS)) {
1494 		u32 is_serdes;
1495 
1496 		tp->phy_addr = tp->pci_fn + 1;
1497 
1498 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 		else
1501 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 		if (is_serdes)
1504 			tp->phy_addr += 7;
1505 	} else
1506 		tp->phy_addr = TG3_PHY_MII_ADDR;
1507 
1508 	tg3_mdio_start(tp);
1509 
1510 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511 		return 0;
1512 
1513 	tp->mdio_bus = mdiobus_alloc();
1514 	if (tp->mdio_bus == NULL)
1515 		return -ENOMEM;
1516 
1517 	tp->mdio_bus->name     = "tg3 mdio bus";
1518 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 	tp->mdio_bus->priv     = tp;
1521 	tp->mdio_bus->parent   = &tp->pdev->dev;
1522 	tp->mdio_bus->read     = &tg3_mdio_read;
1523 	tp->mdio_bus->write    = &tg3_mdio_write;
1524 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1525 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1527 
1528 	for (i = 0; i < PHY_MAX_ADDR; i++)
1529 		tp->mdio_bus->irq[i] = PHY_POLL;
1530 
1531 	/* The bus registration will look for all the PHYs on the mdio bus.
1532 	 * Unfortunately, it does not ensure the PHY is powered up before
1533 	 * accessing the PHY ID registers.  A chip reset is the
1534 	 * quickest way to bring the device back to an operational state..
1535 	 */
1536 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537 		tg3_bmcr_reset(tp);
1538 
1539 	i = mdiobus_register(tp->mdio_bus);
1540 	if (i) {
1541 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 		mdiobus_free(tp->mdio_bus);
1543 		return i;
1544 	}
1545 
1546 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547 
1548 	if (!phydev || !phydev->drv) {
1549 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 		mdiobus_unregister(tp->mdio_bus);
1551 		mdiobus_free(tp->mdio_bus);
1552 		return -ENODEV;
1553 	}
1554 
1555 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 	case PHY_ID_BCM57780:
1557 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 		break;
1560 	case PHY_ID_BCM50610:
1561 	case PHY_ID_BCM50610M:
1562 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 				     PHY_BRCM_RX_REFCLK_UNUSED |
1564 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572 		/* fallthru */
1573 	case PHY_ID_RTL8211C:
1574 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 		break;
1576 	case PHY_ID_RTL8201E:
1577 	case PHY_ID_BCMAC131:
1578 		phydev->interface = PHY_INTERFACE_MODE_MII;
1579 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 		break;
1582 	}
1583 
1584 	tg3_flag_set(tp, MDIOBUS_INITED);
1585 
1586 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 		tg3_mdio_config_5785(tp);
1588 
1589 	return 0;
1590 }
1591 
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 		tg3_flag_clear(tp, MDIOBUS_INITED);
1596 		mdiobus_unregister(tp->mdio_bus);
1597 		mdiobus_free(tp->mdio_bus);
1598 	}
1599 }
1600 
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604 	u32 val;
1605 
1606 	val = tr32(GRC_RX_CPU_EVENT);
1607 	val |= GRC_RX_CPU_DRIVER_EVENT;
1608 	tw32_f(GRC_RX_CPU_EVENT, val);
1609 
1610 	tp->last_event_jiffies = jiffies;
1611 }
1612 
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618 	int i;
1619 	unsigned int delay_cnt;
1620 	long time_remain;
1621 
1622 	/* If enough time has passed, no wait is necessary. */
1623 	time_remain = (long)(tp->last_event_jiffies + 1 +
1624 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 		      (long)jiffies;
1626 	if (time_remain < 0)
1627 		return;
1628 
1629 	/* Check if we can shorten the wait time. */
1630 	delay_cnt = jiffies_to_usecs(time_remain);
1631 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 	delay_cnt = (delay_cnt >> 3) + 1;
1634 
1635 	for (i = 0; i < delay_cnt; i++) {
1636 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 			break;
1638 		udelay(8);
1639 	}
1640 }
1641 
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645 	u32 reg, val;
1646 
1647 	val = 0;
1648 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1649 		val = reg << 16;
1650 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1651 		val |= (reg & 0xffff);
1652 	*data++ = val;
1653 
1654 	val = 0;
1655 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656 		val = reg << 16;
1657 	if (!tg3_readphy(tp, MII_LPA, &reg))
1658 		val |= (reg & 0xffff);
1659 	*data++ = val;
1660 
1661 	val = 0;
1662 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664 			val = reg << 16;
1665 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666 			val |= (reg & 0xffff);
1667 	}
1668 	*data++ = val;
1669 
1670 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671 		val = reg << 16;
1672 	else
1673 		val = 0;
1674 	*data++ = val;
1675 }
1676 
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680 	u32 data[4];
1681 
1682 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683 		return;
1684 
1685 	tg3_phy_gather_ump_data(tp, data);
1686 
1687 	tg3_wait_for_event_ack(tp);
1688 
1689 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695 
1696 	tg3_generate_fw_event(tp);
1697 }
1698 
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 		/* Wait for RX cpu to ACK the previous event. */
1704 		tg3_wait_for_event_ack(tp);
1705 
1706 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707 
1708 		tg3_generate_fw_event(tp);
1709 
1710 		/* Wait for RX cpu to ACK this event. */
1711 		tg3_wait_for_event_ack(tp);
1712 	}
1713 }
1714 
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720 
1721 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722 		switch (kind) {
1723 		case RESET_KIND_INIT:
1724 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 				      DRV_STATE_START);
1726 			break;
1727 
1728 		case RESET_KIND_SHUTDOWN:
1729 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 				      DRV_STATE_UNLOAD);
1731 			break;
1732 
1733 		case RESET_KIND_SUSPEND:
1734 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 				      DRV_STATE_SUSPEND);
1736 			break;
1737 
1738 		default:
1739 			break;
1740 		}
1741 	}
1742 
1743 	if (kind == RESET_KIND_INIT ||
1744 	    kind == RESET_KIND_SUSPEND)
1745 		tg3_ape_driver_state_change(tp, kind);
1746 }
1747 
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 		switch (kind) {
1753 		case RESET_KIND_INIT:
1754 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 				      DRV_STATE_START_DONE);
1756 			break;
1757 
1758 		case RESET_KIND_SHUTDOWN:
1759 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 				      DRV_STATE_UNLOAD_DONE);
1761 			break;
1762 
1763 		default:
1764 			break;
1765 		}
1766 	}
1767 
1768 	if (kind == RESET_KIND_SHUTDOWN)
1769 		tg3_ape_driver_state_change(tp, kind);
1770 }
1771 
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775 	if (tg3_flag(tp, ENABLE_ASF)) {
1776 		switch (kind) {
1777 		case RESET_KIND_INIT:
1778 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779 				      DRV_STATE_START);
1780 			break;
1781 
1782 		case RESET_KIND_SHUTDOWN:
1783 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784 				      DRV_STATE_UNLOAD);
1785 			break;
1786 
1787 		case RESET_KIND_SUSPEND:
1788 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 				      DRV_STATE_SUSPEND);
1790 			break;
1791 
1792 		default:
1793 			break;
1794 		}
1795 	}
1796 }
1797 
1798 static int tg3_poll_fw(struct tg3 *tp)
1799 {
1800 	int i;
1801 	u32 val;
1802 
1803 	if (tg3_flag(tp, IS_SSB_CORE)) {
1804 		/* We don't use firmware. */
1805 		return 0;
1806 	}
1807 
1808 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 		/* Wait up to 20ms for init done. */
1810 		for (i = 0; i < 200; i++) {
1811 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812 				return 0;
1813 			udelay(100);
1814 		}
1815 		return -ENODEV;
1816 	}
1817 
1818 	/* Wait for firmware initialization to complete. */
1819 	for (i = 0; i < 100000; i++) {
1820 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822 			break;
1823 		udelay(10);
1824 	}
1825 
1826 	/* Chip might not be fitted with firmware.  Some Sun onboard
1827 	 * parts are configured like that.  So don't signal the timeout
1828 	 * of the above loop as an error, but do report the lack of
1829 	 * running firmware once.
1830 	 */
1831 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1833 
1834 		netdev_info(tp->dev, "No firmware running\n");
1835 	}
1836 
1837 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 		/* The 57765 A0 needs a little more
1839 		 * time to do some important work.
1840 		 */
1841 		mdelay(10);
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849 	if (!netif_carrier_ok(tp->dev)) {
1850 		netif_info(tp, link, tp->dev, "Link is down\n");
1851 		tg3_ump_link_report(tp);
1852 	} else if (netif_msg_link(tp)) {
1853 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 			    (tp->link_config.active_speed == SPEED_1000 ?
1855 			     1000 :
1856 			     (tp->link_config.active_speed == SPEED_100 ?
1857 			      100 : 10)),
1858 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1859 			     "full" : "half"));
1860 
1861 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 			    "on" : "off",
1864 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865 			    "on" : "off");
1866 
1867 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 			netdev_info(tp->dev, "EEE is %s\n",
1869 				    tp->setlpicnt ? "enabled" : "disabled");
1870 
1871 		tg3_ump_link_report(tp);
1872 	}
1873 
1874 	tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876 
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1878 {
1879 	u32 flowctrl = 0;
1880 
1881 	if (adv & ADVERTISE_PAUSE_CAP) {
1882 		flowctrl |= FLOW_CTRL_RX;
1883 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 			flowctrl |= FLOW_CTRL_TX;
1885 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1886 		flowctrl |= FLOW_CTRL_TX;
1887 
1888 	return flowctrl;
1889 }
1890 
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1892 {
1893 	u16 miireg;
1894 
1895 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 		miireg = ADVERTISE_1000XPAUSE;
1897 	else if (flow_ctrl & FLOW_CTRL_TX)
1898 		miireg = ADVERTISE_1000XPSE_ASYM;
1899 	else if (flow_ctrl & FLOW_CTRL_RX)
1900 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1901 	else
1902 		miireg = 0;
1903 
1904 	return miireg;
1905 }
1906 
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1908 {
1909 	u32 flowctrl = 0;
1910 
1911 	if (adv & ADVERTISE_1000XPAUSE) {
1912 		flowctrl |= FLOW_CTRL_RX;
1913 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 			flowctrl |= FLOW_CTRL_TX;
1915 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 		flowctrl |= FLOW_CTRL_TX;
1917 
1918 	return flowctrl;
1919 }
1920 
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1922 {
1923 	u8 cap = 0;
1924 
1925 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 		if (lcladv & ADVERTISE_1000XPAUSE)
1929 			cap = FLOW_CTRL_RX;
1930 		if (rmtadv & ADVERTISE_1000XPAUSE)
1931 			cap = FLOW_CTRL_TX;
1932 	}
1933 
1934 	return cap;
1935 }
1936 
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1938 {
1939 	u8 autoneg;
1940 	u8 flowctrl = 0;
1941 	u32 old_rx_mode = tp->rx_mode;
1942 	u32 old_tx_mode = tp->tx_mode;
1943 
1944 	if (tg3_flag(tp, USE_PHYLIB))
1945 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1946 	else
1947 		autoneg = tp->link_config.autoneg;
1948 
1949 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1952 		else
1953 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1954 	} else
1955 		flowctrl = tp->link_config.flowctrl;
1956 
1957 	tp->link_config.active_flowctrl = flowctrl;
1958 
1959 	if (flowctrl & FLOW_CTRL_RX)
1960 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1961 	else
1962 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1963 
1964 	if (old_rx_mode != tp->rx_mode)
1965 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1966 
1967 	if (flowctrl & FLOW_CTRL_TX)
1968 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1969 	else
1970 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1971 
1972 	if (old_tx_mode != tp->tx_mode)
1973 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1974 }
1975 
1976 static void tg3_adjust_link(struct net_device *dev)
1977 {
1978 	u8 oldflowctrl, linkmesg = 0;
1979 	u32 mac_mode, lcl_adv, rmt_adv;
1980 	struct tg3 *tp = netdev_priv(dev);
1981 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1982 
1983 	spin_lock_bh(&tp->lock);
1984 
1985 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 				    MAC_MODE_HALF_DUPLEX);
1987 
1988 	oldflowctrl = tp->link_config.active_flowctrl;
1989 
1990 	if (phydev->link) {
1991 		lcl_adv = 0;
1992 		rmt_adv = 0;
1993 
1994 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 		else if (phydev->speed == SPEED_1000 ||
1997 			 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1999 		else
2000 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2001 
2002 		if (phydev->duplex == DUPLEX_HALF)
2003 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2004 		else {
2005 			lcl_adv = mii_advertise_flowctrl(
2006 				  tp->link_config.flowctrl);
2007 
2008 			if (phydev->pause)
2009 				rmt_adv = LPA_PAUSE_CAP;
2010 			if (phydev->asym_pause)
2011 				rmt_adv |= LPA_PAUSE_ASYM;
2012 		}
2013 
2014 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2015 	} else
2016 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2017 
2018 	if (mac_mode != tp->mac_mode) {
2019 		tp->mac_mode = mac_mode;
2020 		tw32_f(MAC_MODE, tp->mac_mode);
2021 		udelay(40);
2022 	}
2023 
2024 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 		if (phydev->speed == SPEED_10)
2026 			tw32(MAC_MI_STAT,
2027 			     MAC_MI_STAT_10MBPS_MODE |
2028 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2029 		else
2030 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2031 	}
2032 
2033 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 		tw32(MAC_TX_LENGTHS,
2035 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2037 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2038 	else
2039 		tw32(MAC_TX_LENGTHS,
2040 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2042 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2043 
2044 	if (phydev->link != tp->old_link ||
2045 	    phydev->speed != tp->link_config.active_speed ||
2046 	    phydev->duplex != tp->link_config.active_duplex ||
2047 	    oldflowctrl != tp->link_config.active_flowctrl)
2048 		linkmesg = 1;
2049 
2050 	tp->old_link = phydev->link;
2051 	tp->link_config.active_speed = phydev->speed;
2052 	tp->link_config.active_duplex = phydev->duplex;
2053 
2054 	spin_unlock_bh(&tp->lock);
2055 
2056 	if (linkmesg)
2057 		tg3_link_report(tp);
2058 }
2059 
2060 static int tg3_phy_init(struct tg3 *tp)
2061 {
2062 	struct phy_device *phydev;
2063 
2064 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2065 		return 0;
2066 
2067 	/* Bring the PHY back to a known state. */
2068 	tg3_bmcr_reset(tp);
2069 
2070 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2071 
2072 	/* Attach the MAC to the PHY. */
2073 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 			     tg3_adjust_link, phydev->interface);
2075 	if (IS_ERR(phydev)) {
2076 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 		return PTR_ERR(phydev);
2078 	}
2079 
2080 	/* Mask with MAC supported features. */
2081 	switch (phydev->interface) {
2082 	case PHY_INTERFACE_MODE_GMII:
2083 	case PHY_INTERFACE_MODE_RGMII:
2084 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 			phydev->supported &= (PHY_GBIT_FEATURES |
2086 					      SUPPORTED_Pause |
2087 					      SUPPORTED_Asym_Pause);
2088 			break;
2089 		}
2090 		/* fallthru */
2091 	case PHY_INTERFACE_MODE_MII:
2092 		phydev->supported &= (PHY_BASIC_FEATURES |
2093 				      SUPPORTED_Pause |
2094 				      SUPPORTED_Asym_Pause);
2095 		break;
2096 	default:
2097 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2098 		return -EINVAL;
2099 	}
2100 
2101 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2102 
2103 	phydev->advertising = phydev->supported;
2104 
2105 	return 0;
2106 }
2107 
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110 	struct phy_device *phydev;
2111 
2112 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113 		return;
2114 
2115 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116 
2117 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 		phydev->speed = tp->link_config.speed;
2120 		phydev->duplex = tp->link_config.duplex;
2121 		phydev->autoneg = tp->link_config.autoneg;
2122 		phydev->advertising = tp->link_config.advertising;
2123 	}
2124 
2125 	phy_start(phydev);
2126 
2127 	phy_start_aneg(phydev);
2128 }
2129 
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 		return;
2134 
2135 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137 
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143 	}
2144 }
2145 
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148 	int err;
2149 	u32 val;
2150 
2151 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152 		return 0;
2153 
2154 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 		/* Cannot do read-modify-write on 5401 */
2156 		err = tg3_phy_auxctl_write(tp,
2157 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159 					   0x4c20);
2160 		goto done;
2161 	}
2162 
2163 	err = tg3_phy_auxctl_read(tp,
2164 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165 	if (err)
2166 		return err;
2167 
2168 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 	err = tg3_phy_auxctl_write(tp,
2170 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171 
2172 done:
2173 	return err;
2174 }
2175 
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178 	u32 phytest;
2179 
2180 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181 		u32 phy;
2182 
2183 		tg3_writephy(tp, MII_TG3_FET_TEST,
2184 			     phytest | MII_TG3_FET_SHADOW_EN);
2185 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186 			if (enable)
2187 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188 			else
2189 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191 		}
2192 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193 	}
2194 }
2195 
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2197 {
2198 	u32 reg;
2199 
2200 	if (!tg3_flag(tp, 5705_PLUS) ||
2201 	    (tg3_flag(tp, 5717_PLUS) &&
2202 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2203 		return;
2204 
2205 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 		tg3_phy_fet_toggle_apd(tp, enable);
2207 		return;
2208 	}
2209 
2210 	reg = MII_TG3_MISC_SHDW_WREN |
2211 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2212 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2213 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2218 
2219 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2220 
2221 
2222 	reg = MII_TG3_MISC_SHDW_WREN |
2223 	      MII_TG3_MISC_SHDW_APD_SEL |
2224 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2225 	if (enable)
2226 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2227 
2228 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2229 }
2230 
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2232 {
2233 	u32 phy;
2234 
2235 	if (!tg3_flag(tp, 5705_PLUS) ||
2236 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2237 		return;
2238 
2239 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240 		u32 ephy;
2241 
2242 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2244 
2245 			tg3_writephy(tp, MII_TG3_FET_TEST,
2246 				     ephy | MII_TG3_FET_SHADOW_EN);
2247 			if (!tg3_readphy(tp, reg, &phy)) {
2248 				if (enable)
2249 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2250 				else
2251 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 				tg3_writephy(tp, reg, phy);
2253 			}
2254 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2255 		}
2256 	} else {
2257 		int ret;
2258 
2259 		ret = tg3_phy_auxctl_read(tp,
2260 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2261 		if (!ret) {
2262 			if (enable)
2263 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2264 			else
2265 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 			tg3_phy_auxctl_write(tp,
2267 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2268 		}
2269 	}
2270 }
2271 
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274 	int ret;
2275 	u32 val;
2276 
2277 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278 		return;
2279 
2280 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281 	if (!ret)
2282 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285 
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2287 {
2288 	u32 otp, phy;
2289 
2290 	if (!tp->phy_otp)
2291 		return;
2292 
2293 	otp = tp->phy_otp;
2294 
2295 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2296 		return;
2297 
2298 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2301 
2302 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2305 
2306 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2309 
2310 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2312 
2313 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2315 
2316 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2319 
2320 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2321 }
2322 
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2324 {
2325 	u32 val;
2326 
2327 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2328 		return;
2329 
2330 	tp->setlpicnt = 0;
2331 
2332 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2333 	    current_link_up &&
2334 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2335 	    (tp->link_config.active_speed == SPEED_100 ||
2336 	     tp->link_config.active_speed == SPEED_1000)) {
2337 		u32 eeectl;
2338 
2339 		if (tp->link_config.active_speed == SPEED_1000)
2340 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2341 		else
2342 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2343 
2344 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2345 
2346 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 				  TG3_CL45_D7_EEERES_STAT, &val);
2348 
2349 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 			tp->setlpicnt = 2;
2352 	}
2353 
2354 	if (!tp->setlpicnt) {
2355 		if (current_link_up &&
2356 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2359 		}
2360 
2361 		val = tr32(TG3_CPMU_EEE_MODE);
2362 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2363 	}
2364 }
2365 
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2367 {
2368 	u32 val;
2369 
2370 	if (tp->link_config.active_speed == SPEED_1000 &&
2371 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 	     tg3_flag(tp, 57765_CLASS)) &&
2374 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 		val = MII_TG3_DSP_TAP26_ALNOKO |
2376 		      MII_TG3_DSP_TAP26_RMRXSTO;
2377 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2379 	}
2380 
2381 	val = tr32(TG3_CPMU_EEE_MODE);
2382 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2383 }
2384 
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2386 {
2387 	int limit = 100;
2388 
2389 	while (limit--) {
2390 		u32 tmp32;
2391 
2392 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 			if ((tmp32 & 0x1000) == 0)
2394 				break;
2395 		}
2396 	}
2397 	if (limit < 0)
2398 		return -EBUSY;
2399 
2400 	return 0;
2401 }
2402 
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2404 {
2405 	static const u32 test_pat[4][6] = {
2406 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2410 	};
2411 	int chan;
2412 
2413 	for (chan = 0; chan < 4; chan++) {
2414 		int i;
2415 
2416 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 			     (chan * 0x2000) | 0x0200);
2418 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2419 
2420 		for (i = 0; i < 6; i++)
2421 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2422 				     test_pat[chan][i]);
2423 
2424 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 		if (tg3_wait_macro_done(tp)) {
2426 			*resetp = 1;
2427 			return -EBUSY;
2428 		}
2429 
2430 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 			     (chan * 0x2000) | 0x0200);
2432 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 		if (tg3_wait_macro_done(tp)) {
2434 			*resetp = 1;
2435 			return -EBUSY;
2436 		}
2437 
2438 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 		if (tg3_wait_macro_done(tp)) {
2440 			*resetp = 1;
2441 			return -EBUSY;
2442 		}
2443 
2444 		for (i = 0; i < 6; i += 2) {
2445 			u32 low, high;
2446 
2447 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 			    tg3_wait_macro_done(tp)) {
2450 				*resetp = 1;
2451 				return -EBUSY;
2452 			}
2453 			low &= 0x7fff;
2454 			high &= 0x000f;
2455 			if (low != test_pat[chan][i] ||
2456 			    high != test_pat[chan][i+1]) {
2457 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2460 
2461 				return -EBUSY;
2462 			}
2463 		}
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2470 {
2471 	int chan;
2472 
2473 	for (chan = 0; chan < 4; chan++) {
2474 		int i;
2475 
2476 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 			     (chan * 0x2000) | 0x0200);
2478 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 		for (i = 0; i < 6; i++)
2480 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 		if (tg3_wait_macro_done(tp))
2483 			return -EBUSY;
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2490 {
2491 	u32 reg32, phy9_orig;
2492 	int retries, do_phy_reset, err;
2493 
2494 	retries = 10;
2495 	do_phy_reset = 1;
2496 	do {
2497 		if (do_phy_reset) {
2498 			err = tg3_bmcr_reset(tp);
2499 			if (err)
2500 				return err;
2501 			do_phy_reset = 0;
2502 		}
2503 
2504 		/* Disable transmitter and interrupt.  */
2505 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2506 			continue;
2507 
2508 		reg32 |= 0x3000;
2509 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2510 
2511 		/* Set full-duplex, 1000 mbps.  */
2512 		tg3_writephy(tp, MII_BMCR,
2513 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2514 
2515 		/* Set to master mode.  */
2516 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2517 			continue;
2518 
2519 		tg3_writephy(tp, MII_CTRL1000,
2520 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2521 
2522 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2523 		if (err)
2524 			return err;
2525 
2526 		/* Block the PHY control access.  */
2527 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2528 
2529 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2530 		if (!err)
2531 			break;
2532 	} while (--retries);
2533 
2534 	err = tg3_phy_reset_chanpat(tp);
2535 	if (err)
2536 		return err;
2537 
2538 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2539 
2540 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2542 
2543 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2544 
2545 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2546 
2547 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2548 		reg32 &= ~0x3000;
2549 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2550 	} else if (!err)
2551 		err = -EBUSY;
2552 
2553 	return err;
2554 }
2555 
2556 static void tg3_carrier_off(struct tg3 *tp)
2557 {
2558 	netif_carrier_off(tp->dev);
2559 	tp->link_up = false;
2560 }
2561 
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563 {
2564 	if (tg3_flag(tp, ENABLE_ASF))
2565 		netdev_warn(tp->dev,
2566 			    "Management side-band traffic will be interrupted during phy settings change\n");
2567 }
2568 
2569 /* This will reset the tigon3 PHY if there is no valid
2570  * link unless the FORCE argument is non-zero.
2571  */
2572 static int tg3_phy_reset(struct tg3 *tp)
2573 {
2574 	u32 val, cpmuctrl;
2575 	int err;
2576 
2577 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 		val = tr32(GRC_MISC_CFG);
2579 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2580 		udelay(40);
2581 	}
2582 	err  = tg3_readphy(tp, MII_BMSR, &val);
2583 	err |= tg3_readphy(tp, MII_BMSR, &val);
2584 	if (err != 0)
2585 		return -EBUSY;
2586 
2587 	if (netif_running(tp->dev) && tp->link_up) {
2588 		netif_carrier_off(tp->dev);
2589 		tg3_link_report(tp);
2590 	}
2591 
2592 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 		err = tg3_phy_reset_5703_4_5(tp);
2596 		if (err)
2597 			return err;
2598 		goto out;
2599 	}
2600 
2601 	cpmuctrl = 0;
2602 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2606 			tw32(TG3_CPMU_CTRL,
2607 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2608 	}
2609 
2610 	err = tg3_bmcr_reset(tp);
2611 	if (err)
2612 		return err;
2613 
2614 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2617 
2618 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2619 	}
2620 
2621 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2627 			udelay(40);
2628 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2629 		}
2630 	}
2631 
2632 	if (tg3_flag(tp, 5717_PLUS) &&
2633 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2634 		return 0;
2635 
2636 	tg3_phy_apply_otp(tp);
2637 
2638 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 		tg3_phy_toggle_apd(tp, true);
2640 	else
2641 		tg3_phy_toggle_apd(tp, false);
2642 
2643 out:
2644 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 	}
2650 
2651 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2654 	}
2655 
2656 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2662 		}
2663 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 				tg3_writephy(tp, MII_TG3_TEST1,
2669 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2670 			} else
2671 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2672 
2673 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2674 		}
2675 	}
2676 
2677 	/* Set Extended packet length bit (bit 14) on all chips that */
2678 	/* support jumbo frames */
2679 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 		/* Cannot do read-modify-write on 5401 */
2681 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 		/* Set bit 14 with read-modify-write to preserve other bits */
2684 		err = tg3_phy_auxctl_read(tp,
2685 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2686 		if (!err)
2687 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2689 	}
2690 
2691 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 	 * jumbo frames transmission.
2693 	 */
2694 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2698 	}
2699 
2700 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 		/* adjust output voltage */
2702 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2703 	}
2704 
2705 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2707 
2708 	tg3_phy_toggle_automdix(tp, true);
2709 	tg3_phy_set_wirespeed(tp);
2710 	return 0;
2711 }
2712 
2713 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2715 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2716 					  TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2722 
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2728 
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2730 {
2731 	u32 status, shift;
2732 
2733 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2735 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2736 	else
2737 		status = tr32(TG3_CPMU_DRV_STATUS);
2738 
2739 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 	status |= (newstat << shift);
2742 
2743 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2745 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2746 	else
2747 		tw32(TG3_CPMU_DRV_STATUS, status);
2748 
2749 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2750 }
2751 
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2753 {
2754 	if (!tg3_flag(tp, IS_NIC))
2755 		return 0;
2756 
2757 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761 			return -EIO;
2762 
2763 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2764 
2765 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 
2768 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2769 	} else {
2770 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2772 	}
2773 
2774 	return 0;
2775 }
2776 
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2778 {
2779 	u32 grc_local_ctrl;
2780 
2781 	if (!tg3_flag(tp, IS_NIC) ||
2782 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2784 		return;
2785 
2786 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2787 
2788 	tw32_wait_f(GRC_LOCAL_CTRL,
2789 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2791 
2792 	tw32_wait_f(GRC_LOCAL_CTRL,
2793 		    grc_local_ctrl,
2794 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2795 
2796 	tw32_wait_f(GRC_LOCAL_CTRL,
2797 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 }
2800 
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2802 {
2803 	if (!tg3_flag(tp, IS_NIC))
2804 		return;
2805 
2806 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 			    (GRC_LCLCTRL_GPIO_OE0 |
2810 			     GRC_LCLCTRL_GPIO_OE1 |
2811 			     GRC_LCLCTRL_GPIO_OE2 |
2812 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2814 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 				     GRC_LCLCTRL_GPIO_OE1 |
2820 				     GRC_LCLCTRL_GPIO_OE2 |
2821 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2823 				     tp->grc_local_ctrl;
2824 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 
2827 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2830 
2831 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 	} else {
2835 		u32 no_gpio2;
2836 		u32 grc_local_ctrl = 0;
2837 
2838 		/* Workaround to prevent overdrawing Amps. */
2839 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2842 				    grc_local_ctrl,
2843 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 		}
2845 
2846 		/* On 5753 and variants, GPIO2 cannot be used. */
2847 		no_gpio2 = tp->nic_sram_data_cfg &
2848 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2849 
2850 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 				  GRC_LCLCTRL_GPIO_OE1 |
2852 				  GRC_LCLCTRL_GPIO_OE2 |
2853 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2855 		if (no_gpio2) {
2856 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2858 		}
2859 		tw32_wait_f(GRC_LOCAL_CTRL,
2860 			    tp->grc_local_ctrl | grc_local_ctrl,
2861 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2862 
2863 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2864 
2865 		tw32_wait_f(GRC_LOCAL_CTRL,
2866 			    tp->grc_local_ctrl | grc_local_ctrl,
2867 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2868 
2869 		if (!no_gpio2) {
2870 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 			tw32_wait_f(GRC_LOCAL_CTRL,
2872 				    tp->grc_local_ctrl | grc_local_ctrl,
2873 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 		}
2875 	}
2876 }
2877 
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2879 {
2880 	u32 msg = 0;
2881 
2882 	/* Serialize power state transitions */
2883 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2884 		return;
2885 
2886 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 		msg = TG3_GPIO_MSG_NEED_VAUX;
2888 
2889 	msg = tg3_set_function_status(tp, msg);
2890 
2891 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2892 		goto done;
2893 
2894 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 		tg3_pwrsrc_switch_to_vaux(tp);
2896 	else
2897 		tg3_pwrsrc_die_with_vmain(tp);
2898 
2899 done:
2900 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2901 }
2902 
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2904 {
2905 	bool need_vaux = false;
2906 
2907 	/* The GPIOs do something completely different on 57765. */
2908 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2909 		return;
2910 
2911 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 		tg3_frob_aux_power_5717(tp, include_wol ?
2915 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2916 		return;
2917 	}
2918 
2919 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 		struct net_device *dev_peer;
2921 
2922 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2923 
2924 		/* remove_one() may have been run on the peer. */
2925 		if (dev_peer) {
2926 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2927 
2928 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2929 				return;
2930 
2931 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 			    tg3_flag(tp_peer, ENABLE_ASF))
2933 				need_vaux = true;
2934 		}
2935 	}
2936 
2937 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 	    tg3_flag(tp, ENABLE_ASF))
2939 		need_vaux = true;
2940 
2941 	if (need_vaux)
2942 		tg3_pwrsrc_switch_to_vaux(tp);
2943 	else
2944 		tg3_pwrsrc_die_with_vmain(tp);
2945 }
2946 
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2948 {
2949 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2950 		return 1;
2951 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 		if (speed != SPEED_10)
2953 			return 1;
2954 	} else if (speed == SPEED_10)
2955 		return 1;
2956 
2957 	return 0;
2958 }
2959 
2960 static bool tg3_phy_power_bug(struct tg3 *tp)
2961 {
2962 	switch (tg3_asic_rev(tp)) {
2963 	case ASIC_REV_5700:
2964 	case ASIC_REV_5704:
2965 		return true;
2966 	case ASIC_REV_5780:
2967 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2968 			return true;
2969 		return false;
2970 	case ASIC_REV_5717:
2971 		if (!tp->pci_fn)
2972 			return true;
2973 		return false;
2974 	case ASIC_REV_5719:
2975 	case ASIC_REV_5720:
2976 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2977 		    !tp->pci_fn)
2978 			return true;
2979 		return false;
2980 	}
2981 
2982 	return false;
2983 }
2984 
2985 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2986 {
2987 	u32 val;
2988 
2989 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2990 		return;
2991 
2992 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2993 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2994 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2995 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2996 
2997 			sg_dig_ctrl |=
2998 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2999 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3000 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3001 		}
3002 		return;
3003 	}
3004 
3005 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3006 		tg3_bmcr_reset(tp);
3007 		val = tr32(GRC_MISC_CFG);
3008 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3009 		udelay(40);
3010 		return;
3011 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3012 		u32 phytest;
3013 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3014 			u32 phy;
3015 
3016 			tg3_writephy(tp, MII_ADVERTISE, 0);
3017 			tg3_writephy(tp, MII_BMCR,
3018 				     BMCR_ANENABLE | BMCR_ANRESTART);
3019 
3020 			tg3_writephy(tp, MII_TG3_FET_TEST,
3021 				     phytest | MII_TG3_FET_SHADOW_EN);
3022 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3023 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3024 				tg3_writephy(tp,
3025 					     MII_TG3_FET_SHDW_AUXMODE4,
3026 					     phy);
3027 			}
3028 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3029 		}
3030 		return;
3031 	} else if (do_low_power) {
3032 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
3033 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3034 
3035 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3036 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3037 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3038 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3039 	}
3040 
3041 	/* The PHY should not be powered down on some chips because
3042 	 * of bugs.
3043 	 */
3044 	if (tg3_phy_power_bug(tp))
3045 		return;
3046 
3047 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3048 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3049 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3050 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3051 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3052 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3053 	}
3054 
3055 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3056 }
3057 
3058 /* tp->lock is held. */
3059 static int tg3_nvram_lock(struct tg3 *tp)
3060 {
3061 	if (tg3_flag(tp, NVRAM)) {
3062 		int i;
3063 
3064 		if (tp->nvram_lock_cnt == 0) {
3065 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3066 			for (i = 0; i < 8000; i++) {
3067 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3068 					break;
3069 				udelay(20);
3070 			}
3071 			if (i == 8000) {
3072 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3073 				return -ENODEV;
3074 			}
3075 		}
3076 		tp->nvram_lock_cnt++;
3077 	}
3078 	return 0;
3079 }
3080 
3081 /* tp->lock is held. */
3082 static void tg3_nvram_unlock(struct tg3 *tp)
3083 {
3084 	if (tg3_flag(tp, NVRAM)) {
3085 		if (tp->nvram_lock_cnt > 0)
3086 			tp->nvram_lock_cnt--;
3087 		if (tp->nvram_lock_cnt == 0)
3088 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3089 	}
3090 }
3091 
3092 /* tp->lock is held. */
3093 static void tg3_enable_nvram_access(struct tg3 *tp)
3094 {
3095 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3096 		u32 nvaccess = tr32(NVRAM_ACCESS);
3097 
3098 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3099 	}
3100 }
3101 
3102 /* tp->lock is held. */
3103 static void tg3_disable_nvram_access(struct tg3 *tp)
3104 {
3105 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3106 		u32 nvaccess = tr32(NVRAM_ACCESS);
3107 
3108 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3109 	}
3110 }
3111 
3112 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3113 					u32 offset, u32 *val)
3114 {
3115 	u32 tmp;
3116 	int i;
3117 
3118 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3119 		return -EINVAL;
3120 
3121 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3122 					EEPROM_ADDR_DEVID_MASK |
3123 					EEPROM_ADDR_READ);
3124 	tw32(GRC_EEPROM_ADDR,
3125 	     tmp |
3126 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3127 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3128 	      EEPROM_ADDR_ADDR_MASK) |
3129 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3130 
3131 	for (i = 0; i < 1000; i++) {
3132 		tmp = tr32(GRC_EEPROM_ADDR);
3133 
3134 		if (tmp & EEPROM_ADDR_COMPLETE)
3135 			break;
3136 		msleep(1);
3137 	}
3138 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3139 		return -EBUSY;
3140 
3141 	tmp = tr32(GRC_EEPROM_DATA);
3142 
3143 	/*
3144 	 * The data will always be opposite the native endian
3145 	 * format.  Perform a blind byteswap to compensate.
3146 	 */
3147 	*val = swab32(tmp);
3148 
3149 	return 0;
3150 }
3151 
3152 #define NVRAM_CMD_TIMEOUT 10000
3153 
3154 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3155 {
3156 	int i;
3157 
3158 	tw32(NVRAM_CMD, nvram_cmd);
3159 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3160 		udelay(10);
3161 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3162 			udelay(10);
3163 			break;
3164 		}
3165 	}
3166 
3167 	if (i == NVRAM_CMD_TIMEOUT)
3168 		return -EBUSY;
3169 
3170 	return 0;
3171 }
3172 
3173 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3174 {
3175 	if (tg3_flag(tp, NVRAM) &&
3176 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3177 	    tg3_flag(tp, FLASH) &&
3178 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3179 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3180 
3181 		addr = ((addr / tp->nvram_pagesize) <<
3182 			ATMEL_AT45DB0X1B_PAGE_POS) +
3183 		       (addr % tp->nvram_pagesize);
3184 
3185 	return addr;
3186 }
3187 
3188 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3189 {
3190 	if (tg3_flag(tp, NVRAM) &&
3191 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3192 	    tg3_flag(tp, FLASH) &&
3193 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3194 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3195 
3196 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3197 			tp->nvram_pagesize) +
3198 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3199 
3200 	return addr;
3201 }
3202 
3203 /* NOTE: Data read in from NVRAM is byteswapped according to
3204  * the byteswapping settings for all other register accesses.
3205  * tg3 devices are BE devices, so on a BE machine, the data
3206  * returned will be exactly as it is seen in NVRAM.  On a LE
3207  * machine, the 32-bit value will be byteswapped.
3208  */
3209 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3210 {
3211 	int ret;
3212 
3213 	if (!tg3_flag(tp, NVRAM))
3214 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3215 
3216 	offset = tg3_nvram_phys_addr(tp, offset);
3217 
3218 	if (offset > NVRAM_ADDR_MSK)
3219 		return -EINVAL;
3220 
3221 	ret = tg3_nvram_lock(tp);
3222 	if (ret)
3223 		return ret;
3224 
3225 	tg3_enable_nvram_access(tp);
3226 
3227 	tw32(NVRAM_ADDR, offset);
3228 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3229 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3230 
3231 	if (ret == 0)
3232 		*val = tr32(NVRAM_RDDATA);
3233 
3234 	tg3_disable_nvram_access(tp);
3235 
3236 	tg3_nvram_unlock(tp);
3237 
3238 	return ret;
3239 }
3240 
3241 /* Ensures NVRAM data is in bytestream format. */
3242 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3243 {
3244 	u32 v;
3245 	int res = tg3_nvram_read(tp, offset, &v);
3246 	if (!res)
3247 		*val = cpu_to_be32(v);
3248 	return res;
3249 }
3250 
3251 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3252 				    u32 offset, u32 len, u8 *buf)
3253 {
3254 	int i, j, rc = 0;
3255 	u32 val;
3256 
3257 	for (i = 0; i < len; i += 4) {
3258 		u32 addr;
3259 		__be32 data;
3260 
3261 		addr = offset + i;
3262 
3263 		memcpy(&data, buf + i, 4);
3264 
3265 		/*
3266 		 * The SEEPROM interface expects the data to always be opposite
3267 		 * the native endian format.  We accomplish this by reversing
3268 		 * all the operations that would have been performed on the
3269 		 * data from a call to tg3_nvram_read_be32().
3270 		 */
3271 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3272 
3273 		val = tr32(GRC_EEPROM_ADDR);
3274 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3275 
3276 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3277 			EEPROM_ADDR_READ);
3278 		tw32(GRC_EEPROM_ADDR, val |
3279 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3280 			(addr & EEPROM_ADDR_ADDR_MASK) |
3281 			EEPROM_ADDR_START |
3282 			EEPROM_ADDR_WRITE);
3283 
3284 		for (j = 0; j < 1000; j++) {
3285 			val = tr32(GRC_EEPROM_ADDR);
3286 
3287 			if (val & EEPROM_ADDR_COMPLETE)
3288 				break;
3289 			msleep(1);
3290 		}
3291 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3292 			rc = -EBUSY;
3293 			break;
3294 		}
3295 	}
3296 
3297 	return rc;
3298 }
3299 
3300 /* offset and length are dword aligned */
3301 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3302 		u8 *buf)
3303 {
3304 	int ret = 0;
3305 	u32 pagesize = tp->nvram_pagesize;
3306 	u32 pagemask = pagesize - 1;
3307 	u32 nvram_cmd;
3308 	u8 *tmp;
3309 
3310 	tmp = kmalloc(pagesize, GFP_KERNEL);
3311 	if (tmp == NULL)
3312 		return -ENOMEM;
3313 
3314 	while (len) {
3315 		int j;
3316 		u32 phy_addr, page_off, size;
3317 
3318 		phy_addr = offset & ~pagemask;
3319 
3320 		for (j = 0; j < pagesize; j += 4) {
3321 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3322 						  (__be32 *) (tmp + j));
3323 			if (ret)
3324 				break;
3325 		}
3326 		if (ret)
3327 			break;
3328 
3329 		page_off = offset & pagemask;
3330 		size = pagesize;
3331 		if (len < size)
3332 			size = len;
3333 
3334 		len -= size;
3335 
3336 		memcpy(tmp + page_off, buf, size);
3337 
3338 		offset = offset + (pagesize - page_off);
3339 
3340 		tg3_enable_nvram_access(tp);
3341 
3342 		/*
3343 		 * Before we can erase the flash page, we need
3344 		 * to issue a special "write enable" command.
3345 		 */
3346 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3347 
3348 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3349 			break;
3350 
3351 		/* Erase the target page */
3352 		tw32(NVRAM_ADDR, phy_addr);
3353 
3354 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3355 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3356 
3357 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3358 			break;
3359 
3360 		/* Issue another write enable to start the write. */
3361 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3362 
3363 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3364 			break;
3365 
3366 		for (j = 0; j < pagesize; j += 4) {
3367 			__be32 data;
3368 
3369 			data = *((__be32 *) (tmp + j));
3370 
3371 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3372 
3373 			tw32(NVRAM_ADDR, phy_addr + j);
3374 
3375 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3376 				NVRAM_CMD_WR;
3377 
3378 			if (j == 0)
3379 				nvram_cmd |= NVRAM_CMD_FIRST;
3380 			else if (j == (pagesize - 4))
3381 				nvram_cmd |= NVRAM_CMD_LAST;
3382 
3383 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3384 			if (ret)
3385 				break;
3386 		}
3387 		if (ret)
3388 			break;
3389 	}
3390 
3391 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3392 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3393 
3394 	kfree(tmp);
3395 
3396 	return ret;
3397 }
3398 
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3401 		u8 *buf)
3402 {
3403 	int i, ret = 0;
3404 
3405 	for (i = 0; i < len; i += 4, offset += 4) {
3406 		u32 page_off, phy_addr, nvram_cmd;
3407 		__be32 data;
3408 
3409 		memcpy(&data, buf + i, 4);
3410 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3411 
3412 		page_off = offset % tp->nvram_pagesize;
3413 
3414 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3415 
3416 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3417 
3418 		if (page_off == 0 || i == 0)
3419 			nvram_cmd |= NVRAM_CMD_FIRST;
3420 		if (page_off == (tp->nvram_pagesize - 4))
3421 			nvram_cmd |= NVRAM_CMD_LAST;
3422 
3423 		if (i == (len - 4))
3424 			nvram_cmd |= NVRAM_CMD_LAST;
3425 
3426 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3427 		    !tg3_flag(tp, FLASH) ||
3428 		    !tg3_flag(tp, 57765_PLUS))
3429 			tw32(NVRAM_ADDR, phy_addr);
3430 
3431 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3432 		    !tg3_flag(tp, 5755_PLUS) &&
3433 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3434 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3435 			u32 cmd;
3436 
3437 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438 			ret = tg3_nvram_exec_cmd(tp, cmd);
3439 			if (ret)
3440 				break;
3441 		}
3442 		if (!tg3_flag(tp, FLASH)) {
3443 			/* We always do complete word writes to eeprom. */
3444 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3445 		}
3446 
3447 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3448 		if (ret)
3449 			break;
3450 	}
3451 	return ret;
3452 }
3453 
3454 /* offset and length are dword aligned */
3455 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3456 {
3457 	int ret;
3458 
3459 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3460 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3461 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3462 		udelay(40);
3463 	}
3464 
3465 	if (!tg3_flag(tp, NVRAM)) {
3466 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3467 	} else {
3468 		u32 grc_mode;
3469 
3470 		ret = tg3_nvram_lock(tp);
3471 		if (ret)
3472 			return ret;
3473 
3474 		tg3_enable_nvram_access(tp);
3475 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3476 			tw32(NVRAM_WRITE1, 0x406);
3477 
3478 		grc_mode = tr32(GRC_MODE);
3479 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3480 
3481 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3482 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3483 				buf);
3484 		} else {
3485 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3486 				buf);
3487 		}
3488 
3489 		grc_mode = tr32(GRC_MODE);
3490 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3491 
3492 		tg3_disable_nvram_access(tp);
3493 		tg3_nvram_unlock(tp);
3494 	}
3495 
3496 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3497 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3498 		udelay(40);
3499 	}
3500 
3501 	return ret;
3502 }
3503 
3504 #define RX_CPU_SCRATCH_BASE	0x30000
3505 #define RX_CPU_SCRATCH_SIZE	0x04000
3506 #define TX_CPU_SCRATCH_BASE	0x34000
3507 #define TX_CPU_SCRATCH_SIZE	0x04000
3508 
3509 /* tp->lock is held. */
3510 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3511 {
3512 	int i;
3513 	const int iters = 10000;
3514 
3515 	for (i = 0; i < iters; i++) {
3516 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3517 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3518 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3519 			break;
3520 	}
3521 
3522 	return (i == iters) ? -EBUSY : 0;
3523 }
3524 
3525 /* tp->lock is held. */
3526 static int tg3_rxcpu_pause(struct tg3 *tp)
3527 {
3528 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3529 
3530 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3531 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3532 	udelay(10);
3533 
3534 	return rc;
3535 }
3536 
3537 /* tp->lock is held. */
3538 static int tg3_txcpu_pause(struct tg3 *tp)
3539 {
3540 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3541 }
3542 
3543 /* tp->lock is held. */
3544 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3545 {
3546 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3547 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3548 }
3549 
3550 /* tp->lock is held. */
3551 static void tg3_rxcpu_resume(struct tg3 *tp)
3552 {
3553 	tg3_resume_cpu(tp, RX_CPU_BASE);
3554 }
3555 
3556 /* tp->lock is held. */
3557 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3558 {
3559 	int rc;
3560 
3561 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3562 
3563 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3564 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3565 
3566 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3567 		return 0;
3568 	}
3569 	if (cpu_base == RX_CPU_BASE) {
3570 		rc = tg3_rxcpu_pause(tp);
3571 	} else {
3572 		/*
3573 		 * There is only an Rx CPU for the 5750 derivative in the
3574 		 * BCM4785.
3575 		 */
3576 		if (tg3_flag(tp, IS_SSB_CORE))
3577 			return 0;
3578 
3579 		rc = tg3_txcpu_pause(tp);
3580 	}
3581 
3582 	if (rc) {
3583 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3584 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3585 		return -ENODEV;
3586 	}
3587 
3588 	/* Clear firmware's nvram arbitration. */
3589 	if (tg3_flag(tp, NVRAM))
3590 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3591 	return 0;
3592 }
3593 
3594 static int tg3_fw_data_len(struct tg3 *tp,
3595 			   const struct tg3_firmware_hdr *fw_hdr)
3596 {
3597 	int fw_len;
3598 
3599 	/* Non fragmented firmware have one firmware header followed by a
3600 	 * contiguous chunk of data to be written. The length field in that
3601 	 * header is not the length of data to be written but the complete
3602 	 * length of the bss. The data length is determined based on
3603 	 * tp->fw->size minus headers.
3604 	 *
3605 	 * Fragmented firmware have a main header followed by multiple
3606 	 * fragments. Each fragment is identical to non fragmented firmware
3607 	 * with a firmware header followed by a contiguous chunk of data. In
3608 	 * the main header, the length field is unused and set to 0xffffffff.
3609 	 * In each fragment header the length is the entire size of that
3610 	 * fragment i.e. fragment data + header length. Data length is
3611 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3612 	 */
3613 	if (tp->fw_len == 0xffffffff)
3614 		fw_len = be32_to_cpu(fw_hdr->len);
3615 	else
3616 		fw_len = tp->fw->size;
3617 
3618 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3619 }
3620 
3621 /* tp->lock is held. */
3622 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3623 				 u32 cpu_scratch_base, int cpu_scratch_size,
3624 				 const struct tg3_firmware_hdr *fw_hdr)
3625 {
3626 	int err, i;
3627 	void (*write_op)(struct tg3 *, u32, u32);
3628 	int total_len = tp->fw->size;
3629 
3630 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3631 		netdev_err(tp->dev,
3632 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3633 			   __func__);
3634 		return -EINVAL;
3635 	}
3636 
3637 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3638 		write_op = tg3_write_mem;
3639 	else
3640 		write_op = tg3_write_indirect_reg32;
3641 
3642 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3643 		/* It is possible that bootcode is still loading at this point.
3644 		 * Get the nvram lock first before halting the cpu.
3645 		 */
3646 		int lock_err = tg3_nvram_lock(tp);
3647 		err = tg3_halt_cpu(tp, cpu_base);
3648 		if (!lock_err)
3649 			tg3_nvram_unlock(tp);
3650 		if (err)
3651 			goto out;
3652 
3653 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3654 			write_op(tp, cpu_scratch_base + i, 0);
3655 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3656 		tw32(cpu_base + CPU_MODE,
3657 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3658 	} else {
3659 		/* Subtract additional main header for fragmented firmware and
3660 		 * advance to the first fragment
3661 		 */
3662 		total_len -= TG3_FW_HDR_LEN;
3663 		fw_hdr++;
3664 	}
3665 
3666 	do {
3667 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3668 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3669 			write_op(tp, cpu_scratch_base +
3670 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3671 				     (i * sizeof(u32)),
3672 				 be32_to_cpu(fw_data[i]));
3673 
3674 		total_len -= be32_to_cpu(fw_hdr->len);
3675 
3676 		/* Advance to next fragment */
3677 		fw_hdr = (struct tg3_firmware_hdr *)
3678 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3679 	} while (total_len > 0);
3680 
3681 	err = 0;
3682 
3683 out:
3684 	return err;
3685 }
3686 
3687 /* tp->lock is held. */
3688 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3689 {
3690 	int i;
3691 	const int iters = 5;
3692 
3693 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3694 	tw32_f(cpu_base + CPU_PC, pc);
3695 
3696 	for (i = 0; i < iters; i++) {
3697 		if (tr32(cpu_base + CPU_PC) == pc)
3698 			break;
3699 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3700 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3701 		tw32_f(cpu_base + CPU_PC, pc);
3702 		udelay(1000);
3703 	}
3704 
3705 	return (i == iters) ? -EBUSY : 0;
3706 }
3707 
3708 /* tp->lock is held. */
3709 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3710 {
3711 	const struct tg3_firmware_hdr *fw_hdr;
3712 	int err;
3713 
3714 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3715 
3716 	/* Firmware blob starts with version numbers, followed by
3717 	   start address and length. We are setting complete length.
3718 	   length = end_address_of_bss - start_address_of_text.
3719 	   Remainder is the blob to be loaded contiguously
3720 	   from start address. */
3721 
3722 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3723 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3724 				    fw_hdr);
3725 	if (err)
3726 		return err;
3727 
3728 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3729 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3730 				    fw_hdr);
3731 	if (err)
3732 		return err;
3733 
3734 	/* Now startup only the RX cpu. */
3735 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3736 				       be32_to_cpu(fw_hdr->base_addr));
3737 	if (err) {
3738 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3739 			   "should be %08x\n", __func__,
3740 			   tr32(RX_CPU_BASE + CPU_PC),
3741 				be32_to_cpu(fw_hdr->base_addr));
3742 		return -ENODEV;
3743 	}
3744 
3745 	tg3_rxcpu_resume(tp);
3746 
3747 	return 0;
3748 }
3749 
3750 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3751 {
3752 	const int iters = 1000;
3753 	int i;
3754 	u32 val;
3755 
3756 	/* Wait for boot code to complete initialization and enter service
3757 	 * loop. It is then safe to download service patches
3758 	 */
3759 	for (i = 0; i < iters; i++) {
3760 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3761 			break;
3762 
3763 		udelay(10);
3764 	}
3765 
3766 	if (i == iters) {
3767 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3768 		return -EBUSY;
3769 	}
3770 
3771 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3772 	if (val & 0xff) {
3773 		netdev_warn(tp->dev,
3774 			    "Other patches exist. Not downloading EEE patch\n");
3775 		return -EEXIST;
3776 	}
3777 
3778 	return 0;
3779 }
3780 
3781 /* tp->lock is held. */
3782 static void tg3_load_57766_firmware(struct tg3 *tp)
3783 {
3784 	struct tg3_firmware_hdr *fw_hdr;
3785 
3786 	if (!tg3_flag(tp, NO_NVRAM))
3787 		return;
3788 
3789 	if (tg3_validate_rxcpu_state(tp))
3790 		return;
3791 
3792 	if (!tp->fw)
3793 		return;
3794 
3795 	/* This firmware blob has a different format than older firmware
3796 	 * releases as given below. The main difference is we have fragmented
3797 	 * data to be written to non-contiguous locations.
3798 	 *
3799 	 * In the beginning we have a firmware header identical to other
3800 	 * firmware which consists of version, base addr and length. The length
3801 	 * here is unused and set to 0xffffffff.
3802 	 *
3803 	 * This is followed by a series of firmware fragments which are
3804 	 * individually identical to previous firmware. i.e. they have the
3805 	 * firmware header and followed by data for that fragment. The version
3806 	 * field of the individual fragment header is unused.
3807 	 */
3808 
3809 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3810 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3811 		return;
3812 
3813 	if (tg3_rxcpu_pause(tp))
3814 		return;
3815 
3816 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3817 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3818 
3819 	tg3_rxcpu_resume(tp);
3820 }
3821 
3822 /* tp->lock is held. */
3823 static int tg3_load_tso_firmware(struct tg3 *tp)
3824 {
3825 	const struct tg3_firmware_hdr *fw_hdr;
3826 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3827 	int err;
3828 
3829 	if (!tg3_flag(tp, FW_TSO))
3830 		return 0;
3831 
3832 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3833 
3834 	/* Firmware blob starts with version numbers, followed by
3835 	   start address and length. We are setting complete length.
3836 	   length = end_address_of_bss - start_address_of_text.
3837 	   Remainder is the blob to be loaded contiguously
3838 	   from start address. */
3839 
3840 	cpu_scratch_size = tp->fw_len;
3841 
3842 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3843 		cpu_base = RX_CPU_BASE;
3844 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3845 	} else {
3846 		cpu_base = TX_CPU_BASE;
3847 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3848 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3849 	}
3850 
3851 	err = tg3_load_firmware_cpu(tp, cpu_base,
3852 				    cpu_scratch_base, cpu_scratch_size,
3853 				    fw_hdr);
3854 	if (err)
3855 		return err;
3856 
3857 	/* Now startup the cpu. */
3858 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3859 				       be32_to_cpu(fw_hdr->base_addr));
3860 	if (err) {
3861 		netdev_err(tp->dev,
3862 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3863 			   __func__, tr32(cpu_base + CPU_PC),
3864 			   be32_to_cpu(fw_hdr->base_addr));
3865 		return -ENODEV;
3866 	}
3867 
3868 	tg3_resume_cpu(tp, cpu_base);
3869 	return 0;
3870 }
3871 
3872 
3873 /* tp->lock is held. */
3874 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3875 {
3876 	u32 addr_high, addr_low;
3877 	int i;
3878 
3879 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3880 		     tp->dev->dev_addr[1]);
3881 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3882 		    (tp->dev->dev_addr[3] << 16) |
3883 		    (tp->dev->dev_addr[4] <<  8) |
3884 		    (tp->dev->dev_addr[5] <<  0));
3885 	for (i = 0; i < 4; i++) {
3886 		if (i == 1 && skip_mac_1)
3887 			continue;
3888 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3889 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3890 	}
3891 
3892 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3893 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3894 		for (i = 0; i < 12; i++) {
3895 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3896 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3897 		}
3898 	}
3899 
3900 	addr_high = (tp->dev->dev_addr[0] +
3901 		     tp->dev->dev_addr[1] +
3902 		     tp->dev->dev_addr[2] +
3903 		     tp->dev->dev_addr[3] +
3904 		     tp->dev->dev_addr[4] +
3905 		     tp->dev->dev_addr[5]) &
3906 		TX_BACKOFF_SEED_MASK;
3907 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3908 }
3909 
3910 static void tg3_enable_register_access(struct tg3 *tp)
3911 {
3912 	/*
3913 	 * Make sure register accesses (indirect or otherwise) will function
3914 	 * correctly.
3915 	 */
3916 	pci_write_config_dword(tp->pdev,
3917 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3918 }
3919 
3920 static int tg3_power_up(struct tg3 *tp)
3921 {
3922 	int err;
3923 
3924 	tg3_enable_register_access(tp);
3925 
3926 	err = pci_set_power_state(tp->pdev, PCI_D0);
3927 	if (!err) {
3928 		/* Switch out of Vaux if it is a NIC */
3929 		tg3_pwrsrc_switch_to_vmain(tp);
3930 	} else {
3931 		netdev_err(tp->dev, "Transition to D0 failed\n");
3932 	}
3933 
3934 	return err;
3935 }
3936 
3937 static int tg3_setup_phy(struct tg3 *, bool);
3938 
3939 static int tg3_power_down_prepare(struct tg3 *tp)
3940 {
3941 	u32 misc_host_ctrl;
3942 	bool device_should_wake, do_low_power;
3943 
3944 	tg3_enable_register_access(tp);
3945 
3946 	/* Restore the CLKREQ setting. */
3947 	if (tg3_flag(tp, CLKREQ_BUG))
3948 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3949 					 PCI_EXP_LNKCTL_CLKREQ_EN);
3950 
3951 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3952 	tw32(TG3PCI_MISC_HOST_CTRL,
3953 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3954 
3955 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3956 			     tg3_flag(tp, WOL_ENABLE);
3957 
3958 	if (tg3_flag(tp, USE_PHYLIB)) {
3959 		do_low_power = false;
3960 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3961 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3962 			struct phy_device *phydev;
3963 			u32 phyid, advertising;
3964 
3965 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3966 
3967 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3968 
3969 			tp->link_config.speed = phydev->speed;
3970 			tp->link_config.duplex = phydev->duplex;
3971 			tp->link_config.autoneg = phydev->autoneg;
3972 			tp->link_config.advertising = phydev->advertising;
3973 
3974 			advertising = ADVERTISED_TP |
3975 				      ADVERTISED_Pause |
3976 				      ADVERTISED_Autoneg |
3977 				      ADVERTISED_10baseT_Half;
3978 
3979 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3980 				if (tg3_flag(tp, WOL_SPEED_100MB))
3981 					advertising |=
3982 						ADVERTISED_100baseT_Half |
3983 						ADVERTISED_100baseT_Full |
3984 						ADVERTISED_10baseT_Full;
3985 				else
3986 					advertising |= ADVERTISED_10baseT_Full;
3987 			}
3988 
3989 			phydev->advertising = advertising;
3990 
3991 			phy_start_aneg(phydev);
3992 
3993 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3994 			if (phyid != PHY_ID_BCMAC131) {
3995 				phyid &= PHY_BCM_OUI_MASK;
3996 				if (phyid == PHY_BCM_OUI_1 ||
3997 				    phyid == PHY_BCM_OUI_2 ||
3998 				    phyid == PHY_BCM_OUI_3)
3999 					do_low_power = true;
4000 			}
4001 		}
4002 	} else {
4003 		do_low_power = true;
4004 
4005 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4006 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4007 
4008 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4009 			tg3_setup_phy(tp, false);
4010 	}
4011 
4012 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4013 		u32 val;
4014 
4015 		val = tr32(GRC_VCPU_EXT_CTRL);
4016 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4017 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4018 		int i;
4019 		u32 val;
4020 
4021 		for (i = 0; i < 200; i++) {
4022 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4023 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4024 				break;
4025 			msleep(1);
4026 		}
4027 	}
4028 	if (tg3_flag(tp, WOL_CAP))
4029 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4030 						     WOL_DRV_STATE_SHUTDOWN |
4031 						     WOL_DRV_WOL |
4032 						     WOL_SET_MAGIC_PKT);
4033 
4034 	if (device_should_wake) {
4035 		u32 mac_mode;
4036 
4037 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4038 			if (do_low_power &&
4039 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4040 				tg3_phy_auxctl_write(tp,
4041 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4042 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4043 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4044 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4045 				udelay(40);
4046 			}
4047 
4048 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4049 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4050 			else if (tp->phy_flags &
4051 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4052 				if (tp->link_config.active_speed == SPEED_1000)
4053 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4054 				else
4055 					mac_mode = MAC_MODE_PORT_MODE_MII;
4056 			} else
4057 				mac_mode = MAC_MODE_PORT_MODE_MII;
4058 
4059 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4060 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4061 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4062 					     SPEED_100 : SPEED_10;
4063 				if (tg3_5700_link_polarity(tp, speed))
4064 					mac_mode |= MAC_MODE_LINK_POLARITY;
4065 				else
4066 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4067 			}
4068 		} else {
4069 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4070 		}
4071 
4072 		if (!tg3_flag(tp, 5750_PLUS))
4073 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4074 
4075 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4076 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4077 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4078 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4079 
4080 		if (tg3_flag(tp, ENABLE_APE))
4081 			mac_mode |= MAC_MODE_APE_TX_EN |
4082 				    MAC_MODE_APE_RX_EN |
4083 				    MAC_MODE_TDE_ENABLE;
4084 
4085 		tw32_f(MAC_MODE, mac_mode);
4086 		udelay(100);
4087 
4088 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4089 		udelay(10);
4090 	}
4091 
4092 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4093 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4094 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4095 		u32 base_val;
4096 
4097 		base_val = tp->pci_clock_ctrl;
4098 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4099 			     CLOCK_CTRL_TXCLK_DISABLE);
4100 
4101 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4102 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4103 	} else if (tg3_flag(tp, 5780_CLASS) ||
4104 		   tg3_flag(tp, CPMU_PRESENT) ||
4105 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 		/* do nothing */
4107 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4108 		u32 newbits1, newbits2;
4109 
4110 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4111 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4112 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4113 				    CLOCK_CTRL_TXCLK_DISABLE |
4114 				    CLOCK_CTRL_ALTCLK);
4115 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4116 		} else if (tg3_flag(tp, 5705_PLUS)) {
4117 			newbits1 = CLOCK_CTRL_625_CORE;
4118 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4119 		} else {
4120 			newbits1 = CLOCK_CTRL_ALTCLK;
4121 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4122 		}
4123 
4124 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4125 			    40);
4126 
4127 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4128 			    40);
4129 
4130 		if (!tg3_flag(tp, 5705_PLUS)) {
4131 			u32 newbits3;
4132 
4133 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4134 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4135 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4136 					    CLOCK_CTRL_TXCLK_DISABLE |
4137 					    CLOCK_CTRL_44MHZ_CORE);
4138 			} else {
4139 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4140 			}
4141 
4142 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4143 				    tp->pci_clock_ctrl | newbits3, 40);
4144 		}
4145 	}
4146 
4147 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4148 		tg3_power_down_phy(tp, do_low_power);
4149 
4150 	tg3_frob_aux_power(tp, true);
4151 
4152 	/* Workaround for unstable PLL clock */
4153 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4154 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4155 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4156 		u32 val = tr32(0x7d00);
4157 
4158 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4159 		tw32(0x7d00, val);
4160 		if (!tg3_flag(tp, ENABLE_ASF)) {
4161 			int err;
4162 
4163 			err = tg3_nvram_lock(tp);
4164 			tg3_halt_cpu(tp, RX_CPU_BASE);
4165 			if (!err)
4166 				tg3_nvram_unlock(tp);
4167 		}
4168 	}
4169 
4170 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4171 
4172 	return 0;
4173 }
4174 
4175 static void tg3_power_down(struct tg3 *tp)
4176 {
4177 	tg3_power_down_prepare(tp);
4178 
4179 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4180 	pci_set_power_state(tp->pdev, PCI_D3hot);
4181 }
4182 
4183 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4184 {
4185 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4186 	case MII_TG3_AUX_STAT_10HALF:
4187 		*speed = SPEED_10;
4188 		*duplex = DUPLEX_HALF;
4189 		break;
4190 
4191 	case MII_TG3_AUX_STAT_10FULL:
4192 		*speed = SPEED_10;
4193 		*duplex = DUPLEX_FULL;
4194 		break;
4195 
4196 	case MII_TG3_AUX_STAT_100HALF:
4197 		*speed = SPEED_100;
4198 		*duplex = DUPLEX_HALF;
4199 		break;
4200 
4201 	case MII_TG3_AUX_STAT_100FULL:
4202 		*speed = SPEED_100;
4203 		*duplex = DUPLEX_FULL;
4204 		break;
4205 
4206 	case MII_TG3_AUX_STAT_1000HALF:
4207 		*speed = SPEED_1000;
4208 		*duplex = DUPLEX_HALF;
4209 		break;
4210 
4211 	case MII_TG3_AUX_STAT_1000FULL:
4212 		*speed = SPEED_1000;
4213 		*duplex = DUPLEX_FULL;
4214 		break;
4215 
4216 	default:
4217 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4218 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4219 				 SPEED_10;
4220 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4221 				  DUPLEX_HALF;
4222 			break;
4223 		}
4224 		*speed = SPEED_UNKNOWN;
4225 		*duplex = DUPLEX_UNKNOWN;
4226 		break;
4227 	}
4228 }
4229 
4230 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4231 {
4232 	int err = 0;
4233 	u32 val, new_adv;
4234 
4235 	new_adv = ADVERTISE_CSMA;
4236 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4237 	new_adv |= mii_advertise_flowctrl(flowctrl);
4238 
4239 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4240 	if (err)
4241 		goto done;
4242 
4243 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4244 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4245 
4246 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4247 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4248 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4249 
4250 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4251 		if (err)
4252 			goto done;
4253 	}
4254 
4255 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4256 		goto done;
4257 
4258 	tw32(TG3_CPMU_EEE_MODE,
4259 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4260 
4261 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4262 	if (!err) {
4263 		u32 err2;
4264 
4265 		val = 0;
4266 		/* Advertise 100-BaseTX EEE ability */
4267 		if (advertise & ADVERTISED_100baseT_Full)
4268 			val |= MDIO_AN_EEE_ADV_100TX;
4269 		/* Advertise 1000-BaseT EEE ability */
4270 		if (advertise & ADVERTISED_1000baseT_Full)
4271 			val |= MDIO_AN_EEE_ADV_1000T;
4272 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4273 		if (err)
4274 			val = 0;
4275 
4276 		switch (tg3_asic_rev(tp)) {
4277 		case ASIC_REV_5717:
4278 		case ASIC_REV_57765:
4279 		case ASIC_REV_57766:
4280 		case ASIC_REV_5719:
4281 			/* If we advertised any eee advertisements above... */
4282 			if (val)
4283 				val = MII_TG3_DSP_TAP26_ALNOKO |
4284 				      MII_TG3_DSP_TAP26_RMRXSTO |
4285 				      MII_TG3_DSP_TAP26_OPCSINPT;
4286 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4287 			/* Fall through */
4288 		case ASIC_REV_5720:
4289 		case ASIC_REV_5762:
4290 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4291 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4292 						 MII_TG3_DSP_CH34TP2_HIBW01);
4293 		}
4294 
4295 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4296 		if (!err)
4297 			err = err2;
4298 	}
4299 
4300 done:
4301 	return err;
4302 }
4303 
4304 static void tg3_phy_copper_begin(struct tg3 *tp)
4305 {
4306 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4307 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4308 		u32 adv, fc;
4309 
4310 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4311 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4312 			adv = ADVERTISED_10baseT_Half |
4313 			      ADVERTISED_10baseT_Full;
4314 			if (tg3_flag(tp, WOL_SPEED_100MB))
4315 				adv |= ADVERTISED_100baseT_Half |
4316 				       ADVERTISED_100baseT_Full;
4317 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4318 				adv |= ADVERTISED_1000baseT_Half |
4319 				       ADVERTISED_1000baseT_Full;
4320 
4321 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4322 		} else {
4323 			adv = tp->link_config.advertising;
4324 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4325 				adv &= ~(ADVERTISED_1000baseT_Half |
4326 					 ADVERTISED_1000baseT_Full);
4327 
4328 			fc = tp->link_config.flowctrl;
4329 		}
4330 
4331 		tg3_phy_autoneg_cfg(tp, adv, fc);
4332 
4333 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4334 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4335 			/* Normally during power down we want to autonegotiate
4336 			 * the lowest possible speed for WOL. However, to avoid
4337 			 * link flap, we leave it untouched.
4338 			 */
4339 			return;
4340 		}
4341 
4342 		tg3_writephy(tp, MII_BMCR,
4343 			     BMCR_ANENABLE | BMCR_ANRESTART);
4344 	} else {
4345 		int i;
4346 		u32 bmcr, orig_bmcr;
4347 
4348 		tp->link_config.active_speed = tp->link_config.speed;
4349 		tp->link_config.active_duplex = tp->link_config.duplex;
4350 
4351 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4352 			/* With autoneg disabled, 5715 only links up when the
4353 			 * advertisement register has the configured speed
4354 			 * enabled.
4355 			 */
4356 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4357 		}
4358 
4359 		bmcr = 0;
4360 		switch (tp->link_config.speed) {
4361 		default:
4362 		case SPEED_10:
4363 			break;
4364 
4365 		case SPEED_100:
4366 			bmcr |= BMCR_SPEED100;
4367 			break;
4368 
4369 		case SPEED_1000:
4370 			bmcr |= BMCR_SPEED1000;
4371 			break;
4372 		}
4373 
4374 		if (tp->link_config.duplex == DUPLEX_FULL)
4375 			bmcr |= BMCR_FULLDPLX;
4376 
4377 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4378 		    (bmcr != orig_bmcr)) {
4379 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4380 			for (i = 0; i < 1500; i++) {
4381 				u32 tmp;
4382 
4383 				udelay(10);
4384 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4385 				    tg3_readphy(tp, MII_BMSR, &tmp))
4386 					continue;
4387 				if (!(tmp & BMSR_LSTATUS)) {
4388 					udelay(40);
4389 					break;
4390 				}
4391 			}
4392 			tg3_writephy(tp, MII_BMCR, bmcr);
4393 			udelay(40);
4394 		}
4395 	}
4396 }
4397 
4398 static int tg3_phy_pull_config(struct tg3 *tp)
4399 {
4400 	int err;
4401 	u32 val;
4402 
4403 	err = tg3_readphy(tp, MII_BMCR, &val);
4404 	if (err)
4405 		goto done;
4406 
4407 	if (!(val & BMCR_ANENABLE)) {
4408 		tp->link_config.autoneg = AUTONEG_DISABLE;
4409 		tp->link_config.advertising = 0;
4410 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4411 
4412 		err = -EIO;
4413 
4414 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4415 		case 0:
4416 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4417 				goto done;
4418 
4419 			tp->link_config.speed = SPEED_10;
4420 			break;
4421 		case BMCR_SPEED100:
4422 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4423 				goto done;
4424 
4425 			tp->link_config.speed = SPEED_100;
4426 			break;
4427 		case BMCR_SPEED1000:
4428 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4429 				tp->link_config.speed = SPEED_1000;
4430 				break;
4431 			}
4432 			/* Fall through */
4433 		default:
4434 			goto done;
4435 		}
4436 
4437 		if (val & BMCR_FULLDPLX)
4438 			tp->link_config.duplex = DUPLEX_FULL;
4439 		else
4440 			tp->link_config.duplex = DUPLEX_HALF;
4441 
4442 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4443 
4444 		err = 0;
4445 		goto done;
4446 	}
4447 
4448 	tp->link_config.autoneg = AUTONEG_ENABLE;
4449 	tp->link_config.advertising = ADVERTISED_Autoneg;
4450 	tg3_flag_set(tp, PAUSE_AUTONEG);
4451 
4452 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4453 		u32 adv;
4454 
4455 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4456 		if (err)
4457 			goto done;
4458 
4459 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4460 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4461 
4462 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4463 	} else {
4464 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4465 	}
4466 
4467 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4468 		u32 adv;
4469 
4470 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4471 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4472 			if (err)
4473 				goto done;
4474 
4475 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4476 		} else {
4477 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4478 			if (err)
4479 				goto done;
4480 
4481 			adv = tg3_decode_flowctrl_1000X(val);
4482 			tp->link_config.flowctrl = adv;
4483 
4484 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4485 			adv = mii_adv_to_ethtool_adv_x(val);
4486 		}
4487 
4488 		tp->link_config.advertising |= adv;
4489 	}
4490 
4491 done:
4492 	return err;
4493 }
4494 
4495 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4496 {
4497 	int err;
4498 
4499 	/* Turn off tap power management. */
4500 	/* Set Extended packet length bit */
4501 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4502 
4503 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4504 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4505 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4506 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4507 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4508 
4509 	udelay(40);
4510 
4511 	return err;
4512 }
4513 
4514 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4515 {
4516 	u32 val;
4517 	u32 tgtadv = 0;
4518 	u32 advertising = tp->link_config.advertising;
4519 
4520 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4521 		return true;
4522 
4523 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4524 		return false;
4525 
4526 	val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4527 
4528 
4529 	if (advertising & ADVERTISED_100baseT_Full)
4530 		tgtadv |= MDIO_AN_EEE_ADV_100TX;
4531 	if (advertising & ADVERTISED_1000baseT_Full)
4532 		tgtadv |= MDIO_AN_EEE_ADV_1000T;
4533 
4534 	if (val != tgtadv)
4535 		return false;
4536 
4537 	return true;
4538 }
4539 
4540 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4541 {
4542 	u32 advmsk, tgtadv, advertising;
4543 
4544 	advertising = tp->link_config.advertising;
4545 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4546 
4547 	advmsk = ADVERTISE_ALL;
4548 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4549 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4550 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4551 	}
4552 
4553 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4554 		return false;
4555 
4556 	if ((*lcladv & advmsk) != tgtadv)
4557 		return false;
4558 
4559 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4560 		u32 tg3_ctrl;
4561 
4562 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4563 
4564 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4565 			return false;
4566 
4567 		if (tgtadv &&
4568 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4569 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4570 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4571 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4572 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4573 		} else {
4574 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4575 		}
4576 
4577 		if (tg3_ctrl != tgtadv)
4578 			return false;
4579 	}
4580 
4581 	return true;
4582 }
4583 
4584 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4585 {
4586 	u32 lpeth = 0;
4587 
4588 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4589 		u32 val;
4590 
4591 		if (tg3_readphy(tp, MII_STAT1000, &val))
4592 			return false;
4593 
4594 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4595 	}
4596 
4597 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4598 		return false;
4599 
4600 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4601 	tp->link_config.rmt_adv = lpeth;
4602 
4603 	return true;
4604 }
4605 
4606 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4607 {
4608 	if (curr_link_up != tp->link_up) {
4609 		if (curr_link_up) {
4610 			netif_carrier_on(tp->dev);
4611 		} else {
4612 			netif_carrier_off(tp->dev);
4613 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4614 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4615 		}
4616 
4617 		tg3_link_report(tp);
4618 		return true;
4619 	}
4620 
4621 	return false;
4622 }
4623 
4624 static void tg3_clear_mac_status(struct tg3 *tp)
4625 {
4626 	tw32(MAC_EVENT, 0);
4627 
4628 	tw32_f(MAC_STATUS,
4629 	       MAC_STATUS_SYNC_CHANGED |
4630 	       MAC_STATUS_CFG_CHANGED |
4631 	       MAC_STATUS_MI_COMPLETION |
4632 	       MAC_STATUS_LNKSTATE_CHANGED);
4633 	udelay(40);
4634 }
4635 
4636 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4637 {
4638 	bool current_link_up;
4639 	u32 bmsr, val;
4640 	u32 lcl_adv, rmt_adv;
4641 	u16 current_speed;
4642 	u8 current_duplex;
4643 	int i, err;
4644 
4645 	tg3_clear_mac_status(tp);
4646 
4647 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4648 		tw32_f(MAC_MI_MODE,
4649 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4650 		udelay(80);
4651 	}
4652 
4653 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4654 
4655 	/* Some third-party PHYs need to be reset on link going
4656 	 * down.
4657 	 */
4658 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4659 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4660 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4661 	    tp->link_up) {
4662 		tg3_readphy(tp, MII_BMSR, &bmsr);
4663 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4664 		    !(bmsr & BMSR_LSTATUS))
4665 			force_reset = true;
4666 	}
4667 	if (force_reset)
4668 		tg3_phy_reset(tp);
4669 
4670 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4671 		tg3_readphy(tp, MII_BMSR, &bmsr);
4672 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4673 		    !tg3_flag(tp, INIT_COMPLETE))
4674 			bmsr = 0;
4675 
4676 		if (!(bmsr & BMSR_LSTATUS)) {
4677 			err = tg3_init_5401phy_dsp(tp);
4678 			if (err)
4679 				return err;
4680 
4681 			tg3_readphy(tp, MII_BMSR, &bmsr);
4682 			for (i = 0; i < 1000; i++) {
4683 				udelay(10);
4684 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4685 				    (bmsr & BMSR_LSTATUS)) {
4686 					udelay(40);
4687 					break;
4688 				}
4689 			}
4690 
4691 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4692 			    TG3_PHY_REV_BCM5401_B0 &&
4693 			    !(bmsr & BMSR_LSTATUS) &&
4694 			    tp->link_config.active_speed == SPEED_1000) {
4695 				err = tg3_phy_reset(tp);
4696 				if (!err)
4697 					err = tg3_init_5401phy_dsp(tp);
4698 				if (err)
4699 					return err;
4700 			}
4701 		}
4702 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4703 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4704 		/* 5701 {A0,B0} CRC bug workaround */
4705 		tg3_writephy(tp, 0x15, 0x0a75);
4706 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4707 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4708 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4709 	}
4710 
4711 	/* Clear pending interrupts... */
4712 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4713 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4714 
4715 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4716 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4717 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4718 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4719 
4720 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4721 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4722 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4723 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4724 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4725 		else
4726 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4727 	}
4728 
4729 	current_link_up = false;
4730 	current_speed = SPEED_UNKNOWN;
4731 	current_duplex = DUPLEX_UNKNOWN;
4732 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4733 	tp->link_config.rmt_adv = 0;
4734 
4735 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4736 		err = tg3_phy_auxctl_read(tp,
4737 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4738 					  &val);
4739 		if (!err && !(val & (1 << 10))) {
4740 			tg3_phy_auxctl_write(tp,
4741 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4742 					     val | (1 << 10));
4743 			goto relink;
4744 		}
4745 	}
4746 
4747 	bmsr = 0;
4748 	for (i = 0; i < 100; i++) {
4749 		tg3_readphy(tp, MII_BMSR, &bmsr);
4750 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4751 		    (bmsr & BMSR_LSTATUS))
4752 			break;
4753 		udelay(40);
4754 	}
4755 
4756 	if (bmsr & BMSR_LSTATUS) {
4757 		u32 aux_stat, bmcr;
4758 
4759 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4760 		for (i = 0; i < 2000; i++) {
4761 			udelay(10);
4762 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4763 			    aux_stat)
4764 				break;
4765 		}
4766 
4767 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4768 					     &current_speed,
4769 					     &current_duplex);
4770 
4771 		bmcr = 0;
4772 		for (i = 0; i < 200; i++) {
4773 			tg3_readphy(tp, MII_BMCR, &bmcr);
4774 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4775 				continue;
4776 			if (bmcr && bmcr != 0x7fff)
4777 				break;
4778 			udelay(10);
4779 		}
4780 
4781 		lcl_adv = 0;
4782 		rmt_adv = 0;
4783 
4784 		tp->link_config.active_speed = current_speed;
4785 		tp->link_config.active_duplex = current_duplex;
4786 
4787 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4788 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4789 
4790 			if ((bmcr & BMCR_ANENABLE) &&
4791 			    eee_config_ok &&
4792 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4793 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4794 				current_link_up = true;
4795 
4796 			/* EEE settings changes take effect only after a phy
4797 			 * reset.  If we have skipped a reset due to Link Flap
4798 			 * Avoidance being enabled, do it now.
4799 			 */
4800 			if (!eee_config_ok &&
4801 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4802 			    !force_reset)
4803 				tg3_phy_reset(tp);
4804 		} else {
4805 			if (!(bmcr & BMCR_ANENABLE) &&
4806 			    tp->link_config.speed == current_speed &&
4807 			    tp->link_config.duplex == current_duplex) {
4808 				current_link_up = true;
4809 			}
4810 		}
4811 
4812 		if (current_link_up &&
4813 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4814 			u32 reg, bit;
4815 
4816 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4817 				reg = MII_TG3_FET_GEN_STAT;
4818 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4819 			} else {
4820 				reg = MII_TG3_EXT_STAT;
4821 				bit = MII_TG3_EXT_STAT_MDIX;
4822 			}
4823 
4824 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4825 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4826 
4827 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4828 		}
4829 	}
4830 
4831 relink:
4832 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4833 		tg3_phy_copper_begin(tp);
4834 
4835 		if (tg3_flag(tp, ROBOSWITCH)) {
4836 			current_link_up = true;
4837 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4838 			current_speed = SPEED_1000;
4839 			current_duplex = DUPLEX_FULL;
4840 			tp->link_config.active_speed = current_speed;
4841 			tp->link_config.active_duplex = current_duplex;
4842 		}
4843 
4844 		tg3_readphy(tp, MII_BMSR, &bmsr);
4845 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4846 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4847 			current_link_up = true;
4848 	}
4849 
4850 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4851 	if (current_link_up) {
4852 		if (tp->link_config.active_speed == SPEED_100 ||
4853 		    tp->link_config.active_speed == SPEED_10)
4854 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4855 		else
4856 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4857 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4858 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4859 	else
4860 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4861 
4862 	/* In order for the 5750 core in BCM4785 chip to work properly
4863 	 * in RGMII mode, the Led Control Register must be set up.
4864 	 */
4865 	if (tg3_flag(tp, RGMII_MODE)) {
4866 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4867 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4868 
4869 		if (tp->link_config.active_speed == SPEED_10)
4870 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4871 		else if (tp->link_config.active_speed == SPEED_100)
4872 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4873 				     LED_CTRL_100MBPS_ON);
4874 		else if (tp->link_config.active_speed == SPEED_1000)
4875 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4876 				     LED_CTRL_1000MBPS_ON);
4877 
4878 		tw32(MAC_LED_CTRL, led_ctrl);
4879 		udelay(40);
4880 	}
4881 
4882 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4883 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4884 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4885 
4886 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4887 		if (current_link_up &&
4888 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4889 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4890 		else
4891 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4892 	}
4893 
4894 	/* ??? Without this setting Netgear GA302T PHY does not
4895 	 * ??? send/receive packets...
4896 	 */
4897 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4898 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4899 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4900 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4901 		udelay(80);
4902 	}
4903 
4904 	tw32_f(MAC_MODE, tp->mac_mode);
4905 	udelay(40);
4906 
4907 	tg3_phy_eee_adjust(tp, current_link_up);
4908 
4909 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4910 		/* Polled via timer. */
4911 		tw32_f(MAC_EVENT, 0);
4912 	} else {
4913 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4914 	}
4915 	udelay(40);
4916 
4917 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4918 	    current_link_up &&
4919 	    tp->link_config.active_speed == SPEED_1000 &&
4920 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4921 		udelay(120);
4922 		tw32_f(MAC_STATUS,
4923 		     (MAC_STATUS_SYNC_CHANGED |
4924 		      MAC_STATUS_CFG_CHANGED));
4925 		udelay(40);
4926 		tg3_write_mem(tp,
4927 			      NIC_SRAM_FIRMWARE_MBOX,
4928 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4929 	}
4930 
4931 	/* Prevent send BD corruption. */
4932 	if (tg3_flag(tp, CLKREQ_BUG)) {
4933 		if (tp->link_config.active_speed == SPEED_100 ||
4934 		    tp->link_config.active_speed == SPEED_10)
4935 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4936 						   PCI_EXP_LNKCTL_CLKREQ_EN);
4937 		else
4938 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4939 						 PCI_EXP_LNKCTL_CLKREQ_EN);
4940 	}
4941 
4942 	tg3_test_and_report_link_chg(tp, current_link_up);
4943 
4944 	return 0;
4945 }
4946 
4947 struct tg3_fiber_aneginfo {
4948 	int state;
4949 #define ANEG_STATE_UNKNOWN		0
4950 #define ANEG_STATE_AN_ENABLE		1
4951 #define ANEG_STATE_RESTART_INIT		2
4952 #define ANEG_STATE_RESTART		3
4953 #define ANEG_STATE_DISABLE_LINK_OK	4
4954 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4955 #define ANEG_STATE_ABILITY_DETECT	6
4956 #define ANEG_STATE_ACK_DETECT_INIT	7
4957 #define ANEG_STATE_ACK_DETECT		8
4958 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4959 #define ANEG_STATE_COMPLETE_ACK		10
4960 #define ANEG_STATE_IDLE_DETECT_INIT	11
4961 #define ANEG_STATE_IDLE_DETECT		12
4962 #define ANEG_STATE_LINK_OK		13
4963 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4964 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4965 
4966 	u32 flags;
4967 #define MR_AN_ENABLE		0x00000001
4968 #define MR_RESTART_AN		0x00000002
4969 #define MR_AN_COMPLETE		0x00000004
4970 #define MR_PAGE_RX		0x00000008
4971 #define MR_NP_LOADED		0x00000010
4972 #define MR_TOGGLE_TX		0x00000020
4973 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4974 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4975 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4976 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4977 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4978 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4979 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4980 #define MR_TOGGLE_RX		0x00002000
4981 #define MR_NP_RX		0x00004000
4982 
4983 #define MR_LINK_OK		0x80000000
4984 
4985 	unsigned long link_time, cur_time;
4986 
4987 	u32 ability_match_cfg;
4988 	int ability_match_count;
4989 
4990 	char ability_match, idle_match, ack_match;
4991 
4992 	u32 txconfig, rxconfig;
4993 #define ANEG_CFG_NP		0x00000080
4994 #define ANEG_CFG_ACK		0x00000040
4995 #define ANEG_CFG_RF2		0x00000020
4996 #define ANEG_CFG_RF1		0x00000010
4997 #define ANEG_CFG_PS2		0x00000001
4998 #define ANEG_CFG_PS1		0x00008000
4999 #define ANEG_CFG_HD		0x00004000
5000 #define ANEG_CFG_FD		0x00002000
5001 #define ANEG_CFG_INVAL		0x00001f06
5002 
5003 };
5004 #define ANEG_OK		0
5005 #define ANEG_DONE	1
5006 #define ANEG_TIMER_ENAB	2
5007 #define ANEG_FAILED	-1
5008 
5009 #define ANEG_STATE_SETTLE_TIME	10000
5010 
5011 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5012 				   struct tg3_fiber_aneginfo *ap)
5013 {
5014 	u16 flowctrl;
5015 	unsigned long delta;
5016 	u32 rx_cfg_reg;
5017 	int ret;
5018 
5019 	if (ap->state == ANEG_STATE_UNKNOWN) {
5020 		ap->rxconfig = 0;
5021 		ap->link_time = 0;
5022 		ap->cur_time = 0;
5023 		ap->ability_match_cfg = 0;
5024 		ap->ability_match_count = 0;
5025 		ap->ability_match = 0;
5026 		ap->idle_match = 0;
5027 		ap->ack_match = 0;
5028 	}
5029 	ap->cur_time++;
5030 
5031 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5032 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5033 
5034 		if (rx_cfg_reg != ap->ability_match_cfg) {
5035 			ap->ability_match_cfg = rx_cfg_reg;
5036 			ap->ability_match = 0;
5037 			ap->ability_match_count = 0;
5038 		} else {
5039 			if (++ap->ability_match_count > 1) {
5040 				ap->ability_match = 1;
5041 				ap->ability_match_cfg = rx_cfg_reg;
5042 			}
5043 		}
5044 		if (rx_cfg_reg & ANEG_CFG_ACK)
5045 			ap->ack_match = 1;
5046 		else
5047 			ap->ack_match = 0;
5048 
5049 		ap->idle_match = 0;
5050 	} else {
5051 		ap->idle_match = 1;
5052 		ap->ability_match_cfg = 0;
5053 		ap->ability_match_count = 0;
5054 		ap->ability_match = 0;
5055 		ap->ack_match = 0;
5056 
5057 		rx_cfg_reg = 0;
5058 	}
5059 
5060 	ap->rxconfig = rx_cfg_reg;
5061 	ret = ANEG_OK;
5062 
5063 	switch (ap->state) {
5064 	case ANEG_STATE_UNKNOWN:
5065 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5066 			ap->state = ANEG_STATE_AN_ENABLE;
5067 
5068 		/* fallthru */
5069 	case ANEG_STATE_AN_ENABLE:
5070 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5071 		if (ap->flags & MR_AN_ENABLE) {
5072 			ap->link_time = 0;
5073 			ap->cur_time = 0;
5074 			ap->ability_match_cfg = 0;
5075 			ap->ability_match_count = 0;
5076 			ap->ability_match = 0;
5077 			ap->idle_match = 0;
5078 			ap->ack_match = 0;
5079 
5080 			ap->state = ANEG_STATE_RESTART_INIT;
5081 		} else {
5082 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5083 		}
5084 		break;
5085 
5086 	case ANEG_STATE_RESTART_INIT:
5087 		ap->link_time = ap->cur_time;
5088 		ap->flags &= ~(MR_NP_LOADED);
5089 		ap->txconfig = 0;
5090 		tw32(MAC_TX_AUTO_NEG, 0);
5091 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5092 		tw32_f(MAC_MODE, tp->mac_mode);
5093 		udelay(40);
5094 
5095 		ret = ANEG_TIMER_ENAB;
5096 		ap->state = ANEG_STATE_RESTART;
5097 
5098 		/* fallthru */
5099 	case ANEG_STATE_RESTART:
5100 		delta = ap->cur_time - ap->link_time;
5101 		if (delta > ANEG_STATE_SETTLE_TIME)
5102 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5103 		else
5104 			ret = ANEG_TIMER_ENAB;
5105 		break;
5106 
5107 	case ANEG_STATE_DISABLE_LINK_OK:
5108 		ret = ANEG_DONE;
5109 		break;
5110 
5111 	case ANEG_STATE_ABILITY_DETECT_INIT:
5112 		ap->flags &= ~(MR_TOGGLE_TX);
5113 		ap->txconfig = ANEG_CFG_FD;
5114 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5115 		if (flowctrl & ADVERTISE_1000XPAUSE)
5116 			ap->txconfig |= ANEG_CFG_PS1;
5117 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5118 			ap->txconfig |= ANEG_CFG_PS2;
5119 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5120 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5121 		tw32_f(MAC_MODE, tp->mac_mode);
5122 		udelay(40);
5123 
5124 		ap->state = ANEG_STATE_ABILITY_DETECT;
5125 		break;
5126 
5127 	case ANEG_STATE_ABILITY_DETECT:
5128 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5129 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5130 		break;
5131 
5132 	case ANEG_STATE_ACK_DETECT_INIT:
5133 		ap->txconfig |= ANEG_CFG_ACK;
5134 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5135 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5136 		tw32_f(MAC_MODE, tp->mac_mode);
5137 		udelay(40);
5138 
5139 		ap->state = ANEG_STATE_ACK_DETECT;
5140 
5141 		/* fallthru */
5142 	case ANEG_STATE_ACK_DETECT:
5143 		if (ap->ack_match != 0) {
5144 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5145 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5146 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5147 			} else {
5148 				ap->state = ANEG_STATE_AN_ENABLE;
5149 			}
5150 		} else if (ap->ability_match != 0 &&
5151 			   ap->rxconfig == 0) {
5152 			ap->state = ANEG_STATE_AN_ENABLE;
5153 		}
5154 		break;
5155 
5156 	case ANEG_STATE_COMPLETE_ACK_INIT:
5157 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5158 			ret = ANEG_FAILED;
5159 			break;
5160 		}
5161 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5162 			       MR_LP_ADV_HALF_DUPLEX |
5163 			       MR_LP_ADV_SYM_PAUSE |
5164 			       MR_LP_ADV_ASYM_PAUSE |
5165 			       MR_LP_ADV_REMOTE_FAULT1 |
5166 			       MR_LP_ADV_REMOTE_FAULT2 |
5167 			       MR_LP_ADV_NEXT_PAGE |
5168 			       MR_TOGGLE_RX |
5169 			       MR_NP_RX);
5170 		if (ap->rxconfig & ANEG_CFG_FD)
5171 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5172 		if (ap->rxconfig & ANEG_CFG_HD)
5173 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5174 		if (ap->rxconfig & ANEG_CFG_PS1)
5175 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5176 		if (ap->rxconfig & ANEG_CFG_PS2)
5177 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5178 		if (ap->rxconfig & ANEG_CFG_RF1)
5179 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5180 		if (ap->rxconfig & ANEG_CFG_RF2)
5181 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5182 		if (ap->rxconfig & ANEG_CFG_NP)
5183 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5184 
5185 		ap->link_time = ap->cur_time;
5186 
5187 		ap->flags ^= (MR_TOGGLE_TX);
5188 		if (ap->rxconfig & 0x0008)
5189 			ap->flags |= MR_TOGGLE_RX;
5190 		if (ap->rxconfig & ANEG_CFG_NP)
5191 			ap->flags |= MR_NP_RX;
5192 		ap->flags |= MR_PAGE_RX;
5193 
5194 		ap->state = ANEG_STATE_COMPLETE_ACK;
5195 		ret = ANEG_TIMER_ENAB;
5196 		break;
5197 
5198 	case ANEG_STATE_COMPLETE_ACK:
5199 		if (ap->ability_match != 0 &&
5200 		    ap->rxconfig == 0) {
5201 			ap->state = ANEG_STATE_AN_ENABLE;
5202 			break;
5203 		}
5204 		delta = ap->cur_time - ap->link_time;
5205 		if (delta > ANEG_STATE_SETTLE_TIME) {
5206 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5207 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5208 			} else {
5209 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5210 				    !(ap->flags & MR_NP_RX)) {
5211 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5212 				} else {
5213 					ret = ANEG_FAILED;
5214 				}
5215 			}
5216 		}
5217 		break;
5218 
5219 	case ANEG_STATE_IDLE_DETECT_INIT:
5220 		ap->link_time = ap->cur_time;
5221 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5222 		tw32_f(MAC_MODE, tp->mac_mode);
5223 		udelay(40);
5224 
5225 		ap->state = ANEG_STATE_IDLE_DETECT;
5226 		ret = ANEG_TIMER_ENAB;
5227 		break;
5228 
5229 	case ANEG_STATE_IDLE_DETECT:
5230 		if (ap->ability_match != 0 &&
5231 		    ap->rxconfig == 0) {
5232 			ap->state = ANEG_STATE_AN_ENABLE;
5233 			break;
5234 		}
5235 		delta = ap->cur_time - ap->link_time;
5236 		if (delta > ANEG_STATE_SETTLE_TIME) {
5237 			/* XXX another gem from the Broadcom driver :( */
5238 			ap->state = ANEG_STATE_LINK_OK;
5239 		}
5240 		break;
5241 
5242 	case ANEG_STATE_LINK_OK:
5243 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5244 		ret = ANEG_DONE;
5245 		break;
5246 
5247 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5248 		/* ??? unimplemented */
5249 		break;
5250 
5251 	case ANEG_STATE_NEXT_PAGE_WAIT:
5252 		/* ??? unimplemented */
5253 		break;
5254 
5255 	default:
5256 		ret = ANEG_FAILED;
5257 		break;
5258 	}
5259 
5260 	return ret;
5261 }
5262 
5263 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5264 {
5265 	int res = 0;
5266 	struct tg3_fiber_aneginfo aninfo;
5267 	int status = ANEG_FAILED;
5268 	unsigned int tick;
5269 	u32 tmp;
5270 
5271 	tw32_f(MAC_TX_AUTO_NEG, 0);
5272 
5273 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5274 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5275 	udelay(40);
5276 
5277 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5278 	udelay(40);
5279 
5280 	memset(&aninfo, 0, sizeof(aninfo));
5281 	aninfo.flags |= MR_AN_ENABLE;
5282 	aninfo.state = ANEG_STATE_UNKNOWN;
5283 	aninfo.cur_time = 0;
5284 	tick = 0;
5285 	while (++tick < 195000) {
5286 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5287 		if (status == ANEG_DONE || status == ANEG_FAILED)
5288 			break;
5289 
5290 		udelay(1);
5291 	}
5292 
5293 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5294 	tw32_f(MAC_MODE, tp->mac_mode);
5295 	udelay(40);
5296 
5297 	*txflags = aninfo.txconfig;
5298 	*rxflags = aninfo.flags;
5299 
5300 	if (status == ANEG_DONE &&
5301 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5302 			     MR_LP_ADV_FULL_DUPLEX)))
5303 		res = 1;
5304 
5305 	return res;
5306 }
5307 
5308 static void tg3_init_bcm8002(struct tg3 *tp)
5309 {
5310 	u32 mac_status = tr32(MAC_STATUS);
5311 	int i;
5312 
5313 	/* Reset when initting first time or we have a link. */
5314 	if (tg3_flag(tp, INIT_COMPLETE) &&
5315 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5316 		return;
5317 
5318 	/* Set PLL lock range. */
5319 	tg3_writephy(tp, 0x16, 0x8007);
5320 
5321 	/* SW reset */
5322 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5323 
5324 	/* Wait for reset to complete. */
5325 	/* XXX schedule_timeout() ... */
5326 	for (i = 0; i < 500; i++)
5327 		udelay(10);
5328 
5329 	/* Config mode; select PMA/Ch 1 regs. */
5330 	tg3_writephy(tp, 0x10, 0x8411);
5331 
5332 	/* Enable auto-lock and comdet, select txclk for tx. */
5333 	tg3_writephy(tp, 0x11, 0x0a10);
5334 
5335 	tg3_writephy(tp, 0x18, 0x00a0);
5336 	tg3_writephy(tp, 0x16, 0x41ff);
5337 
5338 	/* Assert and deassert POR. */
5339 	tg3_writephy(tp, 0x13, 0x0400);
5340 	udelay(40);
5341 	tg3_writephy(tp, 0x13, 0x0000);
5342 
5343 	tg3_writephy(tp, 0x11, 0x0a50);
5344 	udelay(40);
5345 	tg3_writephy(tp, 0x11, 0x0a10);
5346 
5347 	/* Wait for signal to stabilize */
5348 	/* XXX schedule_timeout() ... */
5349 	for (i = 0; i < 15000; i++)
5350 		udelay(10);
5351 
5352 	/* Deselect the channel register so we can read the PHYID
5353 	 * later.
5354 	 */
5355 	tg3_writephy(tp, 0x10, 0x8011);
5356 }
5357 
5358 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5359 {
5360 	u16 flowctrl;
5361 	bool current_link_up;
5362 	u32 sg_dig_ctrl, sg_dig_status;
5363 	u32 serdes_cfg, expected_sg_dig_ctrl;
5364 	int workaround, port_a;
5365 
5366 	serdes_cfg = 0;
5367 	expected_sg_dig_ctrl = 0;
5368 	workaround = 0;
5369 	port_a = 1;
5370 	current_link_up = false;
5371 
5372 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5373 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5374 		workaround = 1;
5375 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5376 			port_a = 0;
5377 
5378 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5379 		/* preserve bits 20-23 for voltage regulator */
5380 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5381 	}
5382 
5383 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5384 
5385 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5386 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5387 			if (workaround) {
5388 				u32 val = serdes_cfg;
5389 
5390 				if (port_a)
5391 					val |= 0xc010000;
5392 				else
5393 					val |= 0x4010000;
5394 				tw32_f(MAC_SERDES_CFG, val);
5395 			}
5396 
5397 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5398 		}
5399 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5400 			tg3_setup_flow_control(tp, 0, 0);
5401 			current_link_up = true;
5402 		}
5403 		goto out;
5404 	}
5405 
5406 	/* Want auto-negotiation.  */
5407 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5408 
5409 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5410 	if (flowctrl & ADVERTISE_1000XPAUSE)
5411 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5412 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5413 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5414 
5415 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5416 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5417 		    tp->serdes_counter &&
5418 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5419 				    MAC_STATUS_RCVD_CFG)) ==
5420 		     MAC_STATUS_PCS_SYNCED)) {
5421 			tp->serdes_counter--;
5422 			current_link_up = true;
5423 			goto out;
5424 		}
5425 restart_autoneg:
5426 		if (workaround)
5427 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5428 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5429 		udelay(5);
5430 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5431 
5432 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5433 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5434 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5435 				 MAC_STATUS_SIGNAL_DET)) {
5436 		sg_dig_status = tr32(SG_DIG_STATUS);
5437 		mac_status = tr32(MAC_STATUS);
5438 
5439 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5440 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5441 			u32 local_adv = 0, remote_adv = 0;
5442 
5443 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5444 				local_adv |= ADVERTISE_1000XPAUSE;
5445 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5446 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5447 
5448 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5449 				remote_adv |= LPA_1000XPAUSE;
5450 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5451 				remote_adv |= LPA_1000XPAUSE_ASYM;
5452 
5453 			tp->link_config.rmt_adv =
5454 					   mii_adv_to_ethtool_adv_x(remote_adv);
5455 
5456 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5457 			current_link_up = true;
5458 			tp->serdes_counter = 0;
5459 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5460 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5461 			if (tp->serdes_counter)
5462 				tp->serdes_counter--;
5463 			else {
5464 				if (workaround) {
5465 					u32 val = serdes_cfg;
5466 
5467 					if (port_a)
5468 						val |= 0xc010000;
5469 					else
5470 						val |= 0x4010000;
5471 
5472 					tw32_f(MAC_SERDES_CFG, val);
5473 				}
5474 
5475 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5476 				udelay(40);
5477 
5478 				/* Link parallel detection - link is up */
5479 				/* only if we have PCS_SYNC and not */
5480 				/* receiving config code words */
5481 				mac_status = tr32(MAC_STATUS);
5482 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5483 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5484 					tg3_setup_flow_control(tp, 0, 0);
5485 					current_link_up = true;
5486 					tp->phy_flags |=
5487 						TG3_PHYFLG_PARALLEL_DETECT;
5488 					tp->serdes_counter =
5489 						SERDES_PARALLEL_DET_TIMEOUT;
5490 				} else
5491 					goto restart_autoneg;
5492 			}
5493 		}
5494 	} else {
5495 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5496 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 	}
5498 
5499 out:
5500 	return current_link_up;
5501 }
5502 
5503 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5504 {
5505 	bool current_link_up = false;
5506 
5507 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5508 		goto out;
5509 
5510 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5511 		u32 txflags, rxflags;
5512 		int i;
5513 
5514 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5515 			u32 local_adv = 0, remote_adv = 0;
5516 
5517 			if (txflags & ANEG_CFG_PS1)
5518 				local_adv |= ADVERTISE_1000XPAUSE;
5519 			if (txflags & ANEG_CFG_PS2)
5520 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5521 
5522 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5523 				remote_adv |= LPA_1000XPAUSE;
5524 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5525 				remote_adv |= LPA_1000XPAUSE_ASYM;
5526 
5527 			tp->link_config.rmt_adv =
5528 					   mii_adv_to_ethtool_adv_x(remote_adv);
5529 
5530 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5531 
5532 			current_link_up = true;
5533 		}
5534 		for (i = 0; i < 30; i++) {
5535 			udelay(20);
5536 			tw32_f(MAC_STATUS,
5537 			       (MAC_STATUS_SYNC_CHANGED |
5538 				MAC_STATUS_CFG_CHANGED));
5539 			udelay(40);
5540 			if ((tr32(MAC_STATUS) &
5541 			     (MAC_STATUS_SYNC_CHANGED |
5542 			      MAC_STATUS_CFG_CHANGED)) == 0)
5543 				break;
5544 		}
5545 
5546 		mac_status = tr32(MAC_STATUS);
5547 		if (!current_link_up &&
5548 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5549 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5550 			current_link_up = true;
5551 	} else {
5552 		tg3_setup_flow_control(tp, 0, 0);
5553 
5554 		/* Forcing 1000FD link up. */
5555 		current_link_up = true;
5556 
5557 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5558 		udelay(40);
5559 
5560 		tw32_f(MAC_MODE, tp->mac_mode);
5561 		udelay(40);
5562 	}
5563 
5564 out:
5565 	return current_link_up;
5566 }
5567 
5568 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5569 {
5570 	u32 orig_pause_cfg;
5571 	u16 orig_active_speed;
5572 	u8 orig_active_duplex;
5573 	u32 mac_status;
5574 	bool current_link_up;
5575 	int i;
5576 
5577 	orig_pause_cfg = tp->link_config.active_flowctrl;
5578 	orig_active_speed = tp->link_config.active_speed;
5579 	orig_active_duplex = tp->link_config.active_duplex;
5580 
5581 	if (!tg3_flag(tp, HW_AUTONEG) &&
5582 	    tp->link_up &&
5583 	    tg3_flag(tp, INIT_COMPLETE)) {
5584 		mac_status = tr32(MAC_STATUS);
5585 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5586 			       MAC_STATUS_SIGNAL_DET |
5587 			       MAC_STATUS_CFG_CHANGED |
5588 			       MAC_STATUS_RCVD_CFG);
5589 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5590 				   MAC_STATUS_SIGNAL_DET)) {
5591 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5592 					    MAC_STATUS_CFG_CHANGED));
5593 			return 0;
5594 		}
5595 	}
5596 
5597 	tw32_f(MAC_TX_AUTO_NEG, 0);
5598 
5599 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5600 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5601 	tw32_f(MAC_MODE, tp->mac_mode);
5602 	udelay(40);
5603 
5604 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5605 		tg3_init_bcm8002(tp);
5606 
5607 	/* Enable link change event even when serdes polling.  */
5608 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5609 	udelay(40);
5610 
5611 	current_link_up = false;
5612 	tp->link_config.rmt_adv = 0;
5613 	mac_status = tr32(MAC_STATUS);
5614 
5615 	if (tg3_flag(tp, HW_AUTONEG))
5616 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5617 	else
5618 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5619 
5620 	tp->napi[0].hw_status->status =
5621 		(SD_STATUS_UPDATED |
5622 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5623 
5624 	for (i = 0; i < 100; i++) {
5625 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5626 				    MAC_STATUS_CFG_CHANGED));
5627 		udelay(5);
5628 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5629 					 MAC_STATUS_CFG_CHANGED |
5630 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5631 			break;
5632 	}
5633 
5634 	mac_status = tr32(MAC_STATUS);
5635 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5636 		current_link_up = false;
5637 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5638 		    tp->serdes_counter == 0) {
5639 			tw32_f(MAC_MODE, (tp->mac_mode |
5640 					  MAC_MODE_SEND_CONFIGS));
5641 			udelay(1);
5642 			tw32_f(MAC_MODE, tp->mac_mode);
5643 		}
5644 	}
5645 
5646 	if (current_link_up) {
5647 		tp->link_config.active_speed = SPEED_1000;
5648 		tp->link_config.active_duplex = DUPLEX_FULL;
5649 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5650 				    LED_CTRL_LNKLED_OVERRIDE |
5651 				    LED_CTRL_1000MBPS_ON));
5652 	} else {
5653 		tp->link_config.active_speed = SPEED_UNKNOWN;
5654 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5655 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5656 				    LED_CTRL_LNKLED_OVERRIDE |
5657 				    LED_CTRL_TRAFFIC_OVERRIDE));
5658 	}
5659 
5660 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5661 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5662 		if (orig_pause_cfg != now_pause_cfg ||
5663 		    orig_active_speed != tp->link_config.active_speed ||
5664 		    orig_active_duplex != tp->link_config.active_duplex)
5665 			tg3_link_report(tp);
5666 	}
5667 
5668 	return 0;
5669 }
5670 
5671 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5672 {
5673 	int err = 0;
5674 	u32 bmsr, bmcr;
5675 	u16 current_speed = SPEED_UNKNOWN;
5676 	u8 current_duplex = DUPLEX_UNKNOWN;
5677 	bool current_link_up = false;
5678 	u32 local_adv, remote_adv, sgsr;
5679 
5680 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5681 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5682 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5683 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5684 
5685 		if (force_reset)
5686 			tg3_phy_reset(tp);
5687 
5688 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5689 
5690 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5691 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5692 		} else {
5693 			current_link_up = true;
5694 			if (sgsr & SERDES_TG3_SPEED_1000) {
5695 				current_speed = SPEED_1000;
5696 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5697 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5698 				current_speed = SPEED_100;
5699 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5700 			} else {
5701 				current_speed = SPEED_10;
5702 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5703 			}
5704 
5705 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5706 				current_duplex = DUPLEX_FULL;
5707 			else
5708 				current_duplex = DUPLEX_HALF;
5709 		}
5710 
5711 		tw32_f(MAC_MODE, tp->mac_mode);
5712 		udelay(40);
5713 
5714 		tg3_clear_mac_status(tp);
5715 
5716 		goto fiber_setup_done;
5717 	}
5718 
5719 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5720 	tw32_f(MAC_MODE, tp->mac_mode);
5721 	udelay(40);
5722 
5723 	tg3_clear_mac_status(tp);
5724 
5725 	if (force_reset)
5726 		tg3_phy_reset(tp);
5727 
5728 	tp->link_config.rmt_adv = 0;
5729 
5730 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5731 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5732 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5733 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5734 			bmsr |= BMSR_LSTATUS;
5735 		else
5736 			bmsr &= ~BMSR_LSTATUS;
5737 	}
5738 
5739 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5740 
5741 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5742 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5743 		/* do nothing, just check for link up at the end */
5744 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5745 		u32 adv, newadv;
5746 
5747 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5748 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5749 				 ADVERTISE_1000XPAUSE |
5750 				 ADVERTISE_1000XPSE_ASYM |
5751 				 ADVERTISE_SLCT);
5752 
5753 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5754 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5755 
5756 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5757 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5758 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5759 			tg3_writephy(tp, MII_BMCR, bmcr);
5760 
5761 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5762 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5763 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5764 
5765 			return err;
5766 		}
5767 	} else {
5768 		u32 new_bmcr;
5769 
5770 		bmcr &= ~BMCR_SPEED1000;
5771 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5772 
5773 		if (tp->link_config.duplex == DUPLEX_FULL)
5774 			new_bmcr |= BMCR_FULLDPLX;
5775 
5776 		if (new_bmcr != bmcr) {
5777 			/* BMCR_SPEED1000 is a reserved bit that needs
5778 			 * to be set on write.
5779 			 */
5780 			new_bmcr |= BMCR_SPEED1000;
5781 
5782 			/* Force a linkdown */
5783 			if (tp->link_up) {
5784 				u32 adv;
5785 
5786 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5787 				adv &= ~(ADVERTISE_1000XFULL |
5788 					 ADVERTISE_1000XHALF |
5789 					 ADVERTISE_SLCT);
5790 				tg3_writephy(tp, MII_ADVERTISE, adv);
5791 				tg3_writephy(tp, MII_BMCR, bmcr |
5792 							   BMCR_ANRESTART |
5793 							   BMCR_ANENABLE);
5794 				udelay(10);
5795 				tg3_carrier_off(tp);
5796 			}
5797 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5798 			bmcr = new_bmcr;
5799 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5800 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5801 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5802 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5803 					bmsr |= BMSR_LSTATUS;
5804 				else
5805 					bmsr &= ~BMSR_LSTATUS;
5806 			}
5807 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5808 		}
5809 	}
5810 
5811 	if (bmsr & BMSR_LSTATUS) {
5812 		current_speed = SPEED_1000;
5813 		current_link_up = true;
5814 		if (bmcr & BMCR_FULLDPLX)
5815 			current_duplex = DUPLEX_FULL;
5816 		else
5817 			current_duplex = DUPLEX_HALF;
5818 
5819 		local_adv = 0;
5820 		remote_adv = 0;
5821 
5822 		if (bmcr & BMCR_ANENABLE) {
5823 			u32 common;
5824 
5825 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5826 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5827 			common = local_adv & remote_adv;
5828 			if (common & (ADVERTISE_1000XHALF |
5829 				      ADVERTISE_1000XFULL)) {
5830 				if (common & ADVERTISE_1000XFULL)
5831 					current_duplex = DUPLEX_FULL;
5832 				else
5833 					current_duplex = DUPLEX_HALF;
5834 
5835 				tp->link_config.rmt_adv =
5836 					   mii_adv_to_ethtool_adv_x(remote_adv);
5837 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5838 				/* Link is up via parallel detect */
5839 			} else {
5840 				current_link_up = false;
5841 			}
5842 		}
5843 	}
5844 
5845 fiber_setup_done:
5846 	if (current_link_up && current_duplex == DUPLEX_FULL)
5847 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5848 
5849 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5850 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5851 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5852 
5853 	tw32_f(MAC_MODE, tp->mac_mode);
5854 	udelay(40);
5855 
5856 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5857 
5858 	tp->link_config.active_speed = current_speed;
5859 	tp->link_config.active_duplex = current_duplex;
5860 
5861 	tg3_test_and_report_link_chg(tp, current_link_up);
5862 	return err;
5863 }
5864 
5865 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5866 {
5867 	if (tp->serdes_counter) {
5868 		/* Give autoneg time to complete. */
5869 		tp->serdes_counter--;
5870 		return;
5871 	}
5872 
5873 	if (!tp->link_up &&
5874 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5875 		u32 bmcr;
5876 
5877 		tg3_readphy(tp, MII_BMCR, &bmcr);
5878 		if (bmcr & BMCR_ANENABLE) {
5879 			u32 phy1, phy2;
5880 
5881 			/* Select shadow register 0x1f */
5882 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5883 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5884 
5885 			/* Select expansion interrupt status register */
5886 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5887 					 MII_TG3_DSP_EXP1_INT_STAT);
5888 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5889 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5890 
5891 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5892 				/* We have signal detect and not receiving
5893 				 * config code words, link is up by parallel
5894 				 * detection.
5895 				 */
5896 
5897 				bmcr &= ~BMCR_ANENABLE;
5898 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5899 				tg3_writephy(tp, MII_BMCR, bmcr);
5900 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5901 			}
5902 		}
5903 	} else if (tp->link_up &&
5904 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5905 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5906 		u32 phy2;
5907 
5908 		/* Select expansion interrupt status register */
5909 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5910 				 MII_TG3_DSP_EXP1_INT_STAT);
5911 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5912 		if (phy2 & 0x20) {
5913 			u32 bmcr;
5914 
5915 			/* Config code words received, turn on autoneg. */
5916 			tg3_readphy(tp, MII_BMCR, &bmcr);
5917 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5918 
5919 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5920 
5921 		}
5922 	}
5923 }
5924 
5925 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5926 {
5927 	u32 val;
5928 	int err;
5929 
5930 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5931 		err = tg3_setup_fiber_phy(tp, force_reset);
5932 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5933 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5934 	else
5935 		err = tg3_setup_copper_phy(tp, force_reset);
5936 
5937 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5938 		u32 scale;
5939 
5940 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5941 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5942 			scale = 65;
5943 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5944 			scale = 6;
5945 		else
5946 			scale = 12;
5947 
5948 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5949 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5950 		tw32(GRC_MISC_CFG, val);
5951 	}
5952 
5953 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5954 	      (6 << TX_LENGTHS_IPG_SHIFT);
5955 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5956 	    tg3_asic_rev(tp) == ASIC_REV_5762)
5957 		val |= tr32(MAC_TX_LENGTHS) &
5958 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5959 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5960 
5961 	if (tp->link_config.active_speed == SPEED_1000 &&
5962 	    tp->link_config.active_duplex == DUPLEX_HALF)
5963 		tw32(MAC_TX_LENGTHS, val |
5964 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5965 	else
5966 		tw32(MAC_TX_LENGTHS, val |
5967 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5968 
5969 	if (!tg3_flag(tp, 5705_PLUS)) {
5970 		if (tp->link_up) {
5971 			tw32(HOSTCC_STAT_COAL_TICKS,
5972 			     tp->coal.stats_block_coalesce_usecs);
5973 		} else {
5974 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5975 		}
5976 	}
5977 
5978 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5979 		val = tr32(PCIE_PWR_MGMT_THRESH);
5980 		if (!tp->link_up)
5981 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5982 			      tp->pwrmgmt_thresh;
5983 		else
5984 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5985 		tw32(PCIE_PWR_MGMT_THRESH, val);
5986 	}
5987 
5988 	return err;
5989 }
5990 
5991 /* tp->lock must be held */
5992 static u64 tg3_refclk_read(struct tg3 *tp)
5993 {
5994 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5995 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5996 }
5997 
5998 /* tp->lock must be held */
5999 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6000 {
6001 	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6002 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6003 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6004 	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6005 }
6006 
6007 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6008 static inline void tg3_full_unlock(struct tg3 *tp);
6009 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6010 {
6011 	struct tg3 *tp = netdev_priv(dev);
6012 
6013 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6014 				SOF_TIMESTAMPING_RX_SOFTWARE |
6015 				SOF_TIMESTAMPING_SOFTWARE;
6016 
6017 	if (tg3_flag(tp, PTP_CAPABLE)) {
6018 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6019 					SOF_TIMESTAMPING_RX_HARDWARE |
6020 					SOF_TIMESTAMPING_RAW_HARDWARE;
6021 	}
6022 
6023 	if (tp->ptp_clock)
6024 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6025 	else
6026 		info->phc_index = -1;
6027 
6028 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6029 
6030 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6031 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6032 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6033 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6034 	return 0;
6035 }
6036 
6037 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6038 {
6039 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6040 	bool neg_adj = false;
6041 	u32 correction = 0;
6042 
6043 	if (ppb < 0) {
6044 		neg_adj = true;
6045 		ppb = -ppb;
6046 	}
6047 
6048 	/* Frequency adjustment is performed using hardware with a 24 bit
6049 	 * accumulator and a programmable correction value. On each clk, the
6050 	 * correction value gets added to the accumulator and when it
6051 	 * overflows, the time counter is incremented/decremented.
6052 	 *
6053 	 * So conversion from ppb to correction value is
6054 	 *		ppb * (1 << 24) / 1000000000
6055 	 */
6056 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6057 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6058 
6059 	tg3_full_lock(tp, 0);
6060 
6061 	if (correction)
6062 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6063 		     TG3_EAV_REF_CLK_CORRECT_EN |
6064 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6065 	else
6066 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6067 
6068 	tg3_full_unlock(tp);
6069 
6070 	return 0;
6071 }
6072 
6073 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6074 {
6075 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6076 
6077 	tg3_full_lock(tp, 0);
6078 	tp->ptp_adjust += delta;
6079 	tg3_full_unlock(tp);
6080 
6081 	return 0;
6082 }
6083 
6084 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6085 {
6086 	u64 ns;
6087 	u32 remainder;
6088 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6089 
6090 	tg3_full_lock(tp, 0);
6091 	ns = tg3_refclk_read(tp);
6092 	ns += tp->ptp_adjust;
6093 	tg3_full_unlock(tp);
6094 
6095 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6096 	ts->tv_nsec = remainder;
6097 
6098 	return 0;
6099 }
6100 
6101 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6102 			   const struct timespec *ts)
6103 {
6104 	u64 ns;
6105 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6106 
6107 	ns = timespec_to_ns(ts);
6108 
6109 	tg3_full_lock(tp, 0);
6110 	tg3_refclk_write(tp, ns);
6111 	tp->ptp_adjust = 0;
6112 	tg3_full_unlock(tp);
6113 
6114 	return 0;
6115 }
6116 
6117 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6118 			  struct ptp_clock_request *rq, int on)
6119 {
6120 	return -EOPNOTSUPP;
6121 }
6122 
6123 static const struct ptp_clock_info tg3_ptp_caps = {
6124 	.owner		= THIS_MODULE,
6125 	.name		= "tg3 clock",
6126 	.max_adj	= 250000000,
6127 	.n_alarm	= 0,
6128 	.n_ext_ts	= 0,
6129 	.n_per_out	= 0,
6130 	.pps		= 0,
6131 	.adjfreq	= tg3_ptp_adjfreq,
6132 	.adjtime	= tg3_ptp_adjtime,
6133 	.gettime	= tg3_ptp_gettime,
6134 	.settime	= tg3_ptp_settime,
6135 	.enable		= tg3_ptp_enable,
6136 };
6137 
6138 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6139 				     struct skb_shared_hwtstamps *timestamp)
6140 {
6141 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6142 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6143 					   tp->ptp_adjust);
6144 }
6145 
6146 /* tp->lock must be held */
6147 static void tg3_ptp_init(struct tg3 *tp)
6148 {
6149 	if (!tg3_flag(tp, PTP_CAPABLE))
6150 		return;
6151 
6152 	/* Initialize the hardware clock to the system time. */
6153 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6154 	tp->ptp_adjust = 0;
6155 	tp->ptp_info = tg3_ptp_caps;
6156 }
6157 
6158 /* tp->lock must be held */
6159 static void tg3_ptp_resume(struct tg3 *tp)
6160 {
6161 	if (!tg3_flag(tp, PTP_CAPABLE))
6162 		return;
6163 
6164 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6165 	tp->ptp_adjust = 0;
6166 }
6167 
6168 static void tg3_ptp_fini(struct tg3 *tp)
6169 {
6170 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6171 		return;
6172 
6173 	ptp_clock_unregister(tp->ptp_clock);
6174 	tp->ptp_clock = NULL;
6175 	tp->ptp_adjust = 0;
6176 }
6177 
6178 static inline int tg3_irq_sync(struct tg3 *tp)
6179 {
6180 	return tp->irq_sync;
6181 }
6182 
6183 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6184 {
6185 	int i;
6186 
6187 	dst = (u32 *)((u8 *)dst + off);
6188 	for (i = 0; i < len; i += sizeof(u32))
6189 		*dst++ = tr32(off + i);
6190 }
6191 
6192 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6193 {
6194 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6195 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6196 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6197 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6198 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6199 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6200 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6201 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6202 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6203 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6204 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6205 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6206 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6207 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6208 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6209 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6210 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6211 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6212 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6213 
6214 	if (tg3_flag(tp, SUPPORT_MSIX))
6215 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6216 
6217 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6218 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6219 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6220 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6221 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6222 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6223 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6224 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6225 
6226 	if (!tg3_flag(tp, 5705_PLUS)) {
6227 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6228 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6229 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6230 	}
6231 
6232 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6233 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6234 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6235 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6236 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6237 
6238 	if (tg3_flag(tp, NVRAM))
6239 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6240 }
6241 
6242 static void tg3_dump_state(struct tg3 *tp)
6243 {
6244 	int i;
6245 	u32 *regs;
6246 
6247 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6248 	if (!regs)
6249 		return;
6250 
6251 	if (tg3_flag(tp, PCI_EXPRESS)) {
6252 		/* Read up to but not including private PCI registers */
6253 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6254 			regs[i / sizeof(u32)] = tr32(i);
6255 	} else
6256 		tg3_dump_legacy_regs(tp, regs);
6257 
6258 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6259 		if (!regs[i + 0] && !regs[i + 1] &&
6260 		    !regs[i + 2] && !regs[i + 3])
6261 			continue;
6262 
6263 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6264 			   i * 4,
6265 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6266 	}
6267 
6268 	kfree(regs);
6269 
6270 	for (i = 0; i < tp->irq_cnt; i++) {
6271 		struct tg3_napi *tnapi = &tp->napi[i];
6272 
6273 		/* SW status block */
6274 		netdev_err(tp->dev,
6275 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6276 			   i,
6277 			   tnapi->hw_status->status,
6278 			   tnapi->hw_status->status_tag,
6279 			   tnapi->hw_status->rx_jumbo_consumer,
6280 			   tnapi->hw_status->rx_consumer,
6281 			   tnapi->hw_status->rx_mini_consumer,
6282 			   tnapi->hw_status->idx[0].rx_producer,
6283 			   tnapi->hw_status->idx[0].tx_consumer);
6284 
6285 		netdev_err(tp->dev,
6286 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6287 			   i,
6288 			   tnapi->last_tag, tnapi->last_irq_tag,
6289 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6290 			   tnapi->rx_rcb_ptr,
6291 			   tnapi->prodring.rx_std_prod_idx,
6292 			   tnapi->prodring.rx_std_cons_idx,
6293 			   tnapi->prodring.rx_jmb_prod_idx,
6294 			   tnapi->prodring.rx_jmb_cons_idx);
6295 	}
6296 }
6297 
6298 /* This is called whenever we suspect that the system chipset is re-
6299  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6300  * is bogus tx completions. We try to recover by setting the
6301  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6302  * in the workqueue.
6303  */
6304 static void tg3_tx_recover(struct tg3 *tp)
6305 {
6306 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6307 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6308 
6309 	netdev_warn(tp->dev,
6310 		    "The system may be re-ordering memory-mapped I/O "
6311 		    "cycles to the network device, attempting to recover. "
6312 		    "Please report the problem to the driver maintainer "
6313 		    "and include system chipset information.\n");
6314 
6315 	spin_lock(&tp->lock);
6316 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6317 	spin_unlock(&tp->lock);
6318 }
6319 
6320 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6321 {
6322 	/* Tell compiler to fetch tx indices from memory. */
6323 	barrier();
6324 	return tnapi->tx_pending -
6325 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6326 }
6327 
6328 /* Tigon3 never reports partial packet sends.  So we do not
6329  * need special logic to handle SKBs that have not had all
6330  * of their frags sent yet, like SunGEM does.
6331  */
6332 static void tg3_tx(struct tg3_napi *tnapi)
6333 {
6334 	struct tg3 *tp = tnapi->tp;
6335 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6336 	u32 sw_idx = tnapi->tx_cons;
6337 	struct netdev_queue *txq;
6338 	int index = tnapi - tp->napi;
6339 	unsigned int pkts_compl = 0, bytes_compl = 0;
6340 
6341 	if (tg3_flag(tp, ENABLE_TSS))
6342 		index--;
6343 
6344 	txq = netdev_get_tx_queue(tp->dev, index);
6345 
6346 	while (sw_idx != hw_idx) {
6347 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6348 		struct sk_buff *skb = ri->skb;
6349 		int i, tx_bug = 0;
6350 
6351 		if (unlikely(skb == NULL)) {
6352 			tg3_tx_recover(tp);
6353 			return;
6354 		}
6355 
6356 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6357 			struct skb_shared_hwtstamps timestamp;
6358 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6359 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6360 
6361 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6362 
6363 			skb_tstamp_tx(skb, &timestamp);
6364 		}
6365 
6366 		pci_unmap_single(tp->pdev,
6367 				 dma_unmap_addr(ri, mapping),
6368 				 skb_headlen(skb),
6369 				 PCI_DMA_TODEVICE);
6370 
6371 		ri->skb = NULL;
6372 
6373 		while (ri->fragmented) {
6374 			ri->fragmented = false;
6375 			sw_idx = NEXT_TX(sw_idx);
6376 			ri = &tnapi->tx_buffers[sw_idx];
6377 		}
6378 
6379 		sw_idx = NEXT_TX(sw_idx);
6380 
6381 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6382 			ri = &tnapi->tx_buffers[sw_idx];
6383 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6384 				tx_bug = 1;
6385 
6386 			pci_unmap_page(tp->pdev,
6387 				       dma_unmap_addr(ri, mapping),
6388 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6389 				       PCI_DMA_TODEVICE);
6390 
6391 			while (ri->fragmented) {
6392 				ri->fragmented = false;
6393 				sw_idx = NEXT_TX(sw_idx);
6394 				ri = &tnapi->tx_buffers[sw_idx];
6395 			}
6396 
6397 			sw_idx = NEXT_TX(sw_idx);
6398 		}
6399 
6400 		pkts_compl++;
6401 		bytes_compl += skb->len;
6402 
6403 		dev_kfree_skb(skb);
6404 
6405 		if (unlikely(tx_bug)) {
6406 			tg3_tx_recover(tp);
6407 			return;
6408 		}
6409 	}
6410 
6411 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6412 
6413 	tnapi->tx_cons = sw_idx;
6414 
6415 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6416 	 * before checking for netif_queue_stopped().  Without the
6417 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6418 	 * will miss it and cause the queue to be stopped forever.
6419 	 */
6420 	smp_mb();
6421 
6422 	if (unlikely(netif_tx_queue_stopped(txq) &&
6423 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6424 		__netif_tx_lock(txq, smp_processor_id());
6425 		if (netif_tx_queue_stopped(txq) &&
6426 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6427 			netif_tx_wake_queue(txq);
6428 		__netif_tx_unlock(txq);
6429 	}
6430 }
6431 
6432 static void tg3_frag_free(bool is_frag, void *data)
6433 {
6434 	if (is_frag)
6435 		put_page(virt_to_head_page(data));
6436 	else
6437 		kfree(data);
6438 }
6439 
6440 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6441 {
6442 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6443 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6444 
6445 	if (!ri->data)
6446 		return;
6447 
6448 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6449 			 map_sz, PCI_DMA_FROMDEVICE);
6450 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6451 	ri->data = NULL;
6452 }
6453 
6454 
6455 /* Returns size of skb allocated or < 0 on error.
6456  *
6457  * We only need to fill in the address because the other members
6458  * of the RX descriptor are invariant, see tg3_init_rings.
6459  *
6460  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6461  * posting buffers we only dirty the first cache line of the RX
6462  * descriptor (containing the address).  Whereas for the RX status
6463  * buffers the cpu only reads the last cacheline of the RX descriptor
6464  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6465  */
6466 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6467 			     u32 opaque_key, u32 dest_idx_unmasked,
6468 			     unsigned int *frag_size)
6469 {
6470 	struct tg3_rx_buffer_desc *desc;
6471 	struct ring_info *map;
6472 	u8 *data;
6473 	dma_addr_t mapping;
6474 	int skb_size, data_size, dest_idx;
6475 
6476 	switch (opaque_key) {
6477 	case RXD_OPAQUE_RING_STD:
6478 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6479 		desc = &tpr->rx_std[dest_idx];
6480 		map = &tpr->rx_std_buffers[dest_idx];
6481 		data_size = tp->rx_pkt_map_sz;
6482 		break;
6483 
6484 	case RXD_OPAQUE_RING_JUMBO:
6485 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6486 		desc = &tpr->rx_jmb[dest_idx].std;
6487 		map = &tpr->rx_jmb_buffers[dest_idx];
6488 		data_size = TG3_RX_JMB_MAP_SZ;
6489 		break;
6490 
6491 	default:
6492 		return -EINVAL;
6493 	}
6494 
6495 	/* Do not overwrite any of the map or rp information
6496 	 * until we are sure we can commit to a new buffer.
6497 	 *
6498 	 * Callers depend upon this behavior and assume that
6499 	 * we leave everything unchanged if we fail.
6500 	 */
6501 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6502 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6503 	if (skb_size <= PAGE_SIZE) {
6504 		data = netdev_alloc_frag(skb_size);
6505 		*frag_size = skb_size;
6506 	} else {
6507 		data = kmalloc(skb_size, GFP_ATOMIC);
6508 		*frag_size = 0;
6509 	}
6510 	if (!data)
6511 		return -ENOMEM;
6512 
6513 	mapping = pci_map_single(tp->pdev,
6514 				 data + TG3_RX_OFFSET(tp),
6515 				 data_size,
6516 				 PCI_DMA_FROMDEVICE);
6517 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6518 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6519 		return -EIO;
6520 	}
6521 
6522 	map->data = data;
6523 	dma_unmap_addr_set(map, mapping, mapping);
6524 
6525 	desc->addr_hi = ((u64)mapping >> 32);
6526 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6527 
6528 	return data_size;
6529 }
6530 
6531 /* We only need to move over in the address because the other
6532  * members of the RX descriptor are invariant.  See notes above
6533  * tg3_alloc_rx_data for full details.
6534  */
6535 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6536 			   struct tg3_rx_prodring_set *dpr,
6537 			   u32 opaque_key, int src_idx,
6538 			   u32 dest_idx_unmasked)
6539 {
6540 	struct tg3 *tp = tnapi->tp;
6541 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6542 	struct ring_info *src_map, *dest_map;
6543 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6544 	int dest_idx;
6545 
6546 	switch (opaque_key) {
6547 	case RXD_OPAQUE_RING_STD:
6548 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6549 		dest_desc = &dpr->rx_std[dest_idx];
6550 		dest_map = &dpr->rx_std_buffers[dest_idx];
6551 		src_desc = &spr->rx_std[src_idx];
6552 		src_map = &spr->rx_std_buffers[src_idx];
6553 		break;
6554 
6555 	case RXD_OPAQUE_RING_JUMBO:
6556 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6557 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6558 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6559 		src_desc = &spr->rx_jmb[src_idx].std;
6560 		src_map = &spr->rx_jmb_buffers[src_idx];
6561 		break;
6562 
6563 	default:
6564 		return;
6565 	}
6566 
6567 	dest_map->data = src_map->data;
6568 	dma_unmap_addr_set(dest_map, mapping,
6569 			   dma_unmap_addr(src_map, mapping));
6570 	dest_desc->addr_hi = src_desc->addr_hi;
6571 	dest_desc->addr_lo = src_desc->addr_lo;
6572 
6573 	/* Ensure that the update to the skb happens after the physical
6574 	 * addresses have been transferred to the new BD location.
6575 	 */
6576 	smp_wmb();
6577 
6578 	src_map->data = NULL;
6579 }
6580 
6581 /* The RX ring scheme is composed of multiple rings which post fresh
6582  * buffers to the chip, and one special ring the chip uses to report
6583  * status back to the host.
6584  *
6585  * The special ring reports the status of received packets to the
6586  * host.  The chip does not write into the original descriptor the
6587  * RX buffer was obtained from.  The chip simply takes the original
6588  * descriptor as provided by the host, updates the status and length
6589  * field, then writes this into the next status ring entry.
6590  *
6591  * Each ring the host uses to post buffers to the chip is described
6592  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6593  * it is first placed into the on-chip ram.  When the packet's length
6594  * is known, it walks down the TG3_BDINFO entries to select the ring.
6595  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6596  * which is within the range of the new packet's length is chosen.
6597  *
6598  * The "separate ring for rx status" scheme may sound queer, but it makes
6599  * sense from a cache coherency perspective.  If only the host writes
6600  * to the buffer post rings, and only the chip writes to the rx status
6601  * rings, then cache lines never move beyond shared-modified state.
6602  * If both the host and chip were to write into the same ring, cache line
6603  * eviction could occur since both entities want it in an exclusive state.
6604  */
6605 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6606 {
6607 	struct tg3 *tp = tnapi->tp;
6608 	u32 work_mask, rx_std_posted = 0;
6609 	u32 std_prod_idx, jmb_prod_idx;
6610 	u32 sw_idx = tnapi->rx_rcb_ptr;
6611 	u16 hw_idx;
6612 	int received;
6613 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6614 
6615 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6616 	/*
6617 	 * We need to order the read of hw_idx and the read of
6618 	 * the opaque cookie.
6619 	 */
6620 	rmb();
6621 	work_mask = 0;
6622 	received = 0;
6623 	std_prod_idx = tpr->rx_std_prod_idx;
6624 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6625 	while (sw_idx != hw_idx && budget > 0) {
6626 		struct ring_info *ri;
6627 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6628 		unsigned int len;
6629 		struct sk_buff *skb;
6630 		dma_addr_t dma_addr;
6631 		u32 opaque_key, desc_idx, *post_ptr;
6632 		u8 *data;
6633 		u64 tstamp = 0;
6634 
6635 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6636 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6637 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6638 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6639 			dma_addr = dma_unmap_addr(ri, mapping);
6640 			data = ri->data;
6641 			post_ptr = &std_prod_idx;
6642 			rx_std_posted++;
6643 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6644 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6645 			dma_addr = dma_unmap_addr(ri, mapping);
6646 			data = ri->data;
6647 			post_ptr = &jmb_prod_idx;
6648 		} else
6649 			goto next_pkt_nopost;
6650 
6651 		work_mask |= opaque_key;
6652 
6653 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6654 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6655 		drop_it:
6656 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6657 				       desc_idx, *post_ptr);
6658 		drop_it_no_recycle:
6659 			/* Other statistics kept track of by card. */
6660 			tp->rx_dropped++;
6661 			goto next_pkt;
6662 		}
6663 
6664 		prefetch(data + TG3_RX_OFFSET(tp));
6665 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6666 		      ETH_FCS_LEN;
6667 
6668 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6669 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6670 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6671 		     RXD_FLAG_PTPSTAT_PTPV2) {
6672 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6673 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6674 		}
6675 
6676 		if (len > TG3_RX_COPY_THRESH(tp)) {
6677 			int skb_size;
6678 			unsigned int frag_size;
6679 
6680 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6681 						    *post_ptr, &frag_size);
6682 			if (skb_size < 0)
6683 				goto drop_it;
6684 
6685 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6686 					 PCI_DMA_FROMDEVICE);
6687 
6688 			skb = build_skb(data, frag_size);
6689 			if (!skb) {
6690 				tg3_frag_free(frag_size != 0, data);
6691 				goto drop_it_no_recycle;
6692 			}
6693 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6694 			/* Ensure that the update to the data happens
6695 			 * after the usage of the old DMA mapping.
6696 			 */
6697 			smp_wmb();
6698 
6699 			ri->data = NULL;
6700 
6701 		} else {
6702 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6703 				       desc_idx, *post_ptr);
6704 
6705 			skb = netdev_alloc_skb(tp->dev,
6706 					       len + TG3_RAW_IP_ALIGN);
6707 			if (skb == NULL)
6708 				goto drop_it_no_recycle;
6709 
6710 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6711 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6712 			memcpy(skb->data,
6713 			       data + TG3_RX_OFFSET(tp),
6714 			       len);
6715 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6716 		}
6717 
6718 		skb_put(skb, len);
6719 		if (tstamp)
6720 			tg3_hwclock_to_timestamp(tp, tstamp,
6721 						 skb_hwtstamps(skb));
6722 
6723 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6724 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6725 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6726 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6727 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6728 		else
6729 			skb_checksum_none_assert(skb);
6730 
6731 		skb->protocol = eth_type_trans(skb, tp->dev);
6732 
6733 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6734 		    skb->protocol != htons(ETH_P_8021Q)) {
6735 			dev_kfree_skb(skb);
6736 			goto drop_it_no_recycle;
6737 		}
6738 
6739 		if (desc->type_flags & RXD_FLAG_VLAN &&
6740 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6741 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6742 					       desc->err_vlan & RXD_VLAN_MASK);
6743 
6744 		napi_gro_receive(&tnapi->napi, skb);
6745 
6746 		received++;
6747 		budget--;
6748 
6749 next_pkt:
6750 		(*post_ptr)++;
6751 
6752 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6753 			tpr->rx_std_prod_idx = std_prod_idx &
6754 					       tp->rx_std_ring_mask;
6755 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6756 				     tpr->rx_std_prod_idx);
6757 			work_mask &= ~RXD_OPAQUE_RING_STD;
6758 			rx_std_posted = 0;
6759 		}
6760 next_pkt_nopost:
6761 		sw_idx++;
6762 		sw_idx &= tp->rx_ret_ring_mask;
6763 
6764 		/* Refresh hw_idx to see if there is new work */
6765 		if (sw_idx == hw_idx) {
6766 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6767 			rmb();
6768 		}
6769 	}
6770 
6771 	/* ACK the status ring. */
6772 	tnapi->rx_rcb_ptr = sw_idx;
6773 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6774 
6775 	/* Refill RX ring(s). */
6776 	if (!tg3_flag(tp, ENABLE_RSS)) {
6777 		/* Sync BD data before updating mailbox */
6778 		wmb();
6779 
6780 		if (work_mask & RXD_OPAQUE_RING_STD) {
6781 			tpr->rx_std_prod_idx = std_prod_idx &
6782 					       tp->rx_std_ring_mask;
6783 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6784 				     tpr->rx_std_prod_idx);
6785 		}
6786 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6787 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6788 					       tp->rx_jmb_ring_mask;
6789 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6790 				     tpr->rx_jmb_prod_idx);
6791 		}
6792 		mmiowb();
6793 	} else if (work_mask) {
6794 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6795 		 * updated before the producer indices can be updated.
6796 		 */
6797 		smp_wmb();
6798 
6799 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6800 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6801 
6802 		if (tnapi != &tp->napi[1]) {
6803 			tp->rx_refill = true;
6804 			napi_schedule(&tp->napi[1].napi);
6805 		}
6806 	}
6807 
6808 	return received;
6809 }
6810 
6811 static void tg3_poll_link(struct tg3 *tp)
6812 {
6813 	/* handle link change and other phy events */
6814 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6815 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6816 
6817 		if (sblk->status & SD_STATUS_LINK_CHG) {
6818 			sblk->status = SD_STATUS_UPDATED |
6819 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6820 			spin_lock(&tp->lock);
6821 			if (tg3_flag(tp, USE_PHYLIB)) {
6822 				tw32_f(MAC_STATUS,
6823 				     (MAC_STATUS_SYNC_CHANGED |
6824 				      MAC_STATUS_CFG_CHANGED |
6825 				      MAC_STATUS_MI_COMPLETION |
6826 				      MAC_STATUS_LNKSTATE_CHANGED));
6827 				udelay(40);
6828 			} else
6829 				tg3_setup_phy(tp, false);
6830 			spin_unlock(&tp->lock);
6831 		}
6832 	}
6833 }
6834 
6835 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6836 				struct tg3_rx_prodring_set *dpr,
6837 				struct tg3_rx_prodring_set *spr)
6838 {
6839 	u32 si, di, cpycnt, src_prod_idx;
6840 	int i, err = 0;
6841 
6842 	while (1) {
6843 		src_prod_idx = spr->rx_std_prod_idx;
6844 
6845 		/* Make sure updates to the rx_std_buffers[] entries and the
6846 		 * standard producer index are seen in the correct order.
6847 		 */
6848 		smp_rmb();
6849 
6850 		if (spr->rx_std_cons_idx == src_prod_idx)
6851 			break;
6852 
6853 		if (spr->rx_std_cons_idx < src_prod_idx)
6854 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6855 		else
6856 			cpycnt = tp->rx_std_ring_mask + 1 -
6857 				 spr->rx_std_cons_idx;
6858 
6859 		cpycnt = min(cpycnt,
6860 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6861 
6862 		si = spr->rx_std_cons_idx;
6863 		di = dpr->rx_std_prod_idx;
6864 
6865 		for (i = di; i < di + cpycnt; i++) {
6866 			if (dpr->rx_std_buffers[i].data) {
6867 				cpycnt = i - di;
6868 				err = -ENOSPC;
6869 				break;
6870 			}
6871 		}
6872 
6873 		if (!cpycnt)
6874 			break;
6875 
6876 		/* Ensure that updates to the rx_std_buffers ring and the
6877 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6878 		 * ordered correctly WRT the skb check above.
6879 		 */
6880 		smp_rmb();
6881 
6882 		memcpy(&dpr->rx_std_buffers[di],
6883 		       &spr->rx_std_buffers[si],
6884 		       cpycnt * sizeof(struct ring_info));
6885 
6886 		for (i = 0; i < cpycnt; i++, di++, si++) {
6887 			struct tg3_rx_buffer_desc *sbd, *dbd;
6888 			sbd = &spr->rx_std[si];
6889 			dbd = &dpr->rx_std[di];
6890 			dbd->addr_hi = sbd->addr_hi;
6891 			dbd->addr_lo = sbd->addr_lo;
6892 		}
6893 
6894 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6895 				       tp->rx_std_ring_mask;
6896 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6897 				       tp->rx_std_ring_mask;
6898 	}
6899 
6900 	while (1) {
6901 		src_prod_idx = spr->rx_jmb_prod_idx;
6902 
6903 		/* Make sure updates to the rx_jmb_buffers[] entries and
6904 		 * the jumbo producer index are seen in the correct order.
6905 		 */
6906 		smp_rmb();
6907 
6908 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6909 			break;
6910 
6911 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6912 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6913 		else
6914 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6915 				 spr->rx_jmb_cons_idx;
6916 
6917 		cpycnt = min(cpycnt,
6918 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6919 
6920 		si = spr->rx_jmb_cons_idx;
6921 		di = dpr->rx_jmb_prod_idx;
6922 
6923 		for (i = di; i < di + cpycnt; i++) {
6924 			if (dpr->rx_jmb_buffers[i].data) {
6925 				cpycnt = i - di;
6926 				err = -ENOSPC;
6927 				break;
6928 			}
6929 		}
6930 
6931 		if (!cpycnt)
6932 			break;
6933 
6934 		/* Ensure that updates to the rx_jmb_buffers ring and the
6935 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6936 		 * ordered correctly WRT the skb check above.
6937 		 */
6938 		smp_rmb();
6939 
6940 		memcpy(&dpr->rx_jmb_buffers[di],
6941 		       &spr->rx_jmb_buffers[si],
6942 		       cpycnt * sizeof(struct ring_info));
6943 
6944 		for (i = 0; i < cpycnt; i++, di++, si++) {
6945 			struct tg3_rx_buffer_desc *sbd, *dbd;
6946 			sbd = &spr->rx_jmb[si].std;
6947 			dbd = &dpr->rx_jmb[di].std;
6948 			dbd->addr_hi = sbd->addr_hi;
6949 			dbd->addr_lo = sbd->addr_lo;
6950 		}
6951 
6952 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6953 				       tp->rx_jmb_ring_mask;
6954 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6955 				       tp->rx_jmb_ring_mask;
6956 	}
6957 
6958 	return err;
6959 }
6960 
6961 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6962 {
6963 	struct tg3 *tp = tnapi->tp;
6964 
6965 	/* run TX completion thread */
6966 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6967 		tg3_tx(tnapi);
6968 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6969 			return work_done;
6970 	}
6971 
6972 	if (!tnapi->rx_rcb_prod_idx)
6973 		return work_done;
6974 
6975 	/* run RX thread, within the bounds set by NAPI.
6976 	 * All RX "locking" is done by ensuring outside
6977 	 * code synchronizes with tg3->napi.poll()
6978 	 */
6979 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6980 		work_done += tg3_rx(tnapi, budget - work_done);
6981 
6982 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6983 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6984 		int i, err = 0;
6985 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6986 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6987 
6988 		tp->rx_refill = false;
6989 		for (i = 1; i <= tp->rxq_cnt; i++)
6990 			err |= tg3_rx_prodring_xfer(tp, dpr,
6991 						    &tp->napi[i].prodring);
6992 
6993 		wmb();
6994 
6995 		if (std_prod_idx != dpr->rx_std_prod_idx)
6996 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6997 				     dpr->rx_std_prod_idx);
6998 
6999 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7000 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7001 				     dpr->rx_jmb_prod_idx);
7002 
7003 		mmiowb();
7004 
7005 		if (err)
7006 			tw32_f(HOSTCC_MODE, tp->coal_now);
7007 	}
7008 
7009 	return work_done;
7010 }
7011 
7012 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7013 {
7014 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7015 		schedule_work(&tp->reset_task);
7016 }
7017 
7018 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7019 {
7020 	cancel_work_sync(&tp->reset_task);
7021 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7022 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7023 }
7024 
7025 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7026 {
7027 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7028 	struct tg3 *tp = tnapi->tp;
7029 	int work_done = 0;
7030 	struct tg3_hw_status *sblk = tnapi->hw_status;
7031 
7032 	while (1) {
7033 		work_done = tg3_poll_work(tnapi, work_done, budget);
7034 
7035 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7036 			goto tx_recovery;
7037 
7038 		if (unlikely(work_done >= budget))
7039 			break;
7040 
7041 		/* tp->last_tag is used in tg3_int_reenable() below
7042 		 * to tell the hw how much work has been processed,
7043 		 * so we must read it before checking for more work.
7044 		 */
7045 		tnapi->last_tag = sblk->status_tag;
7046 		tnapi->last_irq_tag = tnapi->last_tag;
7047 		rmb();
7048 
7049 		/* check for RX/TX work to do */
7050 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7051 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7052 
7053 			/* This test here is not race free, but will reduce
7054 			 * the number of interrupts by looping again.
7055 			 */
7056 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7057 				continue;
7058 
7059 			napi_complete(napi);
7060 			/* Reenable interrupts. */
7061 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7062 
7063 			/* This test here is synchronized by napi_schedule()
7064 			 * and napi_complete() to close the race condition.
7065 			 */
7066 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7067 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7068 						  HOSTCC_MODE_ENABLE |
7069 						  tnapi->coal_now);
7070 			}
7071 			mmiowb();
7072 			break;
7073 		}
7074 	}
7075 
7076 	return work_done;
7077 
7078 tx_recovery:
7079 	/* work_done is guaranteed to be less than budget. */
7080 	napi_complete(napi);
7081 	tg3_reset_task_schedule(tp);
7082 	return work_done;
7083 }
7084 
7085 static void tg3_process_error(struct tg3 *tp)
7086 {
7087 	u32 val;
7088 	bool real_error = false;
7089 
7090 	if (tg3_flag(tp, ERROR_PROCESSED))
7091 		return;
7092 
7093 	/* Check Flow Attention register */
7094 	val = tr32(HOSTCC_FLOW_ATTN);
7095 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7096 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7097 		real_error = true;
7098 	}
7099 
7100 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7101 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7102 		real_error = true;
7103 	}
7104 
7105 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7106 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7107 		real_error = true;
7108 	}
7109 
7110 	if (!real_error)
7111 		return;
7112 
7113 	tg3_dump_state(tp);
7114 
7115 	tg3_flag_set(tp, ERROR_PROCESSED);
7116 	tg3_reset_task_schedule(tp);
7117 }
7118 
7119 static int tg3_poll(struct napi_struct *napi, int budget)
7120 {
7121 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7122 	struct tg3 *tp = tnapi->tp;
7123 	int work_done = 0;
7124 	struct tg3_hw_status *sblk = tnapi->hw_status;
7125 
7126 	while (1) {
7127 		if (sblk->status & SD_STATUS_ERROR)
7128 			tg3_process_error(tp);
7129 
7130 		tg3_poll_link(tp);
7131 
7132 		work_done = tg3_poll_work(tnapi, work_done, budget);
7133 
7134 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7135 			goto tx_recovery;
7136 
7137 		if (unlikely(work_done >= budget))
7138 			break;
7139 
7140 		if (tg3_flag(tp, TAGGED_STATUS)) {
7141 			/* tp->last_tag is used in tg3_int_reenable() below
7142 			 * to tell the hw how much work has been processed,
7143 			 * so we must read it before checking for more work.
7144 			 */
7145 			tnapi->last_tag = sblk->status_tag;
7146 			tnapi->last_irq_tag = tnapi->last_tag;
7147 			rmb();
7148 		} else
7149 			sblk->status &= ~SD_STATUS_UPDATED;
7150 
7151 		if (likely(!tg3_has_work(tnapi))) {
7152 			napi_complete(napi);
7153 			tg3_int_reenable(tnapi);
7154 			break;
7155 		}
7156 	}
7157 
7158 	return work_done;
7159 
7160 tx_recovery:
7161 	/* work_done is guaranteed to be less than budget. */
7162 	napi_complete(napi);
7163 	tg3_reset_task_schedule(tp);
7164 	return work_done;
7165 }
7166 
7167 static void tg3_napi_disable(struct tg3 *tp)
7168 {
7169 	int i;
7170 
7171 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7172 		napi_disable(&tp->napi[i].napi);
7173 }
7174 
7175 static void tg3_napi_enable(struct tg3 *tp)
7176 {
7177 	int i;
7178 
7179 	for (i = 0; i < tp->irq_cnt; i++)
7180 		napi_enable(&tp->napi[i].napi);
7181 }
7182 
7183 static void tg3_napi_init(struct tg3 *tp)
7184 {
7185 	int i;
7186 
7187 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7188 	for (i = 1; i < tp->irq_cnt; i++)
7189 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7190 }
7191 
7192 static void tg3_napi_fini(struct tg3 *tp)
7193 {
7194 	int i;
7195 
7196 	for (i = 0; i < tp->irq_cnt; i++)
7197 		netif_napi_del(&tp->napi[i].napi);
7198 }
7199 
7200 static inline void tg3_netif_stop(struct tg3 *tp)
7201 {
7202 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7203 	tg3_napi_disable(tp);
7204 	netif_carrier_off(tp->dev);
7205 	netif_tx_disable(tp->dev);
7206 }
7207 
7208 /* tp->lock must be held */
7209 static inline void tg3_netif_start(struct tg3 *tp)
7210 {
7211 	tg3_ptp_resume(tp);
7212 
7213 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7214 	 * appropriate so long as all callers are assured to
7215 	 * have free tx slots (such as after tg3_init_hw)
7216 	 */
7217 	netif_tx_wake_all_queues(tp->dev);
7218 
7219 	if (tp->link_up)
7220 		netif_carrier_on(tp->dev);
7221 
7222 	tg3_napi_enable(tp);
7223 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7224 	tg3_enable_ints(tp);
7225 }
7226 
7227 static void tg3_irq_quiesce(struct tg3 *tp)
7228 {
7229 	int i;
7230 
7231 	BUG_ON(tp->irq_sync);
7232 
7233 	tp->irq_sync = 1;
7234 	smp_mb();
7235 
7236 	for (i = 0; i < tp->irq_cnt; i++)
7237 		synchronize_irq(tp->napi[i].irq_vec);
7238 }
7239 
7240 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7241  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7242  * with as well.  Most of the time, this is not necessary except when
7243  * shutting down the device.
7244  */
7245 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7246 {
7247 	spin_lock_bh(&tp->lock);
7248 	if (irq_sync)
7249 		tg3_irq_quiesce(tp);
7250 }
7251 
7252 static inline void tg3_full_unlock(struct tg3 *tp)
7253 {
7254 	spin_unlock_bh(&tp->lock);
7255 }
7256 
7257 /* One-shot MSI handler - Chip automatically disables interrupt
7258  * after sending MSI so driver doesn't have to do it.
7259  */
7260 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7261 {
7262 	struct tg3_napi *tnapi = dev_id;
7263 	struct tg3 *tp = tnapi->tp;
7264 
7265 	prefetch(tnapi->hw_status);
7266 	if (tnapi->rx_rcb)
7267 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7268 
7269 	if (likely(!tg3_irq_sync(tp)))
7270 		napi_schedule(&tnapi->napi);
7271 
7272 	return IRQ_HANDLED;
7273 }
7274 
7275 /* MSI ISR - No need to check for interrupt sharing and no need to
7276  * flush status block and interrupt mailbox. PCI ordering rules
7277  * guarantee that MSI will arrive after the status block.
7278  */
7279 static irqreturn_t tg3_msi(int irq, void *dev_id)
7280 {
7281 	struct tg3_napi *tnapi = dev_id;
7282 	struct tg3 *tp = tnapi->tp;
7283 
7284 	prefetch(tnapi->hw_status);
7285 	if (tnapi->rx_rcb)
7286 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7287 	/*
7288 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7289 	 * chip-internal interrupt pending events.
7290 	 * Writing non-zero to intr-mbox-0 additional tells the
7291 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7292 	 * event coalescing.
7293 	 */
7294 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7295 	if (likely(!tg3_irq_sync(tp)))
7296 		napi_schedule(&tnapi->napi);
7297 
7298 	return IRQ_RETVAL(1);
7299 }
7300 
7301 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7302 {
7303 	struct tg3_napi *tnapi = dev_id;
7304 	struct tg3 *tp = tnapi->tp;
7305 	struct tg3_hw_status *sblk = tnapi->hw_status;
7306 	unsigned int handled = 1;
7307 
7308 	/* In INTx mode, it is possible for the interrupt to arrive at
7309 	 * the CPU before the status block posted prior to the interrupt.
7310 	 * Reading the PCI State register will confirm whether the
7311 	 * interrupt is ours and will flush the status block.
7312 	 */
7313 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7314 		if (tg3_flag(tp, CHIP_RESETTING) ||
7315 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7316 			handled = 0;
7317 			goto out;
7318 		}
7319 	}
7320 
7321 	/*
7322 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7323 	 * chip-internal interrupt pending events.
7324 	 * Writing non-zero to intr-mbox-0 additional tells the
7325 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7326 	 * event coalescing.
7327 	 *
7328 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7329 	 * spurious interrupts.  The flush impacts performance but
7330 	 * excessive spurious interrupts can be worse in some cases.
7331 	 */
7332 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7333 	if (tg3_irq_sync(tp))
7334 		goto out;
7335 	sblk->status &= ~SD_STATUS_UPDATED;
7336 	if (likely(tg3_has_work(tnapi))) {
7337 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7338 		napi_schedule(&tnapi->napi);
7339 	} else {
7340 		/* No work, shared interrupt perhaps?  re-enable
7341 		 * interrupts, and flush that PCI write
7342 		 */
7343 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7344 			       0x00000000);
7345 	}
7346 out:
7347 	return IRQ_RETVAL(handled);
7348 }
7349 
7350 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7351 {
7352 	struct tg3_napi *tnapi = dev_id;
7353 	struct tg3 *tp = tnapi->tp;
7354 	struct tg3_hw_status *sblk = tnapi->hw_status;
7355 	unsigned int handled = 1;
7356 
7357 	/* In INTx mode, it is possible for the interrupt to arrive at
7358 	 * the CPU before the status block posted prior to the interrupt.
7359 	 * Reading the PCI State register will confirm whether the
7360 	 * interrupt is ours and will flush the status block.
7361 	 */
7362 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7363 		if (tg3_flag(tp, CHIP_RESETTING) ||
7364 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7365 			handled = 0;
7366 			goto out;
7367 		}
7368 	}
7369 
7370 	/*
7371 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7372 	 * chip-internal interrupt pending events.
7373 	 * writing non-zero to intr-mbox-0 additional tells the
7374 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7375 	 * event coalescing.
7376 	 *
7377 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7378 	 * spurious interrupts.  The flush impacts performance but
7379 	 * excessive spurious interrupts can be worse in some cases.
7380 	 */
7381 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7382 
7383 	/*
7384 	 * In a shared interrupt configuration, sometimes other devices'
7385 	 * interrupts will scream.  We record the current status tag here
7386 	 * so that the above check can report that the screaming interrupts
7387 	 * are unhandled.  Eventually they will be silenced.
7388 	 */
7389 	tnapi->last_irq_tag = sblk->status_tag;
7390 
7391 	if (tg3_irq_sync(tp))
7392 		goto out;
7393 
7394 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7395 
7396 	napi_schedule(&tnapi->napi);
7397 
7398 out:
7399 	return IRQ_RETVAL(handled);
7400 }
7401 
7402 /* ISR for interrupt test */
7403 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7404 {
7405 	struct tg3_napi *tnapi = dev_id;
7406 	struct tg3 *tp = tnapi->tp;
7407 	struct tg3_hw_status *sblk = tnapi->hw_status;
7408 
7409 	if ((sblk->status & SD_STATUS_UPDATED) ||
7410 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7411 		tg3_disable_ints(tp);
7412 		return IRQ_RETVAL(1);
7413 	}
7414 	return IRQ_RETVAL(0);
7415 }
7416 
7417 #ifdef CONFIG_NET_POLL_CONTROLLER
7418 static void tg3_poll_controller(struct net_device *dev)
7419 {
7420 	int i;
7421 	struct tg3 *tp = netdev_priv(dev);
7422 
7423 	if (tg3_irq_sync(tp))
7424 		return;
7425 
7426 	for (i = 0; i < tp->irq_cnt; i++)
7427 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7428 }
7429 #endif
7430 
7431 static void tg3_tx_timeout(struct net_device *dev)
7432 {
7433 	struct tg3 *tp = netdev_priv(dev);
7434 
7435 	if (netif_msg_tx_err(tp)) {
7436 		netdev_err(dev, "transmit timed out, resetting\n");
7437 		tg3_dump_state(tp);
7438 	}
7439 
7440 	tg3_reset_task_schedule(tp);
7441 }
7442 
7443 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7444 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7445 {
7446 	u32 base = (u32) mapping & 0xffffffff;
7447 
7448 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7449 }
7450 
7451 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7452  * of any 4GB boundaries: 4G, 8G, etc
7453  */
7454 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7455 					   u32 len, u32 mss)
7456 {
7457 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7458 		u32 base = (u32) mapping & 0xffffffff;
7459 
7460 		return ((base + len + (mss & 0x3fff)) < base);
7461 	}
7462 	return 0;
7463 }
7464 
7465 /* Test for DMA addresses > 40-bit */
7466 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7467 					  int len)
7468 {
7469 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7470 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7471 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7472 	return 0;
7473 #else
7474 	return 0;
7475 #endif
7476 }
7477 
7478 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7479 				 dma_addr_t mapping, u32 len, u32 flags,
7480 				 u32 mss, u32 vlan)
7481 {
7482 	txbd->addr_hi = ((u64) mapping >> 32);
7483 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7484 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7485 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7486 }
7487 
7488 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7489 			    dma_addr_t map, u32 len, u32 flags,
7490 			    u32 mss, u32 vlan)
7491 {
7492 	struct tg3 *tp = tnapi->tp;
7493 	bool hwbug = false;
7494 
7495 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7496 		hwbug = true;
7497 
7498 	if (tg3_4g_overflow_test(map, len))
7499 		hwbug = true;
7500 
7501 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7502 		hwbug = true;
7503 
7504 	if (tg3_40bit_overflow_test(tp, map, len))
7505 		hwbug = true;
7506 
7507 	if (tp->dma_limit) {
7508 		u32 prvidx = *entry;
7509 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7510 		while (len > tp->dma_limit && *budget) {
7511 			u32 frag_len = tp->dma_limit;
7512 			len -= tp->dma_limit;
7513 
7514 			/* Avoid the 8byte DMA problem */
7515 			if (len <= 8) {
7516 				len += tp->dma_limit / 2;
7517 				frag_len = tp->dma_limit / 2;
7518 			}
7519 
7520 			tnapi->tx_buffers[*entry].fragmented = true;
7521 
7522 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7523 				      frag_len, tmp_flag, mss, vlan);
7524 			*budget -= 1;
7525 			prvidx = *entry;
7526 			*entry = NEXT_TX(*entry);
7527 
7528 			map += frag_len;
7529 		}
7530 
7531 		if (len) {
7532 			if (*budget) {
7533 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7534 					      len, flags, mss, vlan);
7535 				*budget -= 1;
7536 				*entry = NEXT_TX(*entry);
7537 			} else {
7538 				hwbug = true;
7539 				tnapi->tx_buffers[prvidx].fragmented = false;
7540 			}
7541 		}
7542 	} else {
7543 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7544 			      len, flags, mss, vlan);
7545 		*entry = NEXT_TX(*entry);
7546 	}
7547 
7548 	return hwbug;
7549 }
7550 
7551 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7552 {
7553 	int i;
7554 	struct sk_buff *skb;
7555 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7556 
7557 	skb = txb->skb;
7558 	txb->skb = NULL;
7559 
7560 	pci_unmap_single(tnapi->tp->pdev,
7561 			 dma_unmap_addr(txb, mapping),
7562 			 skb_headlen(skb),
7563 			 PCI_DMA_TODEVICE);
7564 
7565 	while (txb->fragmented) {
7566 		txb->fragmented = false;
7567 		entry = NEXT_TX(entry);
7568 		txb = &tnapi->tx_buffers[entry];
7569 	}
7570 
7571 	for (i = 0; i <= last; i++) {
7572 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7573 
7574 		entry = NEXT_TX(entry);
7575 		txb = &tnapi->tx_buffers[entry];
7576 
7577 		pci_unmap_page(tnapi->tp->pdev,
7578 			       dma_unmap_addr(txb, mapping),
7579 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7580 
7581 		while (txb->fragmented) {
7582 			txb->fragmented = false;
7583 			entry = NEXT_TX(entry);
7584 			txb = &tnapi->tx_buffers[entry];
7585 		}
7586 	}
7587 }
7588 
7589 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7590 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7591 				       struct sk_buff **pskb,
7592 				       u32 *entry, u32 *budget,
7593 				       u32 base_flags, u32 mss, u32 vlan)
7594 {
7595 	struct tg3 *tp = tnapi->tp;
7596 	struct sk_buff *new_skb, *skb = *pskb;
7597 	dma_addr_t new_addr = 0;
7598 	int ret = 0;
7599 
7600 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7601 		new_skb = skb_copy(skb, GFP_ATOMIC);
7602 	else {
7603 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7604 
7605 		new_skb = skb_copy_expand(skb,
7606 					  skb_headroom(skb) + more_headroom,
7607 					  skb_tailroom(skb), GFP_ATOMIC);
7608 	}
7609 
7610 	if (!new_skb) {
7611 		ret = -1;
7612 	} else {
7613 		/* New SKB is guaranteed to be linear. */
7614 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7615 					  PCI_DMA_TODEVICE);
7616 		/* Make sure the mapping succeeded */
7617 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7618 			dev_kfree_skb(new_skb);
7619 			ret = -1;
7620 		} else {
7621 			u32 save_entry = *entry;
7622 
7623 			base_flags |= TXD_FLAG_END;
7624 
7625 			tnapi->tx_buffers[*entry].skb = new_skb;
7626 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7627 					   mapping, new_addr);
7628 
7629 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7630 					    new_skb->len, base_flags,
7631 					    mss, vlan)) {
7632 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7633 				dev_kfree_skb(new_skb);
7634 				ret = -1;
7635 			}
7636 		}
7637 	}
7638 
7639 	dev_kfree_skb(skb);
7640 	*pskb = new_skb;
7641 	return ret;
7642 }
7643 
7644 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7645 
7646 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7647  * TSO header is greater than 80 bytes.
7648  */
7649 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7650 {
7651 	struct sk_buff *segs, *nskb;
7652 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7653 
7654 	/* Estimate the number of fragments in the worst case */
7655 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7656 		netif_stop_queue(tp->dev);
7657 
7658 		/* netif_tx_stop_queue() must be done before checking
7659 		 * checking tx index in tg3_tx_avail() below, because in
7660 		 * tg3_tx(), we update tx index before checking for
7661 		 * netif_tx_queue_stopped().
7662 		 */
7663 		smp_mb();
7664 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7665 			return NETDEV_TX_BUSY;
7666 
7667 		netif_wake_queue(tp->dev);
7668 	}
7669 
7670 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7671 	if (IS_ERR(segs))
7672 		goto tg3_tso_bug_end;
7673 
7674 	do {
7675 		nskb = segs;
7676 		segs = segs->next;
7677 		nskb->next = NULL;
7678 		tg3_start_xmit(nskb, tp->dev);
7679 	} while (segs);
7680 
7681 tg3_tso_bug_end:
7682 	dev_kfree_skb(skb);
7683 
7684 	return NETDEV_TX_OK;
7685 }
7686 
7687 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7688  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7689  */
7690 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7691 {
7692 	struct tg3 *tp = netdev_priv(dev);
7693 	u32 len, entry, base_flags, mss, vlan = 0;
7694 	u32 budget;
7695 	int i = -1, would_hit_hwbug;
7696 	dma_addr_t mapping;
7697 	struct tg3_napi *tnapi;
7698 	struct netdev_queue *txq;
7699 	unsigned int last;
7700 
7701 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7702 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7703 	if (tg3_flag(tp, ENABLE_TSS))
7704 		tnapi++;
7705 
7706 	budget = tg3_tx_avail(tnapi);
7707 
7708 	/* We are running in BH disabled context with netif_tx_lock
7709 	 * and TX reclaim runs via tp->napi.poll inside of a software
7710 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7711 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7712 	 */
7713 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7714 		if (!netif_tx_queue_stopped(txq)) {
7715 			netif_tx_stop_queue(txq);
7716 
7717 			/* This is a hard error, log it. */
7718 			netdev_err(dev,
7719 				   "BUG! Tx Ring full when queue awake!\n");
7720 		}
7721 		return NETDEV_TX_BUSY;
7722 	}
7723 
7724 	entry = tnapi->tx_prod;
7725 	base_flags = 0;
7726 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7727 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7728 
7729 	mss = skb_shinfo(skb)->gso_size;
7730 	if (mss) {
7731 		struct iphdr *iph;
7732 		u32 tcp_opt_len, hdr_len;
7733 
7734 		if (skb_header_cloned(skb) &&
7735 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7736 			goto drop;
7737 
7738 		iph = ip_hdr(skb);
7739 		tcp_opt_len = tcp_optlen(skb);
7740 
7741 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7742 
7743 		if (!skb_is_gso_v6(skb)) {
7744 			iph->check = 0;
7745 			iph->tot_len = htons(mss + hdr_len);
7746 		}
7747 
7748 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7749 		    tg3_flag(tp, TSO_BUG))
7750 			return tg3_tso_bug(tp, skb);
7751 
7752 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7753 			       TXD_FLAG_CPU_POST_DMA);
7754 
7755 		if (tg3_flag(tp, HW_TSO_1) ||
7756 		    tg3_flag(tp, HW_TSO_2) ||
7757 		    tg3_flag(tp, HW_TSO_3)) {
7758 			tcp_hdr(skb)->check = 0;
7759 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7760 		} else
7761 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7762 								 iph->daddr, 0,
7763 								 IPPROTO_TCP,
7764 								 0);
7765 
7766 		if (tg3_flag(tp, HW_TSO_3)) {
7767 			mss |= (hdr_len & 0xc) << 12;
7768 			if (hdr_len & 0x10)
7769 				base_flags |= 0x00000010;
7770 			base_flags |= (hdr_len & 0x3e0) << 5;
7771 		} else if (tg3_flag(tp, HW_TSO_2))
7772 			mss |= hdr_len << 9;
7773 		else if (tg3_flag(tp, HW_TSO_1) ||
7774 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7775 			if (tcp_opt_len || iph->ihl > 5) {
7776 				int tsflags;
7777 
7778 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7779 				mss |= (tsflags << 11);
7780 			}
7781 		} else {
7782 			if (tcp_opt_len || iph->ihl > 5) {
7783 				int tsflags;
7784 
7785 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7786 				base_flags |= tsflags << 12;
7787 			}
7788 		}
7789 	}
7790 
7791 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7792 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7793 		base_flags |= TXD_FLAG_JMB_PKT;
7794 
7795 	if (vlan_tx_tag_present(skb)) {
7796 		base_flags |= TXD_FLAG_VLAN;
7797 		vlan = vlan_tx_tag_get(skb);
7798 	}
7799 
7800 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7801 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7802 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7803 		base_flags |= TXD_FLAG_HWTSTAMP;
7804 	}
7805 
7806 	len = skb_headlen(skb);
7807 
7808 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7809 	if (pci_dma_mapping_error(tp->pdev, mapping))
7810 		goto drop;
7811 
7812 
7813 	tnapi->tx_buffers[entry].skb = skb;
7814 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7815 
7816 	would_hit_hwbug = 0;
7817 
7818 	if (tg3_flag(tp, 5701_DMA_BUG))
7819 		would_hit_hwbug = 1;
7820 
7821 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7822 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7823 			    mss, vlan)) {
7824 		would_hit_hwbug = 1;
7825 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7826 		u32 tmp_mss = mss;
7827 
7828 		if (!tg3_flag(tp, HW_TSO_1) &&
7829 		    !tg3_flag(tp, HW_TSO_2) &&
7830 		    !tg3_flag(tp, HW_TSO_3))
7831 			tmp_mss = 0;
7832 
7833 		/* Now loop through additional data
7834 		 * fragments, and queue them.
7835 		 */
7836 		last = skb_shinfo(skb)->nr_frags - 1;
7837 		for (i = 0; i <= last; i++) {
7838 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7839 
7840 			len = skb_frag_size(frag);
7841 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7842 						   len, DMA_TO_DEVICE);
7843 
7844 			tnapi->tx_buffers[entry].skb = NULL;
7845 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7846 					   mapping);
7847 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7848 				goto dma_error;
7849 
7850 			if (!budget ||
7851 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7852 					    len, base_flags |
7853 					    ((i == last) ? TXD_FLAG_END : 0),
7854 					    tmp_mss, vlan)) {
7855 				would_hit_hwbug = 1;
7856 				break;
7857 			}
7858 		}
7859 	}
7860 
7861 	if (would_hit_hwbug) {
7862 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7863 
7864 		/* If the workaround fails due to memory/mapping
7865 		 * failure, silently drop this packet.
7866 		 */
7867 		entry = tnapi->tx_prod;
7868 		budget = tg3_tx_avail(tnapi);
7869 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7870 						base_flags, mss, vlan))
7871 			goto drop_nofree;
7872 	}
7873 
7874 	skb_tx_timestamp(skb);
7875 	netdev_tx_sent_queue(txq, skb->len);
7876 
7877 	/* Sync BD data before updating mailbox */
7878 	wmb();
7879 
7880 	/* Packets are ready, update Tx producer idx local and on card. */
7881 	tw32_tx_mbox(tnapi->prodmbox, entry);
7882 
7883 	tnapi->tx_prod = entry;
7884 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7885 		netif_tx_stop_queue(txq);
7886 
7887 		/* netif_tx_stop_queue() must be done before checking
7888 		 * checking tx index in tg3_tx_avail() below, because in
7889 		 * tg3_tx(), we update tx index before checking for
7890 		 * netif_tx_queue_stopped().
7891 		 */
7892 		smp_mb();
7893 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7894 			netif_tx_wake_queue(txq);
7895 	}
7896 
7897 	mmiowb();
7898 	return NETDEV_TX_OK;
7899 
7900 dma_error:
7901 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7902 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7903 drop:
7904 	dev_kfree_skb(skb);
7905 drop_nofree:
7906 	tp->tx_dropped++;
7907 	return NETDEV_TX_OK;
7908 }
7909 
7910 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7911 {
7912 	if (enable) {
7913 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7914 				  MAC_MODE_PORT_MODE_MASK);
7915 
7916 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7917 
7918 		if (!tg3_flag(tp, 5705_PLUS))
7919 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7920 
7921 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7922 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7923 		else
7924 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7925 	} else {
7926 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7927 
7928 		if (tg3_flag(tp, 5705_PLUS) ||
7929 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7930 		    tg3_asic_rev(tp) == ASIC_REV_5700)
7931 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7932 	}
7933 
7934 	tw32(MAC_MODE, tp->mac_mode);
7935 	udelay(40);
7936 }
7937 
7938 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7939 {
7940 	u32 val, bmcr, mac_mode, ptest = 0;
7941 
7942 	tg3_phy_toggle_apd(tp, false);
7943 	tg3_phy_toggle_automdix(tp, false);
7944 
7945 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7946 		return -EIO;
7947 
7948 	bmcr = BMCR_FULLDPLX;
7949 	switch (speed) {
7950 	case SPEED_10:
7951 		break;
7952 	case SPEED_100:
7953 		bmcr |= BMCR_SPEED100;
7954 		break;
7955 	case SPEED_1000:
7956 	default:
7957 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7958 			speed = SPEED_100;
7959 			bmcr |= BMCR_SPEED100;
7960 		} else {
7961 			speed = SPEED_1000;
7962 			bmcr |= BMCR_SPEED1000;
7963 		}
7964 	}
7965 
7966 	if (extlpbk) {
7967 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7968 			tg3_readphy(tp, MII_CTRL1000, &val);
7969 			val |= CTL1000_AS_MASTER |
7970 			       CTL1000_ENABLE_MASTER;
7971 			tg3_writephy(tp, MII_CTRL1000, val);
7972 		} else {
7973 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7974 				MII_TG3_FET_PTEST_TRIM_2;
7975 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7976 		}
7977 	} else
7978 		bmcr |= BMCR_LOOPBACK;
7979 
7980 	tg3_writephy(tp, MII_BMCR, bmcr);
7981 
7982 	/* The write needs to be flushed for the FETs */
7983 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7984 		tg3_readphy(tp, MII_BMCR, &bmcr);
7985 
7986 	udelay(40);
7987 
7988 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7989 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
7990 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7991 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7992 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7993 
7994 		/* The write needs to be flushed for the AC131 */
7995 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7996 	}
7997 
7998 	/* Reset to prevent losing 1st rx packet intermittently */
7999 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8000 	    tg3_flag(tp, 5780_CLASS)) {
8001 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8002 		udelay(10);
8003 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8004 	}
8005 
8006 	mac_mode = tp->mac_mode &
8007 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8008 	if (speed == SPEED_1000)
8009 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8010 	else
8011 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8012 
8013 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8014 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8015 
8016 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8017 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8018 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8019 			mac_mode |= MAC_MODE_LINK_POLARITY;
8020 
8021 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8022 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8023 	}
8024 
8025 	tw32(MAC_MODE, mac_mode);
8026 	udelay(40);
8027 
8028 	return 0;
8029 }
8030 
8031 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8032 {
8033 	struct tg3 *tp = netdev_priv(dev);
8034 
8035 	if (features & NETIF_F_LOOPBACK) {
8036 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8037 			return;
8038 
8039 		spin_lock_bh(&tp->lock);
8040 		tg3_mac_loopback(tp, true);
8041 		netif_carrier_on(tp->dev);
8042 		spin_unlock_bh(&tp->lock);
8043 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8044 	} else {
8045 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8046 			return;
8047 
8048 		spin_lock_bh(&tp->lock);
8049 		tg3_mac_loopback(tp, false);
8050 		/* Force link status check */
8051 		tg3_setup_phy(tp, true);
8052 		spin_unlock_bh(&tp->lock);
8053 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8054 	}
8055 }
8056 
8057 static netdev_features_t tg3_fix_features(struct net_device *dev,
8058 	netdev_features_t features)
8059 {
8060 	struct tg3 *tp = netdev_priv(dev);
8061 
8062 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8063 		features &= ~NETIF_F_ALL_TSO;
8064 
8065 	return features;
8066 }
8067 
8068 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8069 {
8070 	netdev_features_t changed = dev->features ^ features;
8071 
8072 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8073 		tg3_set_loopback(dev, features);
8074 
8075 	return 0;
8076 }
8077 
8078 static void tg3_rx_prodring_free(struct tg3 *tp,
8079 				 struct tg3_rx_prodring_set *tpr)
8080 {
8081 	int i;
8082 
8083 	if (tpr != &tp->napi[0].prodring) {
8084 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8085 		     i = (i + 1) & tp->rx_std_ring_mask)
8086 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8087 					tp->rx_pkt_map_sz);
8088 
8089 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8090 			for (i = tpr->rx_jmb_cons_idx;
8091 			     i != tpr->rx_jmb_prod_idx;
8092 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8093 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8094 						TG3_RX_JMB_MAP_SZ);
8095 			}
8096 		}
8097 
8098 		return;
8099 	}
8100 
8101 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8102 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8103 				tp->rx_pkt_map_sz);
8104 
8105 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8106 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8107 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8108 					TG3_RX_JMB_MAP_SZ);
8109 	}
8110 }
8111 
8112 /* Initialize rx rings for packet processing.
8113  *
8114  * The chip has been shut down and the driver detached from
8115  * the networking, so no interrupts or new tx packets will
8116  * end up in the driver.  tp->{tx,}lock are held and thus
8117  * we may not sleep.
8118  */
8119 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8120 				 struct tg3_rx_prodring_set *tpr)
8121 {
8122 	u32 i, rx_pkt_dma_sz;
8123 
8124 	tpr->rx_std_cons_idx = 0;
8125 	tpr->rx_std_prod_idx = 0;
8126 	tpr->rx_jmb_cons_idx = 0;
8127 	tpr->rx_jmb_prod_idx = 0;
8128 
8129 	if (tpr != &tp->napi[0].prodring) {
8130 		memset(&tpr->rx_std_buffers[0], 0,
8131 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8132 		if (tpr->rx_jmb_buffers)
8133 			memset(&tpr->rx_jmb_buffers[0], 0,
8134 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8135 		goto done;
8136 	}
8137 
8138 	/* Zero out all descriptors. */
8139 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8140 
8141 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8142 	if (tg3_flag(tp, 5780_CLASS) &&
8143 	    tp->dev->mtu > ETH_DATA_LEN)
8144 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8145 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8146 
8147 	/* Initialize invariants of the rings, we only set this
8148 	 * stuff once.  This works because the card does not
8149 	 * write into the rx buffer posting rings.
8150 	 */
8151 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8152 		struct tg3_rx_buffer_desc *rxd;
8153 
8154 		rxd = &tpr->rx_std[i];
8155 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8156 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8157 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8158 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8159 	}
8160 
8161 	/* Now allocate fresh SKBs for each rx ring. */
8162 	for (i = 0; i < tp->rx_pending; i++) {
8163 		unsigned int frag_size;
8164 
8165 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8166 				      &frag_size) < 0) {
8167 			netdev_warn(tp->dev,
8168 				    "Using a smaller RX standard ring. Only "
8169 				    "%d out of %d buffers were allocated "
8170 				    "successfully\n", i, tp->rx_pending);
8171 			if (i == 0)
8172 				goto initfail;
8173 			tp->rx_pending = i;
8174 			break;
8175 		}
8176 	}
8177 
8178 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8179 		goto done;
8180 
8181 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8182 
8183 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8184 		goto done;
8185 
8186 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8187 		struct tg3_rx_buffer_desc *rxd;
8188 
8189 		rxd = &tpr->rx_jmb[i].std;
8190 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8191 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8192 				  RXD_FLAG_JUMBO;
8193 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8194 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8195 	}
8196 
8197 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8198 		unsigned int frag_size;
8199 
8200 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8201 				      &frag_size) < 0) {
8202 			netdev_warn(tp->dev,
8203 				    "Using a smaller RX jumbo ring. Only %d "
8204 				    "out of %d buffers were allocated "
8205 				    "successfully\n", i, tp->rx_jumbo_pending);
8206 			if (i == 0)
8207 				goto initfail;
8208 			tp->rx_jumbo_pending = i;
8209 			break;
8210 		}
8211 	}
8212 
8213 done:
8214 	return 0;
8215 
8216 initfail:
8217 	tg3_rx_prodring_free(tp, tpr);
8218 	return -ENOMEM;
8219 }
8220 
8221 static void tg3_rx_prodring_fini(struct tg3 *tp,
8222 				 struct tg3_rx_prodring_set *tpr)
8223 {
8224 	kfree(tpr->rx_std_buffers);
8225 	tpr->rx_std_buffers = NULL;
8226 	kfree(tpr->rx_jmb_buffers);
8227 	tpr->rx_jmb_buffers = NULL;
8228 	if (tpr->rx_std) {
8229 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8230 				  tpr->rx_std, tpr->rx_std_mapping);
8231 		tpr->rx_std = NULL;
8232 	}
8233 	if (tpr->rx_jmb) {
8234 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8235 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8236 		tpr->rx_jmb = NULL;
8237 	}
8238 }
8239 
8240 static int tg3_rx_prodring_init(struct tg3 *tp,
8241 				struct tg3_rx_prodring_set *tpr)
8242 {
8243 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8244 				      GFP_KERNEL);
8245 	if (!tpr->rx_std_buffers)
8246 		return -ENOMEM;
8247 
8248 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8249 					 TG3_RX_STD_RING_BYTES(tp),
8250 					 &tpr->rx_std_mapping,
8251 					 GFP_KERNEL);
8252 	if (!tpr->rx_std)
8253 		goto err_out;
8254 
8255 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8256 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8257 					      GFP_KERNEL);
8258 		if (!tpr->rx_jmb_buffers)
8259 			goto err_out;
8260 
8261 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8262 						 TG3_RX_JMB_RING_BYTES(tp),
8263 						 &tpr->rx_jmb_mapping,
8264 						 GFP_KERNEL);
8265 		if (!tpr->rx_jmb)
8266 			goto err_out;
8267 	}
8268 
8269 	return 0;
8270 
8271 err_out:
8272 	tg3_rx_prodring_fini(tp, tpr);
8273 	return -ENOMEM;
8274 }
8275 
8276 /* Free up pending packets in all rx/tx rings.
8277  *
8278  * The chip has been shut down and the driver detached from
8279  * the networking, so no interrupts or new tx packets will
8280  * end up in the driver.  tp->{tx,}lock is not held and we are not
8281  * in an interrupt context and thus may sleep.
8282  */
8283 static void tg3_free_rings(struct tg3 *tp)
8284 {
8285 	int i, j;
8286 
8287 	for (j = 0; j < tp->irq_cnt; j++) {
8288 		struct tg3_napi *tnapi = &tp->napi[j];
8289 
8290 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8291 
8292 		if (!tnapi->tx_buffers)
8293 			continue;
8294 
8295 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8296 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8297 
8298 			if (!skb)
8299 				continue;
8300 
8301 			tg3_tx_skb_unmap(tnapi, i,
8302 					 skb_shinfo(skb)->nr_frags - 1);
8303 
8304 			dev_kfree_skb_any(skb);
8305 		}
8306 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8307 	}
8308 }
8309 
8310 /* Initialize tx/rx rings for packet processing.
8311  *
8312  * The chip has been shut down and the driver detached from
8313  * the networking, so no interrupts or new tx packets will
8314  * end up in the driver.  tp->{tx,}lock are held and thus
8315  * we may not sleep.
8316  */
8317 static int tg3_init_rings(struct tg3 *tp)
8318 {
8319 	int i;
8320 
8321 	/* Free up all the SKBs. */
8322 	tg3_free_rings(tp);
8323 
8324 	for (i = 0; i < tp->irq_cnt; i++) {
8325 		struct tg3_napi *tnapi = &tp->napi[i];
8326 
8327 		tnapi->last_tag = 0;
8328 		tnapi->last_irq_tag = 0;
8329 		tnapi->hw_status->status = 0;
8330 		tnapi->hw_status->status_tag = 0;
8331 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8332 
8333 		tnapi->tx_prod = 0;
8334 		tnapi->tx_cons = 0;
8335 		if (tnapi->tx_ring)
8336 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8337 
8338 		tnapi->rx_rcb_ptr = 0;
8339 		if (tnapi->rx_rcb)
8340 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8341 
8342 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8343 			tg3_free_rings(tp);
8344 			return -ENOMEM;
8345 		}
8346 	}
8347 
8348 	return 0;
8349 }
8350 
8351 static void tg3_mem_tx_release(struct tg3 *tp)
8352 {
8353 	int i;
8354 
8355 	for (i = 0; i < tp->irq_max; i++) {
8356 		struct tg3_napi *tnapi = &tp->napi[i];
8357 
8358 		if (tnapi->tx_ring) {
8359 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8360 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8361 			tnapi->tx_ring = NULL;
8362 		}
8363 
8364 		kfree(tnapi->tx_buffers);
8365 		tnapi->tx_buffers = NULL;
8366 	}
8367 }
8368 
8369 static int tg3_mem_tx_acquire(struct tg3 *tp)
8370 {
8371 	int i;
8372 	struct tg3_napi *tnapi = &tp->napi[0];
8373 
8374 	/* If multivector TSS is enabled, vector 0 does not handle
8375 	 * tx interrupts.  Don't allocate any resources for it.
8376 	 */
8377 	if (tg3_flag(tp, ENABLE_TSS))
8378 		tnapi++;
8379 
8380 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8381 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8382 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8383 		if (!tnapi->tx_buffers)
8384 			goto err_out;
8385 
8386 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8387 						    TG3_TX_RING_BYTES,
8388 						    &tnapi->tx_desc_mapping,
8389 						    GFP_KERNEL);
8390 		if (!tnapi->tx_ring)
8391 			goto err_out;
8392 	}
8393 
8394 	return 0;
8395 
8396 err_out:
8397 	tg3_mem_tx_release(tp);
8398 	return -ENOMEM;
8399 }
8400 
8401 static void tg3_mem_rx_release(struct tg3 *tp)
8402 {
8403 	int i;
8404 
8405 	for (i = 0; i < tp->irq_max; i++) {
8406 		struct tg3_napi *tnapi = &tp->napi[i];
8407 
8408 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8409 
8410 		if (!tnapi->rx_rcb)
8411 			continue;
8412 
8413 		dma_free_coherent(&tp->pdev->dev,
8414 				  TG3_RX_RCB_RING_BYTES(tp),
8415 				  tnapi->rx_rcb,
8416 				  tnapi->rx_rcb_mapping);
8417 		tnapi->rx_rcb = NULL;
8418 	}
8419 }
8420 
8421 static int tg3_mem_rx_acquire(struct tg3 *tp)
8422 {
8423 	unsigned int i, limit;
8424 
8425 	limit = tp->rxq_cnt;
8426 
8427 	/* If RSS is enabled, we need a (dummy) producer ring
8428 	 * set on vector zero.  This is the true hw prodring.
8429 	 */
8430 	if (tg3_flag(tp, ENABLE_RSS))
8431 		limit++;
8432 
8433 	for (i = 0; i < limit; i++) {
8434 		struct tg3_napi *tnapi = &tp->napi[i];
8435 
8436 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8437 			goto err_out;
8438 
8439 		/* If multivector RSS is enabled, vector 0
8440 		 * does not handle rx or tx interrupts.
8441 		 * Don't allocate any resources for it.
8442 		 */
8443 		if (!i && tg3_flag(tp, ENABLE_RSS))
8444 			continue;
8445 
8446 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8447 						   TG3_RX_RCB_RING_BYTES(tp),
8448 						   &tnapi->rx_rcb_mapping,
8449 						   GFP_KERNEL | __GFP_ZERO);
8450 		if (!tnapi->rx_rcb)
8451 			goto err_out;
8452 	}
8453 
8454 	return 0;
8455 
8456 err_out:
8457 	tg3_mem_rx_release(tp);
8458 	return -ENOMEM;
8459 }
8460 
8461 /*
8462  * Must not be invoked with interrupt sources disabled and
8463  * the hardware shutdown down.
8464  */
8465 static void tg3_free_consistent(struct tg3 *tp)
8466 {
8467 	int i;
8468 
8469 	for (i = 0; i < tp->irq_cnt; i++) {
8470 		struct tg3_napi *tnapi = &tp->napi[i];
8471 
8472 		if (tnapi->hw_status) {
8473 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8474 					  tnapi->hw_status,
8475 					  tnapi->status_mapping);
8476 			tnapi->hw_status = NULL;
8477 		}
8478 	}
8479 
8480 	tg3_mem_rx_release(tp);
8481 	tg3_mem_tx_release(tp);
8482 
8483 	if (tp->hw_stats) {
8484 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8485 				  tp->hw_stats, tp->stats_mapping);
8486 		tp->hw_stats = NULL;
8487 	}
8488 }
8489 
8490 /*
8491  * Must not be invoked with interrupt sources disabled and
8492  * the hardware shutdown down.  Can sleep.
8493  */
8494 static int tg3_alloc_consistent(struct tg3 *tp)
8495 {
8496 	int i;
8497 
8498 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8499 					  sizeof(struct tg3_hw_stats),
8500 					  &tp->stats_mapping,
8501 					  GFP_KERNEL | __GFP_ZERO);
8502 	if (!tp->hw_stats)
8503 		goto err_out;
8504 
8505 	for (i = 0; i < tp->irq_cnt; i++) {
8506 		struct tg3_napi *tnapi = &tp->napi[i];
8507 		struct tg3_hw_status *sblk;
8508 
8509 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8510 						      TG3_HW_STATUS_SIZE,
8511 						      &tnapi->status_mapping,
8512 						      GFP_KERNEL | __GFP_ZERO);
8513 		if (!tnapi->hw_status)
8514 			goto err_out;
8515 
8516 		sblk = tnapi->hw_status;
8517 
8518 		if (tg3_flag(tp, ENABLE_RSS)) {
8519 			u16 *prodptr = NULL;
8520 
8521 			/*
8522 			 * When RSS is enabled, the status block format changes
8523 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8524 			 * and "rx_mini_consumer" members get mapped to the
8525 			 * other three rx return ring producer indexes.
8526 			 */
8527 			switch (i) {
8528 			case 1:
8529 				prodptr = &sblk->idx[0].rx_producer;
8530 				break;
8531 			case 2:
8532 				prodptr = &sblk->rx_jumbo_consumer;
8533 				break;
8534 			case 3:
8535 				prodptr = &sblk->reserved;
8536 				break;
8537 			case 4:
8538 				prodptr = &sblk->rx_mini_consumer;
8539 				break;
8540 			}
8541 			tnapi->rx_rcb_prod_idx = prodptr;
8542 		} else {
8543 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8544 		}
8545 	}
8546 
8547 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8548 		goto err_out;
8549 
8550 	return 0;
8551 
8552 err_out:
8553 	tg3_free_consistent(tp);
8554 	return -ENOMEM;
8555 }
8556 
8557 #define MAX_WAIT_CNT 1000
8558 
8559 /* To stop a block, clear the enable bit and poll till it
8560  * clears.  tp->lock is held.
8561  */
8562 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8563 {
8564 	unsigned int i;
8565 	u32 val;
8566 
8567 	if (tg3_flag(tp, 5705_PLUS)) {
8568 		switch (ofs) {
8569 		case RCVLSC_MODE:
8570 		case DMAC_MODE:
8571 		case MBFREE_MODE:
8572 		case BUFMGR_MODE:
8573 		case MEMARB_MODE:
8574 			/* We can't enable/disable these bits of the
8575 			 * 5705/5750, just say success.
8576 			 */
8577 			return 0;
8578 
8579 		default:
8580 			break;
8581 		}
8582 	}
8583 
8584 	val = tr32(ofs);
8585 	val &= ~enable_bit;
8586 	tw32_f(ofs, val);
8587 
8588 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8589 		udelay(100);
8590 		val = tr32(ofs);
8591 		if ((val & enable_bit) == 0)
8592 			break;
8593 	}
8594 
8595 	if (i == MAX_WAIT_CNT && !silent) {
8596 		dev_err(&tp->pdev->dev,
8597 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8598 			ofs, enable_bit);
8599 		return -ENODEV;
8600 	}
8601 
8602 	return 0;
8603 }
8604 
8605 /* tp->lock is held. */
8606 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8607 {
8608 	int i, err;
8609 
8610 	tg3_disable_ints(tp);
8611 
8612 	tp->rx_mode &= ~RX_MODE_ENABLE;
8613 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8614 	udelay(10);
8615 
8616 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8617 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8618 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8619 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8620 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8621 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8622 
8623 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8624 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8625 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8626 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8627 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8628 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8629 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8630 
8631 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8632 	tw32_f(MAC_MODE, tp->mac_mode);
8633 	udelay(40);
8634 
8635 	tp->tx_mode &= ~TX_MODE_ENABLE;
8636 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8637 
8638 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8639 		udelay(100);
8640 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8641 			break;
8642 	}
8643 	if (i >= MAX_WAIT_CNT) {
8644 		dev_err(&tp->pdev->dev,
8645 			"%s timed out, TX_MODE_ENABLE will not clear "
8646 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8647 		err |= -ENODEV;
8648 	}
8649 
8650 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8651 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8652 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8653 
8654 	tw32(FTQ_RESET, 0xffffffff);
8655 	tw32(FTQ_RESET, 0x00000000);
8656 
8657 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8658 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8659 
8660 	for (i = 0; i < tp->irq_cnt; i++) {
8661 		struct tg3_napi *tnapi = &tp->napi[i];
8662 		if (tnapi->hw_status)
8663 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8664 	}
8665 
8666 	return err;
8667 }
8668 
8669 /* Save PCI command register before chip reset */
8670 static void tg3_save_pci_state(struct tg3 *tp)
8671 {
8672 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8673 }
8674 
8675 /* Restore PCI state after chip reset */
8676 static void tg3_restore_pci_state(struct tg3 *tp)
8677 {
8678 	u32 val;
8679 
8680 	/* Re-enable indirect register accesses. */
8681 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8682 			       tp->misc_host_ctrl);
8683 
8684 	/* Set MAX PCI retry to zero. */
8685 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8686 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8687 	    tg3_flag(tp, PCIX_MODE))
8688 		val |= PCISTATE_RETRY_SAME_DMA;
8689 	/* Allow reads and writes to the APE register and memory space. */
8690 	if (tg3_flag(tp, ENABLE_APE))
8691 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8692 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8693 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8694 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8695 
8696 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8697 
8698 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8699 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8700 				      tp->pci_cacheline_sz);
8701 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8702 				      tp->pci_lat_timer);
8703 	}
8704 
8705 	/* Make sure PCI-X relaxed ordering bit is clear. */
8706 	if (tg3_flag(tp, PCIX_MODE)) {
8707 		u16 pcix_cmd;
8708 
8709 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8710 				     &pcix_cmd);
8711 		pcix_cmd &= ~PCI_X_CMD_ERO;
8712 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8713 				      pcix_cmd);
8714 	}
8715 
8716 	if (tg3_flag(tp, 5780_CLASS)) {
8717 
8718 		/* Chip reset on 5780 will reset MSI enable bit,
8719 		 * so need to restore it.
8720 		 */
8721 		if (tg3_flag(tp, USING_MSI)) {
8722 			u16 ctrl;
8723 
8724 			pci_read_config_word(tp->pdev,
8725 					     tp->msi_cap + PCI_MSI_FLAGS,
8726 					     &ctrl);
8727 			pci_write_config_word(tp->pdev,
8728 					      tp->msi_cap + PCI_MSI_FLAGS,
8729 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8730 			val = tr32(MSGINT_MODE);
8731 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8732 		}
8733 	}
8734 }
8735 
8736 /* tp->lock is held. */
8737 static int tg3_chip_reset(struct tg3 *tp)
8738 {
8739 	u32 val;
8740 	void (*write_op)(struct tg3 *, u32, u32);
8741 	int i, err;
8742 
8743 	tg3_nvram_lock(tp);
8744 
8745 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8746 
8747 	/* No matching tg3_nvram_unlock() after this because
8748 	 * chip reset below will undo the nvram lock.
8749 	 */
8750 	tp->nvram_lock_cnt = 0;
8751 
8752 	/* GRC_MISC_CFG core clock reset will clear the memory
8753 	 * enable bit in PCI register 4 and the MSI enable bit
8754 	 * on some chips, so we save relevant registers here.
8755 	 */
8756 	tg3_save_pci_state(tp);
8757 
8758 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8759 	    tg3_flag(tp, 5755_PLUS))
8760 		tw32(GRC_FASTBOOT_PC, 0);
8761 
8762 	/*
8763 	 * We must avoid the readl() that normally takes place.
8764 	 * It locks machines, causes machine checks, and other
8765 	 * fun things.  So, temporarily disable the 5701
8766 	 * hardware workaround, while we do the reset.
8767 	 */
8768 	write_op = tp->write32;
8769 	if (write_op == tg3_write_flush_reg32)
8770 		tp->write32 = tg3_write32;
8771 
8772 	/* Prevent the irq handler from reading or writing PCI registers
8773 	 * during chip reset when the memory enable bit in the PCI command
8774 	 * register may be cleared.  The chip does not generate interrupt
8775 	 * at this time, but the irq handler may still be called due to irq
8776 	 * sharing or irqpoll.
8777 	 */
8778 	tg3_flag_set(tp, CHIP_RESETTING);
8779 	for (i = 0; i < tp->irq_cnt; i++) {
8780 		struct tg3_napi *tnapi = &tp->napi[i];
8781 		if (tnapi->hw_status) {
8782 			tnapi->hw_status->status = 0;
8783 			tnapi->hw_status->status_tag = 0;
8784 		}
8785 		tnapi->last_tag = 0;
8786 		tnapi->last_irq_tag = 0;
8787 	}
8788 	smp_mb();
8789 
8790 	for (i = 0; i < tp->irq_cnt; i++)
8791 		synchronize_irq(tp->napi[i].irq_vec);
8792 
8793 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8794 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8795 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8796 	}
8797 
8798 	/* do the reset */
8799 	val = GRC_MISC_CFG_CORECLK_RESET;
8800 
8801 	if (tg3_flag(tp, PCI_EXPRESS)) {
8802 		/* Force PCIe 1.0a mode */
8803 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8804 		    !tg3_flag(tp, 57765_PLUS) &&
8805 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8806 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8807 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8808 
8809 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8810 			tw32(GRC_MISC_CFG, (1 << 29));
8811 			val |= (1 << 29);
8812 		}
8813 	}
8814 
8815 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8816 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8817 		tw32(GRC_VCPU_EXT_CTRL,
8818 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8819 	}
8820 
8821 	/* Manage gphy power for all CPMU absent PCIe devices. */
8822 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8823 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8824 
8825 	tw32(GRC_MISC_CFG, val);
8826 
8827 	/* restore 5701 hardware bug workaround write method */
8828 	tp->write32 = write_op;
8829 
8830 	/* Unfortunately, we have to delay before the PCI read back.
8831 	 * Some 575X chips even will not respond to a PCI cfg access
8832 	 * when the reset command is given to the chip.
8833 	 *
8834 	 * How do these hardware designers expect things to work
8835 	 * properly if the PCI write is posted for a long period
8836 	 * of time?  It is always necessary to have some method by
8837 	 * which a register read back can occur to push the write
8838 	 * out which does the reset.
8839 	 *
8840 	 * For most tg3 variants the trick below was working.
8841 	 * Ho hum...
8842 	 */
8843 	udelay(120);
8844 
8845 	/* Flush PCI posted writes.  The normal MMIO registers
8846 	 * are inaccessible at this time so this is the only
8847 	 * way to make this reliably (actually, this is no longer
8848 	 * the case, see above).  I tried to use indirect
8849 	 * register read/write but this upset some 5701 variants.
8850 	 */
8851 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8852 
8853 	udelay(120);
8854 
8855 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8856 		u16 val16;
8857 
8858 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8859 			int j;
8860 			u32 cfg_val;
8861 
8862 			/* Wait for link training to complete.  */
8863 			for (j = 0; j < 5000; j++)
8864 				udelay(100);
8865 
8866 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8867 			pci_write_config_dword(tp->pdev, 0xc4,
8868 					       cfg_val | (1 << 15));
8869 		}
8870 
8871 		/* Clear the "no snoop" and "relaxed ordering" bits. */
8872 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8873 		/*
8874 		 * Older PCIe devices only support the 128 byte
8875 		 * MPS setting.  Enforce the restriction.
8876 		 */
8877 		if (!tg3_flag(tp, CPMU_PRESENT))
8878 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8879 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8880 
8881 		/* Clear error status */
8882 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8883 				      PCI_EXP_DEVSTA_CED |
8884 				      PCI_EXP_DEVSTA_NFED |
8885 				      PCI_EXP_DEVSTA_FED |
8886 				      PCI_EXP_DEVSTA_URD);
8887 	}
8888 
8889 	tg3_restore_pci_state(tp);
8890 
8891 	tg3_flag_clear(tp, CHIP_RESETTING);
8892 	tg3_flag_clear(tp, ERROR_PROCESSED);
8893 
8894 	val = 0;
8895 	if (tg3_flag(tp, 5780_CLASS))
8896 		val = tr32(MEMARB_MODE);
8897 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8898 
8899 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8900 		tg3_stop_fw(tp);
8901 		tw32(0x5000, 0x400);
8902 	}
8903 
8904 	if (tg3_flag(tp, IS_SSB_CORE)) {
8905 		/*
8906 		 * BCM4785: In order to avoid repercussions from using
8907 		 * potentially defective internal ROM, stop the Rx RISC CPU,
8908 		 * which is not required.
8909 		 */
8910 		tg3_stop_fw(tp);
8911 		tg3_halt_cpu(tp, RX_CPU_BASE);
8912 	}
8913 
8914 	err = tg3_poll_fw(tp);
8915 	if (err)
8916 		return err;
8917 
8918 	tw32(GRC_MODE, tp->grc_mode);
8919 
8920 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8921 		val = tr32(0xc4);
8922 
8923 		tw32(0xc4, val | (1 << 15));
8924 	}
8925 
8926 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8927 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
8928 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8929 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8930 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8931 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8932 	}
8933 
8934 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8935 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8936 		val = tp->mac_mode;
8937 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8938 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8939 		val = tp->mac_mode;
8940 	} else
8941 		val = 0;
8942 
8943 	tw32_f(MAC_MODE, val);
8944 	udelay(40);
8945 
8946 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8947 
8948 	tg3_mdio_start(tp);
8949 
8950 	if (tg3_flag(tp, PCI_EXPRESS) &&
8951 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8952 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
8953 	    !tg3_flag(tp, 57765_PLUS)) {
8954 		val = tr32(0x7c00);
8955 
8956 		tw32(0x7c00, val | (1 << 25));
8957 	}
8958 
8959 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8960 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8961 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8962 	}
8963 
8964 	/* Reprobe ASF enable state.  */
8965 	tg3_flag_clear(tp, ENABLE_ASF);
8966 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8967 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8968 
8969 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8970 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8971 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8972 		u32 nic_cfg;
8973 
8974 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8975 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8976 			tg3_flag_set(tp, ENABLE_ASF);
8977 			tp->last_event_jiffies = jiffies;
8978 			if (tg3_flag(tp, 5750_PLUS))
8979 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8980 
8981 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8982 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8983 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8984 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8985 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8986 		}
8987 	}
8988 
8989 	return 0;
8990 }
8991 
8992 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8993 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8994 
8995 /* tp->lock is held. */
8996 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8997 {
8998 	int err;
8999 
9000 	tg3_stop_fw(tp);
9001 
9002 	tg3_write_sig_pre_reset(tp, kind);
9003 
9004 	tg3_abort_hw(tp, silent);
9005 	err = tg3_chip_reset(tp);
9006 
9007 	__tg3_set_mac_addr(tp, false);
9008 
9009 	tg3_write_sig_legacy(tp, kind);
9010 	tg3_write_sig_post_reset(tp, kind);
9011 
9012 	if (tp->hw_stats) {
9013 		/* Save the stats across chip resets... */
9014 		tg3_get_nstats(tp, &tp->net_stats_prev);
9015 		tg3_get_estats(tp, &tp->estats_prev);
9016 
9017 		/* And make sure the next sample is new data */
9018 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9019 	}
9020 
9021 	if (err)
9022 		return err;
9023 
9024 	return 0;
9025 }
9026 
9027 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9028 {
9029 	struct tg3 *tp = netdev_priv(dev);
9030 	struct sockaddr *addr = p;
9031 	int err = 0;
9032 	bool skip_mac_1 = false;
9033 
9034 	if (!is_valid_ether_addr(addr->sa_data))
9035 		return -EADDRNOTAVAIL;
9036 
9037 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9038 
9039 	if (!netif_running(dev))
9040 		return 0;
9041 
9042 	if (tg3_flag(tp, ENABLE_ASF)) {
9043 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9044 
9045 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9046 		addr0_low = tr32(MAC_ADDR_0_LOW);
9047 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9048 		addr1_low = tr32(MAC_ADDR_1_LOW);
9049 
9050 		/* Skip MAC addr 1 if ASF is using it. */
9051 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9052 		    !(addr1_high == 0 && addr1_low == 0))
9053 			skip_mac_1 = true;
9054 	}
9055 	spin_lock_bh(&tp->lock);
9056 	__tg3_set_mac_addr(tp, skip_mac_1);
9057 	spin_unlock_bh(&tp->lock);
9058 
9059 	return err;
9060 }
9061 
9062 /* tp->lock is held. */
9063 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9064 			   dma_addr_t mapping, u32 maxlen_flags,
9065 			   u32 nic_addr)
9066 {
9067 	tg3_write_mem(tp,
9068 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9069 		      ((u64) mapping >> 32));
9070 	tg3_write_mem(tp,
9071 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9072 		      ((u64) mapping & 0xffffffff));
9073 	tg3_write_mem(tp,
9074 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9075 		       maxlen_flags);
9076 
9077 	if (!tg3_flag(tp, 5705_PLUS))
9078 		tg3_write_mem(tp,
9079 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9080 			      nic_addr);
9081 }
9082 
9083 
9084 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9085 {
9086 	int i = 0;
9087 
9088 	if (!tg3_flag(tp, ENABLE_TSS)) {
9089 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9090 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9091 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9092 	} else {
9093 		tw32(HOSTCC_TXCOL_TICKS, 0);
9094 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9095 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9096 
9097 		for (; i < tp->txq_cnt; i++) {
9098 			u32 reg;
9099 
9100 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9101 			tw32(reg, ec->tx_coalesce_usecs);
9102 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9103 			tw32(reg, ec->tx_max_coalesced_frames);
9104 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9105 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9106 		}
9107 	}
9108 
9109 	for (; i < tp->irq_max - 1; i++) {
9110 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9111 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9112 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9113 	}
9114 }
9115 
9116 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9117 {
9118 	int i = 0;
9119 	u32 limit = tp->rxq_cnt;
9120 
9121 	if (!tg3_flag(tp, ENABLE_RSS)) {
9122 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9123 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9124 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9125 		limit--;
9126 	} else {
9127 		tw32(HOSTCC_RXCOL_TICKS, 0);
9128 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9129 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9130 	}
9131 
9132 	for (; i < limit; i++) {
9133 		u32 reg;
9134 
9135 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9136 		tw32(reg, ec->rx_coalesce_usecs);
9137 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9138 		tw32(reg, ec->rx_max_coalesced_frames);
9139 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9140 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9141 	}
9142 
9143 	for (; i < tp->irq_max - 1; i++) {
9144 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9145 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9146 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9147 	}
9148 }
9149 
9150 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9151 {
9152 	tg3_coal_tx_init(tp, ec);
9153 	tg3_coal_rx_init(tp, ec);
9154 
9155 	if (!tg3_flag(tp, 5705_PLUS)) {
9156 		u32 val = ec->stats_block_coalesce_usecs;
9157 
9158 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9159 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9160 
9161 		if (!tp->link_up)
9162 			val = 0;
9163 
9164 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9165 	}
9166 }
9167 
9168 /* tp->lock is held. */
9169 static void tg3_rings_reset(struct tg3 *tp)
9170 {
9171 	int i;
9172 	u32 stblk, txrcb, rxrcb, limit;
9173 	struct tg3_napi *tnapi = &tp->napi[0];
9174 
9175 	/* Disable all transmit rings but the first. */
9176 	if (!tg3_flag(tp, 5705_PLUS))
9177 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9178 	else if (tg3_flag(tp, 5717_PLUS))
9179 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9180 	else if (tg3_flag(tp, 57765_CLASS) ||
9181 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9182 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9183 	else
9184 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9185 
9186 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9187 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9188 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9189 			      BDINFO_FLAGS_DISABLED);
9190 
9191 
9192 	/* Disable all receive return rings but the first. */
9193 	if (tg3_flag(tp, 5717_PLUS))
9194 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9195 	else if (!tg3_flag(tp, 5705_PLUS))
9196 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9197 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9198 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9199 		 tg3_flag(tp, 57765_CLASS))
9200 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9201 	else
9202 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9203 
9204 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9205 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9206 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9207 			      BDINFO_FLAGS_DISABLED);
9208 
9209 	/* Disable interrupts */
9210 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9211 	tp->napi[0].chk_msi_cnt = 0;
9212 	tp->napi[0].last_rx_cons = 0;
9213 	tp->napi[0].last_tx_cons = 0;
9214 
9215 	/* Zero mailbox registers. */
9216 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9217 		for (i = 1; i < tp->irq_max; i++) {
9218 			tp->napi[i].tx_prod = 0;
9219 			tp->napi[i].tx_cons = 0;
9220 			if (tg3_flag(tp, ENABLE_TSS))
9221 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9222 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9223 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9224 			tp->napi[i].chk_msi_cnt = 0;
9225 			tp->napi[i].last_rx_cons = 0;
9226 			tp->napi[i].last_tx_cons = 0;
9227 		}
9228 		if (!tg3_flag(tp, ENABLE_TSS))
9229 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9230 	} else {
9231 		tp->napi[0].tx_prod = 0;
9232 		tp->napi[0].tx_cons = 0;
9233 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9234 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9235 	}
9236 
9237 	/* Make sure the NIC-based send BD rings are disabled. */
9238 	if (!tg3_flag(tp, 5705_PLUS)) {
9239 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9240 		for (i = 0; i < 16; i++)
9241 			tw32_tx_mbox(mbox + i * 8, 0);
9242 	}
9243 
9244 	txrcb = NIC_SRAM_SEND_RCB;
9245 	rxrcb = NIC_SRAM_RCV_RET_RCB;
9246 
9247 	/* Clear status block in ram. */
9248 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9249 
9250 	/* Set status block DMA address */
9251 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9252 	     ((u64) tnapi->status_mapping >> 32));
9253 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9254 	     ((u64) tnapi->status_mapping & 0xffffffff));
9255 
9256 	if (tnapi->tx_ring) {
9257 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9258 			       (TG3_TX_RING_SIZE <<
9259 				BDINFO_FLAGS_MAXLEN_SHIFT),
9260 			       NIC_SRAM_TX_BUFFER_DESC);
9261 		txrcb += TG3_BDINFO_SIZE;
9262 	}
9263 
9264 	if (tnapi->rx_rcb) {
9265 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9266 			       (tp->rx_ret_ring_mask + 1) <<
9267 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9268 		rxrcb += TG3_BDINFO_SIZE;
9269 	}
9270 
9271 	stblk = HOSTCC_STATBLCK_RING1;
9272 
9273 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9274 		u64 mapping = (u64)tnapi->status_mapping;
9275 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9276 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9277 
9278 		/* Clear status block in ram. */
9279 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9280 
9281 		if (tnapi->tx_ring) {
9282 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9283 				       (TG3_TX_RING_SIZE <<
9284 					BDINFO_FLAGS_MAXLEN_SHIFT),
9285 				       NIC_SRAM_TX_BUFFER_DESC);
9286 			txrcb += TG3_BDINFO_SIZE;
9287 		}
9288 
9289 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9290 			       ((tp->rx_ret_ring_mask + 1) <<
9291 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9292 
9293 		stblk += 8;
9294 		rxrcb += TG3_BDINFO_SIZE;
9295 	}
9296 }
9297 
9298 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9299 {
9300 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9301 
9302 	if (!tg3_flag(tp, 5750_PLUS) ||
9303 	    tg3_flag(tp, 5780_CLASS) ||
9304 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9305 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9306 	    tg3_flag(tp, 57765_PLUS))
9307 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9308 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9309 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9310 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9311 	else
9312 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9313 
9314 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9315 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9316 
9317 	val = min(nic_rep_thresh, host_rep_thresh);
9318 	tw32(RCVBDI_STD_THRESH, val);
9319 
9320 	if (tg3_flag(tp, 57765_PLUS))
9321 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9322 
9323 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9324 		return;
9325 
9326 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9327 
9328 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9329 
9330 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9331 	tw32(RCVBDI_JUMBO_THRESH, val);
9332 
9333 	if (tg3_flag(tp, 57765_PLUS))
9334 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9335 }
9336 
9337 static inline u32 calc_crc(unsigned char *buf, int len)
9338 {
9339 	u32 reg;
9340 	u32 tmp;
9341 	int j, k;
9342 
9343 	reg = 0xffffffff;
9344 
9345 	for (j = 0; j < len; j++) {
9346 		reg ^= buf[j];
9347 
9348 		for (k = 0; k < 8; k++) {
9349 			tmp = reg & 0x01;
9350 
9351 			reg >>= 1;
9352 
9353 			if (tmp)
9354 				reg ^= 0xedb88320;
9355 		}
9356 	}
9357 
9358 	return ~reg;
9359 }
9360 
9361 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9362 {
9363 	/* accept or reject all multicast frames */
9364 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9365 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9366 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9367 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9368 }
9369 
9370 static void __tg3_set_rx_mode(struct net_device *dev)
9371 {
9372 	struct tg3 *tp = netdev_priv(dev);
9373 	u32 rx_mode;
9374 
9375 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9376 				  RX_MODE_KEEP_VLAN_TAG);
9377 
9378 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9379 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9380 	 * flag clear.
9381 	 */
9382 	if (!tg3_flag(tp, ENABLE_ASF))
9383 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9384 #endif
9385 
9386 	if (dev->flags & IFF_PROMISC) {
9387 		/* Promiscuous mode. */
9388 		rx_mode |= RX_MODE_PROMISC;
9389 	} else if (dev->flags & IFF_ALLMULTI) {
9390 		/* Accept all multicast. */
9391 		tg3_set_multi(tp, 1);
9392 	} else if (netdev_mc_empty(dev)) {
9393 		/* Reject all multicast. */
9394 		tg3_set_multi(tp, 0);
9395 	} else {
9396 		/* Accept one or more multicast(s). */
9397 		struct netdev_hw_addr *ha;
9398 		u32 mc_filter[4] = { 0, };
9399 		u32 regidx;
9400 		u32 bit;
9401 		u32 crc;
9402 
9403 		netdev_for_each_mc_addr(ha, dev) {
9404 			crc = calc_crc(ha->addr, ETH_ALEN);
9405 			bit = ~crc & 0x7f;
9406 			regidx = (bit & 0x60) >> 5;
9407 			bit &= 0x1f;
9408 			mc_filter[regidx] |= (1 << bit);
9409 		}
9410 
9411 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9412 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9413 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9414 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9415 	}
9416 
9417 	if (rx_mode != tp->rx_mode) {
9418 		tp->rx_mode = rx_mode;
9419 		tw32_f(MAC_RX_MODE, rx_mode);
9420 		udelay(10);
9421 	}
9422 }
9423 
9424 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9425 {
9426 	int i;
9427 
9428 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9429 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9430 }
9431 
9432 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9433 {
9434 	int i;
9435 
9436 	if (!tg3_flag(tp, SUPPORT_MSIX))
9437 		return;
9438 
9439 	if (tp->rxq_cnt == 1) {
9440 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9441 		return;
9442 	}
9443 
9444 	/* Validate table against current IRQ count */
9445 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9446 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9447 			break;
9448 	}
9449 
9450 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9451 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9452 }
9453 
9454 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9455 {
9456 	int i = 0;
9457 	u32 reg = MAC_RSS_INDIR_TBL_0;
9458 
9459 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9460 		u32 val = tp->rss_ind_tbl[i];
9461 		i++;
9462 		for (; i % 8; i++) {
9463 			val <<= 4;
9464 			val |= tp->rss_ind_tbl[i];
9465 		}
9466 		tw32(reg, val);
9467 		reg += 4;
9468 	}
9469 }
9470 
9471 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9472 {
9473 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9474 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9475 	else
9476 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9477 }
9478 
9479 /* tp->lock is held. */
9480 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9481 {
9482 	u32 val, rdmac_mode;
9483 	int i, err, limit;
9484 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9485 
9486 	tg3_disable_ints(tp);
9487 
9488 	tg3_stop_fw(tp);
9489 
9490 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9491 
9492 	if (tg3_flag(tp, INIT_COMPLETE))
9493 		tg3_abort_hw(tp, 1);
9494 
9495 	/* Enable MAC control of LPI */
9496 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9497 		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9498 		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
9499 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9500 			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9501 
9502 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9503 
9504 		tw32_f(TG3_CPMU_EEE_CTRL,
9505 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9506 
9507 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9508 		      TG3_CPMU_EEEMD_LPI_IN_TX |
9509 		      TG3_CPMU_EEEMD_LPI_IN_RX |
9510 		      TG3_CPMU_EEEMD_EEE_ENABLE;
9511 
9512 		if (tg3_asic_rev(tp) != ASIC_REV_5717)
9513 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9514 
9515 		if (tg3_flag(tp, ENABLE_APE))
9516 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9517 
9518 		tw32_f(TG3_CPMU_EEE_MODE, val);
9519 
9520 		tw32_f(TG3_CPMU_EEE_DBTMR1,
9521 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9522 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9523 
9524 		tw32_f(TG3_CPMU_EEE_DBTMR2,
9525 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
9526 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9527 	}
9528 
9529 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9530 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9531 		tg3_phy_pull_config(tp);
9532 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9533 	}
9534 
9535 	if (reset_phy)
9536 		tg3_phy_reset(tp);
9537 
9538 	err = tg3_chip_reset(tp);
9539 	if (err)
9540 		return err;
9541 
9542 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9543 
9544 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9545 		val = tr32(TG3_CPMU_CTRL);
9546 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9547 		tw32(TG3_CPMU_CTRL, val);
9548 
9549 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9550 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9551 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9552 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9553 
9554 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9555 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9556 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9557 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9558 
9559 		val = tr32(TG3_CPMU_HST_ACC);
9560 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9561 		val |= CPMU_HST_ACC_MACCLK_6_25;
9562 		tw32(TG3_CPMU_HST_ACC, val);
9563 	}
9564 
9565 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9566 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9567 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9568 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9569 		tw32(PCIE_PWR_MGMT_THRESH, val);
9570 
9571 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9572 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9573 
9574 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9575 
9576 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9577 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9578 	}
9579 
9580 	if (tg3_flag(tp, L1PLLPD_EN)) {
9581 		u32 grc_mode = tr32(GRC_MODE);
9582 
9583 		/* Access the lower 1K of PL PCIE block registers. */
9584 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9585 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9586 
9587 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9588 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9589 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9590 
9591 		tw32(GRC_MODE, grc_mode);
9592 	}
9593 
9594 	if (tg3_flag(tp, 57765_CLASS)) {
9595 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9596 			u32 grc_mode = tr32(GRC_MODE);
9597 
9598 			/* Access the lower 1K of PL PCIE block registers. */
9599 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9600 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9601 
9602 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9603 				   TG3_PCIE_PL_LO_PHYCTL5);
9604 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9605 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9606 
9607 			tw32(GRC_MODE, grc_mode);
9608 		}
9609 
9610 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9611 			u32 grc_mode;
9612 
9613 			/* Fix transmit hangs */
9614 			val = tr32(TG3_CPMU_PADRNG_CTL);
9615 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9616 			tw32(TG3_CPMU_PADRNG_CTL, val);
9617 
9618 			grc_mode = tr32(GRC_MODE);
9619 
9620 			/* Access the lower 1K of DL PCIE block registers. */
9621 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9622 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9623 
9624 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9625 				   TG3_PCIE_DL_LO_FTSMAX);
9626 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9627 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9628 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9629 
9630 			tw32(GRC_MODE, grc_mode);
9631 		}
9632 
9633 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9634 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9635 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9636 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9637 	}
9638 
9639 	/* This works around an issue with Athlon chipsets on
9640 	 * B3 tigon3 silicon.  This bit has no effect on any
9641 	 * other revision.  But do not set this on PCI Express
9642 	 * chips and don't even touch the clocks if the CPMU is present.
9643 	 */
9644 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9645 		if (!tg3_flag(tp, PCI_EXPRESS))
9646 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9647 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9648 	}
9649 
9650 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9651 	    tg3_flag(tp, PCIX_MODE)) {
9652 		val = tr32(TG3PCI_PCISTATE);
9653 		val |= PCISTATE_RETRY_SAME_DMA;
9654 		tw32(TG3PCI_PCISTATE, val);
9655 	}
9656 
9657 	if (tg3_flag(tp, ENABLE_APE)) {
9658 		/* Allow reads and writes to the
9659 		 * APE register and memory space.
9660 		 */
9661 		val = tr32(TG3PCI_PCISTATE);
9662 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9663 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9664 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9665 		tw32(TG3PCI_PCISTATE, val);
9666 	}
9667 
9668 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9669 		/* Enable some hw fixes.  */
9670 		val = tr32(TG3PCI_MSI_DATA);
9671 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9672 		tw32(TG3PCI_MSI_DATA, val);
9673 	}
9674 
9675 	/* Descriptor ring init may make accesses to the
9676 	 * NIC SRAM area to setup the TX descriptors, so we
9677 	 * can only do this after the hardware has been
9678 	 * successfully reset.
9679 	 */
9680 	err = tg3_init_rings(tp);
9681 	if (err)
9682 		return err;
9683 
9684 	if (tg3_flag(tp, 57765_PLUS)) {
9685 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9686 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9687 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9688 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9689 		if (!tg3_flag(tp, 57765_CLASS) &&
9690 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9691 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9692 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9693 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9694 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9695 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9696 		/* This value is determined during the probe time DMA
9697 		 * engine test, tg3_test_dma.
9698 		 */
9699 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9700 	}
9701 
9702 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9703 			  GRC_MODE_4X_NIC_SEND_RINGS |
9704 			  GRC_MODE_NO_TX_PHDR_CSUM |
9705 			  GRC_MODE_NO_RX_PHDR_CSUM);
9706 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9707 
9708 	/* Pseudo-header checksum is done by hardware logic and not
9709 	 * the offload processers, so make the chip do the pseudo-
9710 	 * header checksums on receive.  For transmit it is more
9711 	 * convenient to do the pseudo-header checksum in software
9712 	 * as Linux does that on transmit for us in all cases.
9713 	 */
9714 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9715 
9716 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9717 	if (tp->rxptpctl)
9718 		tw32(TG3_RX_PTP_CTL,
9719 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9720 
9721 	if (tg3_flag(tp, PTP_CAPABLE))
9722 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9723 
9724 	tw32(GRC_MODE, tp->grc_mode | val);
9725 
9726 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
9727 	val = tr32(GRC_MISC_CFG);
9728 	val &= ~0xff;
9729 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9730 	tw32(GRC_MISC_CFG, val);
9731 
9732 	/* Initialize MBUF/DESC pool. */
9733 	if (tg3_flag(tp, 5750_PLUS)) {
9734 		/* Do nothing.  */
9735 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9736 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9737 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9738 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9739 		else
9740 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9741 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9742 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9743 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9744 		int fw_len;
9745 
9746 		fw_len = tp->fw_len;
9747 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9748 		tw32(BUFMGR_MB_POOL_ADDR,
9749 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9750 		tw32(BUFMGR_MB_POOL_SIZE,
9751 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9752 	}
9753 
9754 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9755 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9756 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9757 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9758 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9759 		tw32(BUFMGR_MB_HIGH_WATER,
9760 		     tp->bufmgr_config.mbuf_high_water);
9761 	} else {
9762 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9763 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9764 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9765 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9766 		tw32(BUFMGR_MB_HIGH_WATER,
9767 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9768 	}
9769 	tw32(BUFMGR_DMA_LOW_WATER,
9770 	     tp->bufmgr_config.dma_low_water);
9771 	tw32(BUFMGR_DMA_HIGH_WATER,
9772 	     tp->bufmgr_config.dma_high_water);
9773 
9774 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9775 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9776 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9777 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9778 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9779 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9780 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9781 	tw32(BUFMGR_MODE, val);
9782 	for (i = 0; i < 2000; i++) {
9783 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9784 			break;
9785 		udelay(10);
9786 	}
9787 	if (i >= 2000) {
9788 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9789 		return -ENODEV;
9790 	}
9791 
9792 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9793 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9794 
9795 	tg3_setup_rxbd_thresholds(tp);
9796 
9797 	/* Initialize TG3_BDINFO's at:
9798 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9799 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9800 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9801 	 *
9802 	 * like so:
9803 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9804 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9805 	 *                              ring attribute flags
9806 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9807 	 *
9808 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9809 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9810 	 *
9811 	 * The size of each ring is fixed in the firmware, but the location is
9812 	 * configurable.
9813 	 */
9814 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9815 	     ((u64) tpr->rx_std_mapping >> 32));
9816 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9817 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
9818 	if (!tg3_flag(tp, 5717_PLUS))
9819 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9820 		     NIC_SRAM_RX_BUFFER_DESC);
9821 
9822 	/* Disable the mini ring */
9823 	if (!tg3_flag(tp, 5705_PLUS))
9824 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9825 		     BDINFO_FLAGS_DISABLED);
9826 
9827 	/* Program the jumbo buffer descriptor ring control
9828 	 * blocks on those devices that have them.
9829 	 */
9830 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9831 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9832 
9833 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9834 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9835 			     ((u64) tpr->rx_jmb_mapping >> 32));
9836 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9837 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9838 			val = TG3_RX_JMB_RING_SIZE(tp) <<
9839 			      BDINFO_FLAGS_MAXLEN_SHIFT;
9840 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9841 			     val | BDINFO_FLAGS_USE_EXT_RECV);
9842 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9843 			    tg3_flag(tp, 57765_CLASS) ||
9844 			    tg3_asic_rev(tp) == ASIC_REV_5762)
9845 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9846 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9847 		} else {
9848 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9849 			     BDINFO_FLAGS_DISABLED);
9850 		}
9851 
9852 		if (tg3_flag(tp, 57765_PLUS)) {
9853 			val = TG3_RX_STD_RING_SIZE(tp);
9854 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9855 			val |= (TG3_RX_STD_DMA_SZ << 2);
9856 		} else
9857 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9858 	} else
9859 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9860 
9861 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9862 
9863 	tpr->rx_std_prod_idx = tp->rx_pending;
9864 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9865 
9866 	tpr->rx_jmb_prod_idx =
9867 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9868 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9869 
9870 	tg3_rings_reset(tp);
9871 
9872 	/* Initialize MAC address and backoff seed. */
9873 	__tg3_set_mac_addr(tp, false);
9874 
9875 	/* MTU + ethernet header + FCS + optional VLAN tag */
9876 	tw32(MAC_RX_MTU_SIZE,
9877 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9878 
9879 	/* The slot time is changed by tg3_setup_phy if we
9880 	 * run at gigabit with half duplex.
9881 	 */
9882 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9883 	      (6 << TX_LENGTHS_IPG_SHIFT) |
9884 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9885 
9886 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9887 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9888 		val |= tr32(MAC_TX_LENGTHS) &
9889 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
9890 			TX_LENGTHS_CNT_DWN_VAL_MSK);
9891 
9892 	tw32(MAC_TX_LENGTHS, val);
9893 
9894 	/* Receive rules. */
9895 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9896 	tw32(RCVLPC_CONFIG, 0x0181);
9897 
9898 	/* Calculate RDMAC_MODE setting early, we need it to determine
9899 	 * the RCVLPC_STATE_ENABLE mask.
9900 	 */
9901 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9902 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9903 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9904 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9905 		      RDMAC_MODE_LNGREAD_ENAB);
9906 
9907 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
9908 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9909 
9910 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9911 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9912 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9913 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9914 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9915 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9916 
9917 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9918 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9919 		if (tg3_flag(tp, TSO_CAPABLE) &&
9920 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
9921 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9922 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9923 			   !tg3_flag(tp, IS_5788)) {
9924 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9925 		}
9926 	}
9927 
9928 	if (tg3_flag(tp, PCI_EXPRESS))
9929 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9930 
9931 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9932 		tp->dma_limit = 0;
9933 		if (tp->dev->mtu <= ETH_DATA_LEN) {
9934 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9935 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9936 		}
9937 	}
9938 
9939 	if (tg3_flag(tp, HW_TSO_1) ||
9940 	    tg3_flag(tp, HW_TSO_2) ||
9941 	    tg3_flag(tp, HW_TSO_3))
9942 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9943 
9944 	if (tg3_flag(tp, 57765_PLUS) ||
9945 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9946 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9947 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9948 
9949 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9950 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9951 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9952 
9953 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9954 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
9955 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9956 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
9957 	    tg3_flag(tp, 57765_PLUS)) {
9958 		u32 tgtreg;
9959 
9960 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9961 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9962 		else
9963 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
9964 
9965 		val = tr32(tgtreg);
9966 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9967 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
9968 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9969 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9970 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9971 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9972 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9973 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9974 		}
9975 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9976 	}
9977 
9978 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9979 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
9980 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
9981 		u32 tgtreg;
9982 
9983 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9984 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9985 		else
9986 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9987 
9988 		val = tr32(tgtreg);
9989 		tw32(tgtreg, val |
9990 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9991 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9992 	}
9993 
9994 	/* Receive/send statistics. */
9995 	if (tg3_flag(tp, 5750_PLUS)) {
9996 		val = tr32(RCVLPC_STATS_ENABLE);
9997 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
9998 		tw32(RCVLPC_STATS_ENABLE, val);
9999 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10000 		   tg3_flag(tp, TSO_CAPABLE)) {
10001 		val = tr32(RCVLPC_STATS_ENABLE);
10002 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10003 		tw32(RCVLPC_STATS_ENABLE, val);
10004 	} else {
10005 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10006 	}
10007 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10008 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10009 	tw32(SNDDATAI_STATSCTRL,
10010 	     (SNDDATAI_SCTRL_ENABLE |
10011 	      SNDDATAI_SCTRL_FASTUPD));
10012 
10013 	/* Setup host coalescing engine. */
10014 	tw32(HOSTCC_MODE, 0);
10015 	for (i = 0; i < 2000; i++) {
10016 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10017 			break;
10018 		udelay(10);
10019 	}
10020 
10021 	__tg3_set_coalesce(tp, &tp->coal);
10022 
10023 	if (!tg3_flag(tp, 5705_PLUS)) {
10024 		/* Status/statistics block address.  See tg3_timer,
10025 		 * the tg3_periodic_fetch_stats call there, and
10026 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10027 		 */
10028 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10029 		     ((u64) tp->stats_mapping >> 32));
10030 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10031 		     ((u64) tp->stats_mapping & 0xffffffff));
10032 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10033 
10034 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10035 
10036 		/* Clear statistics and status block memory areas */
10037 		for (i = NIC_SRAM_STATS_BLK;
10038 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10039 		     i += sizeof(u32)) {
10040 			tg3_write_mem(tp, i, 0);
10041 			udelay(40);
10042 		}
10043 	}
10044 
10045 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10046 
10047 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10048 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10049 	if (!tg3_flag(tp, 5705_PLUS))
10050 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10051 
10052 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10053 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10054 		/* reset to prevent losing 1st rx packet intermittently */
10055 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10056 		udelay(10);
10057 	}
10058 
10059 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10060 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10061 			MAC_MODE_FHDE_ENABLE;
10062 	if (tg3_flag(tp, ENABLE_APE))
10063 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10064 	if (!tg3_flag(tp, 5705_PLUS) &&
10065 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10066 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10067 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10068 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10069 	udelay(40);
10070 
10071 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10072 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10073 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10074 	 * whether used as inputs or outputs, are set by boot code after
10075 	 * reset.
10076 	 */
10077 	if (!tg3_flag(tp, IS_NIC)) {
10078 		u32 gpio_mask;
10079 
10080 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10081 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10082 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10083 
10084 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10085 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10086 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10087 
10088 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10089 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10090 
10091 		tp->grc_local_ctrl &= ~gpio_mask;
10092 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10093 
10094 		/* GPIO1 must be driven high for eeprom write protect */
10095 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10096 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10097 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10098 	}
10099 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10100 	udelay(100);
10101 
10102 	if (tg3_flag(tp, USING_MSIX)) {
10103 		val = tr32(MSGINT_MODE);
10104 		val |= MSGINT_MODE_ENABLE;
10105 		if (tp->irq_cnt > 1)
10106 			val |= MSGINT_MODE_MULTIVEC_EN;
10107 		if (!tg3_flag(tp, 1SHOT_MSI))
10108 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10109 		tw32(MSGINT_MODE, val);
10110 	}
10111 
10112 	if (!tg3_flag(tp, 5705_PLUS)) {
10113 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10114 		udelay(40);
10115 	}
10116 
10117 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10118 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10119 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10120 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10121 	       WDMAC_MODE_LNGREAD_ENAB);
10122 
10123 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10124 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10125 		if (tg3_flag(tp, TSO_CAPABLE) &&
10126 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10127 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10128 			/* nothing */
10129 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10130 			   !tg3_flag(tp, IS_5788)) {
10131 			val |= WDMAC_MODE_RX_ACCEL;
10132 		}
10133 	}
10134 
10135 	/* Enable host coalescing bug fix */
10136 	if (tg3_flag(tp, 5755_PLUS))
10137 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10138 
10139 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10140 		val |= WDMAC_MODE_BURST_ALL_DATA;
10141 
10142 	tw32_f(WDMAC_MODE, val);
10143 	udelay(40);
10144 
10145 	if (tg3_flag(tp, PCIX_MODE)) {
10146 		u16 pcix_cmd;
10147 
10148 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10149 				     &pcix_cmd);
10150 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10151 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10152 			pcix_cmd |= PCI_X_CMD_READ_2K;
10153 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10154 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10155 			pcix_cmd |= PCI_X_CMD_READ_2K;
10156 		}
10157 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10158 				      pcix_cmd);
10159 	}
10160 
10161 	tw32_f(RDMAC_MODE, rdmac_mode);
10162 	udelay(40);
10163 
10164 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10165 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10166 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10167 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10168 				break;
10169 		}
10170 		if (i < TG3_NUM_RDMA_CHANNELS) {
10171 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10172 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10173 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10174 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10175 		}
10176 	}
10177 
10178 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10179 	if (!tg3_flag(tp, 5705_PLUS))
10180 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10181 
10182 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10183 		tw32(SNDDATAC_MODE,
10184 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10185 	else
10186 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10187 
10188 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10189 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10190 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10191 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10192 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10193 	tw32(RCVDBDI_MODE, val);
10194 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10195 	if (tg3_flag(tp, HW_TSO_1) ||
10196 	    tg3_flag(tp, HW_TSO_2) ||
10197 	    tg3_flag(tp, HW_TSO_3))
10198 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10199 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10200 	if (tg3_flag(tp, ENABLE_TSS))
10201 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10202 	tw32(SNDBDI_MODE, val);
10203 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10204 
10205 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10206 		err = tg3_load_5701_a0_firmware_fix(tp);
10207 		if (err)
10208 			return err;
10209 	}
10210 
10211 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10212 		/* Ignore any errors for the firmware download. If download
10213 		 * fails, the device will operate with EEE disabled
10214 		 */
10215 		tg3_load_57766_firmware(tp);
10216 	}
10217 
10218 	if (tg3_flag(tp, TSO_CAPABLE)) {
10219 		err = tg3_load_tso_firmware(tp);
10220 		if (err)
10221 			return err;
10222 	}
10223 
10224 	tp->tx_mode = TX_MODE_ENABLE;
10225 
10226 	if (tg3_flag(tp, 5755_PLUS) ||
10227 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10228 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10229 
10230 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10231 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10232 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10233 		tp->tx_mode &= ~val;
10234 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10235 	}
10236 
10237 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10238 	udelay(100);
10239 
10240 	if (tg3_flag(tp, ENABLE_RSS)) {
10241 		tg3_rss_write_indir_tbl(tp);
10242 
10243 		/* Setup the "secret" hash key. */
10244 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10245 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10246 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10247 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10248 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10249 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10250 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10251 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10252 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10253 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10254 	}
10255 
10256 	tp->rx_mode = RX_MODE_ENABLE;
10257 	if (tg3_flag(tp, 5755_PLUS))
10258 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10259 
10260 	if (tg3_flag(tp, ENABLE_RSS))
10261 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10262 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10263 			       RX_MODE_RSS_IPV6_HASH_EN |
10264 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10265 			       RX_MODE_RSS_IPV4_HASH_EN |
10266 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10267 
10268 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10269 	udelay(10);
10270 
10271 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10272 
10273 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10274 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10275 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10276 		udelay(10);
10277 	}
10278 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10279 	udelay(10);
10280 
10281 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10282 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10283 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10284 			/* Set drive transmission level to 1.2V  */
10285 			/* only if the signal pre-emphasis bit is not set  */
10286 			val = tr32(MAC_SERDES_CFG);
10287 			val &= 0xfffff000;
10288 			val |= 0x880;
10289 			tw32(MAC_SERDES_CFG, val);
10290 		}
10291 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10292 			tw32(MAC_SERDES_CFG, 0x616000);
10293 	}
10294 
10295 	/* Prevent chip from dropping frames when flow control
10296 	 * is enabled.
10297 	 */
10298 	if (tg3_flag(tp, 57765_CLASS))
10299 		val = 1;
10300 	else
10301 		val = 2;
10302 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10303 
10304 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10305 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10306 		/* Use hardware link auto-negotiation */
10307 		tg3_flag_set(tp, HW_AUTONEG);
10308 	}
10309 
10310 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10311 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10312 		u32 tmp;
10313 
10314 		tmp = tr32(SERDES_RX_CTRL);
10315 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10316 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10317 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10318 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10319 	}
10320 
10321 	if (!tg3_flag(tp, USE_PHYLIB)) {
10322 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10323 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10324 
10325 		err = tg3_setup_phy(tp, false);
10326 		if (err)
10327 			return err;
10328 
10329 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10330 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10331 			u32 tmp;
10332 
10333 			/* Clear CRC stats. */
10334 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10335 				tg3_writephy(tp, MII_TG3_TEST1,
10336 					     tmp | MII_TG3_TEST1_CRC_EN);
10337 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10338 			}
10339 		}
10340 	}
10341 
10342 	__tg3_set_rx_mode(tp->dev);
10343 
10344 	/* Initialize receive rules. */
10345 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10346 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10347 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10348 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10349 
10350 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10351 		limit = 8;
10352 	else
10353 		limit = 16;
10354 	if (tg3_flag(tp, ENABLE_ASF))
10355 		limit -= 4;
10356 	switch (limit) {
10357 	case 16:
10358 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10359 	case 15:
10360 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10361 	case 14:
10362 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10363 	case 13:
10364 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10365 	case 12:
10366 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10367 	case 11:
10368 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10369 	case 10:
10370 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10371 	case 9:
10372 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10373 	case 8:
10374 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10375 	case 7:
10376 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10377 	case 6:
10378 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10379 	case 5:
10380 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10381 	case 4:
10382 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10383 	case 3:
10384 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10385 	case 2:
10386 	case 1:
10387 
10388 	default:
10389 		break;
10390 	}
10391 
10392 	if (tg3_flag(tp, ENABLE_APE))
10393 		/* Write our heartbeat update interval to APE. */
10394 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10395 				APE_HOST_HEARTBEAT_INT_DISABLE);
10396 
10397 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10398 
10399 	return 0;
10400 }
10401 
10402 /* Called at device open time to get the chip ready for
10403  * packet processing.  Invoked with tp->lock held.
10404  */
10405 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10406 {
10407 	tg3_switch_clocks(tp);
10408 
10409 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10410 
10411 	return tg3_reset_hw(tp, reset_phy);
10412 }
10413 
10414 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10415 {
10416 	int i;
10417 
10418 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10419 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10420 
10421 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10422 		off += len;
10423 
10424 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10425 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10426 			memset(ocir, 0, TG3_OCIR_LEN);
10427 	}
10428 }
10429 
10430 /* sysfs attributes for hwmon */
10431 static ssize_t tg3_show_temp(struct device *dev,
10432 			     struct device_attribute *devattr, char *buf)
10433 {
10434 	struct pci_dev *pdev = to_pci_dev(dev);
10435 	struct net_device *netdev = pci_get_drvdata(pdev);
10436 	struct tg3 *tp = netdev_priv(netdev);
10437 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10438 	u32 temperature;
10439 
10440 	spin_lock_bh(&tp->lock);
10441 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10442 				sizeof(temperature));
10443 	spin_unlock_bh(&tp->lock);
10444 	return sprintf(buf, "%u\n", temperature);
10445 }
10446 
10447 
10448 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10449 			  TG3_TEMP_SENSOR_OFFSET);
10450 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10451 			  TG3_TEMP_CAUTION_OFFSET);
10452 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10453 			  TG3_TEMP_MAX_OFFSET);
10454 
10455 static struct attribute *tg3_attributes[] = {
10456 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10457 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10458 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10459 	NULL
10460 };
10461 
10462 static const struct attribute_group tg3_group = {
10463 	.attrs = tg3_attributes,
10464 };
10465 
10466 static void tg3_hwmon_close(struct tg3 *tp)
10467 {
10468 	if (tp->hwmon_dev) {
10469 		hwmon_device_unregister(tp->hwmon_dev);
10470 		tp->hwmon_dev = NULL;
10471 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10472 	}
10473 }
10474 
10475 static void tg3_hwmon_open(struct tg3 *tp)
10476 {
10477 	int i, err;
10478 	u32 size = 0;
10479 	struct pci_dev *pdev = tp->pdev;
10480 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10481 
10482 	tg3_sd_scan_scratchpad(tp, ocirs);
10483 
10484 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10485 		if (!ocirs[i].src_data_length)
10486 			continue;
10487 
10488 		size += ocirs[i].src_hdr_length;
10489 		size += ocirs[i].src_data_length;
10490 	}
10491 
10492 	if (!size)
10493 		return;
10494 
10495 	/* Register hwmon sysfs hooks */
10496 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10497 	if (err) {
10498 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10499 		return;
10500 	}
10501 
10502 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10503 	if (IS_ERR(tp->hwmon_dev)) {
10504 		tp->hwmon_dev = NULL;
10505 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10506 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10507 	}
10508 }
10509 
10510 
10511 #define TG3_STAT_ADD32(PSTAT, REG) \
10512 do {	u32 __val = tr32(REG); \
10513 	(PSTAT)->low += __val; \
10514 	if ((PSTAT)->low < __val) \
10515 		(PSTAT)->high += 1; \
10516 } while (0)
10517 
10518 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10519 {
10520 	struct tg3_hw_stats *sp = tp->hw_stats;
10521 
10522 	if (!tp->link_up)
10523 		return;
10524 
10525 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10526 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10527 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10528 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10529 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10530 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10531 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10532 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10533 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10534 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10535 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10536 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10537 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10538 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10539 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10540 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10541 		u32 val;
10542 
10543 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10544 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10545 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10546 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10547 	}
10548 
10549 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10550 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10551 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10552 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10553 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10554 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10555 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10556 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10557 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10558 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10559 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10560 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10561 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10562 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10563 
10564 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10565 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10566 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10567 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10568 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10569 	} else {
10570 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10571 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10572 		if (val) {
10573 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10574 			sp->rx_discards.low += val;
10575 			if (sp->rx_discards.low < val)
10576 				sp->rx_discards.high += 1;
10577 		}
10578 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10579 	}
10580 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10581 }
10582 
10583 static void tg3_chk_missed_msi(struct tg3 *tp)
10584 {
10585 	u32 i;
10586 
10587 	for (i = 0; i < tp->irq_cnt; i++) {
10588 		struct tg3_napi *tnapi = &tp->napi[i];
10589 
10590 		if (tg3_has_work(tnapi)) {
10591 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10592 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10593 				if (tnapi->chk_msi_cnt < 1) {
10594 					tnapi->chk_msi_cnt++;
10595 					return;
10596 				}
10597 				tg3_msi(0, tnapi);
10598 			}
10599 		}
10600 		tnapi->chk_msi_cnt = 0;
10601 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10602 		tnapi->last_tx_cons = tnapi->tx_cons;
10603 	}
10604 }
10605 
10606 static void tg3_timer(unsigned long __opaque)
10607 {
10608 	struct tg3 *tp = (struct tg3 *) __opaque;
10609 
10610 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10611 		goto restart_timer;
10612 
10613 	spin_lock(&tp->lock);
10614 
10615 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10616 	    tg3_flag(tp, 57765_CLASS))
10617 		tg3_chk_missed_msi(tp);
10618 
10619 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10620 		/* BCM4785: Flush posted writes from GbE to host memory. */
10621 		tr32(HOSTCC_MODE);
10622 	}
10623 
10624 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10625 		/* All of this garbage is because when using non-tagged
10626 		 * IRQ status the mailbox/status_block protocol the chip
10627 		 * uses with the cpu is race prone.
10628 		 */
10629 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10630 			tw32(GRC_LOCAL_CTRL,
10631 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10632 		} else {
10633 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10634 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10635 		}
10636 
10637 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10638 			spin_unlock(&tp->lock);
10639 			tg3_reset_task_schedule(tp);
10640 			goto restart_timer;
10641 		}
10642 	}
10643 
10644 	/* This part only runs once per second. */
10645 	if (!--tp->timer_counter) {
10646 		if (tg3_flag(tp, 5705_PLUS))
10647 			tg3_periodic_fetch_stats(tp);
10648 
10649 		if (tp->setlpicnt && !--tp->setlpicnt)
10650 			tg3_phy_eee_enable(tp);
10651 
10652 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10653 			u32 mac_stat;
10654 			int phy_event;
10655 
10656 			mac_stat = tr32(MAC_STATUS);
10657 
10658 			phy_event = 0;
10659 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10660 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10661 					phy_event = 1;
10662 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10663 				phy_event = 1;
10664 
10665 			if (phy_event)
10666 				tg3_setup_phy(tp, false);
10667 		} else if (tg3_flag(tp, POLL_SERDES)) {
10668 			u32 mac_stat = tr32(MAC_STATUS);
10669 			int need_setup = 0;
10670 
10671 			if (tp->link_up &&
10672 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10673 				need_setup = 1;
10674 			}
10675 			if (!tp->link_up &&
10676 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10677 					 MAC_STATUS_SIGNAL_DET))) {
10678 				need_setup = 1;
10679 			}
10680 			if (need_setup) {
10681 				if (!tp->serdes_counter) {
10682 					tw32_f(MAC_MODE,
10683 					     (tp->mac_mode &
10684 					      ~MAC_MODE_PORT_MODE_MASK));
10685 					udelay(40);
10686 					tw32_f(MAC_MODE, tp->mac_mode);
10687 					udelay(40);
10688 				}
10689 				tg3_setup_phy(tp, false);
10690 			}
10691 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10692 			   tg3_flag(tp, 5780_CLASS)) {
10693 			tg3_serdes_parallel_detect(tp);
10694 		}
10695 
10696 		tp->timer_counter = tp->timer_multiplier;
10697 	}
10698 
10699 	/* Heartbeat is only sent once every 2 seconds.
10700 	 *
10701 	 * The heartbeat is to tell the ASF firmware that the host
10702 	 * driver is still alive.  In the event that the OS crashes,
10703 	 * ASF needs to reset the hardware to free up the FIFO space
10704 	 * that may be filled with rx packets destined for the host.
10705 	 * If the FIFO is full, ASF will no longer function properly.
10706 	 *
10707 	 * Unintended resets have been reported on real time kernels
10708 	 * where the timer doesn't run on time.  Netpoll will also have
10709 	 * same problem.
10710 	 *
10711 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10712 	 * to check the ring condition when the heartbeat is expiring
10713 	 * before doing the reset.  This will prevent most unintended
10714 	 * resets.
10715 	 */
10716 	if (!--tp->asf_counter) {
10717 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10718 			tg3_wait_for_event_ack(tp);
10719 
10720 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10721 				      FWCMD_NICDRV_ALIVE3);
10722 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10723 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10724 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10725 
10726 			tg3_generate_fw_event(tp);
10727 		}
10728 		tp->asf_counter = tp->asf_multiplier;
10729 	}
10730 
10731 	spin_unlock(&tp->lock);
10732 
10733 restart_timer:
10734 	tp->timer.expires = jiffies + tp->timer_offset;
10735 	add_timer(&tp->timer);
10736 }
10737 
10738 static void tg3_timer_init(struct tg3 *tp)
10739 {
10740 	if (tg3_flag(tp, TAGGED_STATUS) &&
10741 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10742 	    !tg3_flag(tp, 57765_CLASS))
10743 		tp->timer_offset = HZ;
10744 	else
10745 		tp->timer_offset = HZ / 10;
10746 
10747 	BUG_ON(tp->timer_offset > HZ);
10748 
10749 	tp->timer_multiplier = (HZ / tp->timer_offset);
10750 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10751 			     TG3_FW_UPDATE_FREQ_SEC;
10752 
10753 	init_timer(&tp->timer);
10754 	tp->timer.data = (unsigned long) tp;
10755 	tp->timer.function = tg3_timer;
10756 }
10757 
10758 static void tg3_timer_start(struct tg3 *tp)
10759 {
10760 	tp->asf_counter   = tp->asf_multiplier;
10761 	tp->timer_counter = tp->timer_multiplier;
10762 
10763 	tp->timer.expires = jiffies + tp->timer_offset;
10764 	add_timer(&tp->timer);
10765 }
10766 
10767 static void tg3_timer_stop(struct tg3 *tp)
10768 {
10769 	del_timer_sync(&tp->timer);
10770 }
10771 
10772 /* Restart hardware after configuration changes, self-test, etc.
10773  * Invoked with tp->lock held.
10774  */
10775 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10776 	__releases(tp->lock)
10777 	__acquires(tp->lock)
10778 {
10779 	int err;
10780 
10781 	err = tg3_init_hw(tp, reset_phy);
10782 	if (err) {
10783 		netdev_err(tp->dev,
10784 			   "Failed to re-initialize device, aborting\n");
10785 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10786 		tg3_full_unlock(tp);
10787 		tg3_timer_stop(tp);
10788 		tp->irq_sync = 0;
10789 		tg3_napi_enable(tp);
10790 		dev_close(tp->dev);
10791 		tg3_full_lock(tp, 0);
10792 	}
10793 	return err;
10794 }
10795 
10796 static void tg3_reset_task(struct work_struct *work)
10797 {
10798 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10799 	int err;
10800 
10801 	tg3_full_lock(tp, 0);
10802 
10803 	if (!netif_running(tp->dev)) {
10804 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10805 		tg3_full_unlock(tp);
10806 		return;
10807 	}
10808 
10809 	tg3_full_unlock(tp);
10810 
10811 	tg3_phy_stop(tp);
10812 
10813 	tg3_netif_stop(tp);
10814 
10815 	tg3_full_lock(tp, 1);
10816 
10817 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10818 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
10819 		tp->write32_rx_mbox = tg3_write_flush_reg32;
10820 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
10821 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10822 	}
10823 
10824 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10825 	err = tg3_init_hw(tp, true);
10826 	if (err)
10827 		goto out;
10828 
10829 	tg3_netif_start(tp);
10830 
10831 out:
10832 	tg3_full_unlock(tp);
10833 
10834 	if (!err)
10835 		tg3_phy_start(tp);
10836 
10837 	tg3_flag_clear(tp, RESET_TASK_PENDING);
10838 }
10839 
10840 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10841 {
10842 	irq_handler_t fn;
10843 	unsigned long flags;
10844 	char *name;
10845 	struct tg3_napi *tnapi = &tp->napi[irq_num];
10846 
10847 	if (tp->irq_cnt == 1)
10848 		name = tp->dev->name;
10849 	else {
10850 		name = &tnapi->irq_lbl[0];
10851 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10852 		name[IFNAMSIZ-1] = 0;
10853 	}
10854 
10855 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10856 		fn = tg3_msi;
10857 		if (tg3_flag(tp, 1SHOT_MSI))
10858 			fn = tg3_msi_1shot;
10859 		flags = 0;
10860 	} else {
10861 		fn = tg3_interrupt;
10862 		if (tg3_flag(tp, TAGGED_STATUS))
10863 			fn = tg3_interrupt_tagged;
10864 		flags = IRQF_SHARED;
10865 	}
10866 
10867 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10868 }
10869 
10870 static int tg3_test_interrupt(struct tg3 *tp)
10871 {
10872 	struct tg3_napi *tnapi = &tp->napi[0];
10873 	struct net_device *dev = tp->dev;
10874 	int err, i, intr_ok = 0;
10875 	u32 val;
10876 
10877 	if (!netif_running(dev))
10878 		return -ENODEV;
10879 
10880 	tg3_disable_ints(tp);
10881 
10882 	free_irq(tnapi->irq_vec, tnapi);
10883 
10884 	/*
10885 	 * Turn off MSI one shot mode.  Otherwise this test has no
10886 	 * observable way to know whether the interrupt was delivered.
10887 	 */
10888 	if (tg3_flag(tp, 57765_PLUS)) {
10889 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10890 		tw32(MSGINT_MODE, val);
10891 	}
10892 
10893 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
10894 			  IRQF_SHARED, dev->name, tnapi);
10895 	if (err)
10896 		return err;
10897 
10898 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10899 	tg3_enable_ints(tp);
10900 
10901 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10902 	       tnapi->coal_now);
10903 
10904 	for (i = 0; i < 5; i++) {
10905 		u32 int_mbox, misc_host_ctrl;
10906 
10907 		int_mbox = tr32_mailbox(tnapi->int_mbox);
10908 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10909 
10910 		if ((int_mbox != 0) ||
10911 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10912 			intr_ok = 1;
10913 			break;
10914 		}
10915 
10916 		if (tg3_flag(tp, 57765_PLUS) &&
10917 		    tnapi->hw_status->status_tag != tnapi->last_tag)
10918 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10919 
10920 		msleep(10);
10921 	}
10922 
10923 	tg3_disable_ints(tp);
10924 
10925 	free_irq(tnapi->irq_vec, tnapi);
10926 
10927 	err = tg3_request_irq(tp, 0);
10928 
10929 	if (err)
10930 		return err;
10931 
10932 	if (intr_ok) {
10933 		/* Reenable MSI one shot mode. */
10934 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10935 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10936 			tw32(MSGINT_MODE, val);
10937 		}
10938 		return 0;
10939 	}
10940 
10941 	return -EIO;
10942 }
10943 
10944 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10945  * successfully restored
10946  */
10947 static int tg3_test_msi(struct tg3 *tp)
10948 {
10949 	int err;
10950 	u16 pci_cmd;
10951 
10952 	if (!tg3_flag(tp, USING_MSI))
10953 		return 0;
10954 
10955 	/* Turn off SERR reporting in case MSI terminates with Master
10956 	 * Abort.
10957 	 */
10958 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10959 	pci_write_config_word(tp->pdev, PCI_COMMAND,
10960 			      pci_cmd & ~PCI_COMMAND_SERR);
10961 
10962 	err = tg3_test_interrupt(tp);
10963 
10964 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10965 
10966 	if (!err)
10967 		return 0;
10968 
10969 	/* other failures */
10970 	if (err != -EIO)
10971 		return err;
10972 
10973 	/* MSI test failed, go back to INTx mode */
10974 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10975 		    "to INTx mode. Please report this failure to the PCI "
10976 		    "maintainer and include system chipset information\n");
10977 
10978 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10979 
10980 	pci_disable_msi(tp->pdev);
10981 
10982 	tg3_flag_clear(tp, USING_MSI);
10983 	tp->napi[0].irq_vec = tp->pdev->irq;
10984 
10985 	err = tg3_request_irq(tp, 0);
10986 	if (err)
10987 		return err;
10988 
10989 	/* Need to reset the chip because the MSI cycle may have terminated
10990 	 * with Master Abort.
10991 	 */
10992 	tg3_full_lock(tp, 1);
10993 
10994 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10995 	err = tg3_init_hw(tp, true);
10996 
10997 	tg3_full_unlock(tp);
10998 
10999 	if (err)
11000 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11001 
11002 	return err;
11003 }
11004 
11005 static int tg3_request_firmware(struct tg3 *tp)
11006 {
11007 	const struct tg3_firmware_hdr *fw_hdr;
11008 
11009 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11010 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11011 			   tp->fw_needed);
11012 		return -ENOENT;
11013 	}
11014 
11015 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11016 
11017 	/* Firmware blob starts with version numbers, followed by
11018 	 * start address and _full_ length including BSS sections
11019 	 * (which must be longer than the actual data, of course
11020 	 */
11021 
11022 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11023 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11024 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11025 			   tp->fw_len, tp->fw_needed);
11026 		release_firmware(tp->fw);
11027 		tp->fw = NULL;
11028 		return -EINVAL;
11029 	}
11030 
11031 	/* We no longer need firmware; we have it. */
11032 	tp->fw_needed = NULL;
11033 	return 0;
11034 }
11035 
11036 static u32 tg3_irq_count(struct tg3 *tp)
11037 {
11038 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11039 
11040 	if (irq_cnt > 1) {
11041 		/* We want as many rx rings enabled as there are cpus.
11042 		 * In multiqueue MSI-X mode, the first MSI-X vector
11043 		 * only deals with link interrupts, etc, so we add
11044 		 * one to the number of vectors we are requesting.
11045 		 */
11046 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11047 	}
11048 
11049 	return irq_cnt;
11050 }
11051 
11052 static bool tg3_enable_msix(struct tg3 *tp)
11053 {
11054 	int i, rc;
11055 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11056 
11057 	tp->txq_cnt = tp->txq_req;
11058 	tp->rxq_cnt = tp->rxq_req;
11059 	if (!tp->rxq_cnt)
11060 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11061 	if (tp->rxq_cnt > tp->rxq_max)
11062 		tp->rxq_cnt = tp->rxq_max;
11063 
11064 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11065 	 * scheduling of the TX rings can cause starvation of rings with
11066 	 * small packets when other rings have TSO or jumbo packets.
11067 	 */
11068 	if (!tp->txq_req)
11069 		tp->txq_cnt = 1;
11070 
11071 	tp->irq_cnt = tg3_irq_count(tp);
11072 
11073 	for (i = 0; i < tp->irq_max; i++) {
11074 		msix_ent[i].entry  = i;
11075 		msix_ent[i].vector = 0;
11076 	}
11077 
11078 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11079 	if (rc < 0) {
11080 		return false;
11081 	} else if (rc != 0) {
11082 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11083 			return false;
11084 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11085 			      tp->irq_cnt, rc);
11086 		tp->irq_cnt = rc;
11087 		tp->rxq_cnt = max(rc - 1, 1);
11088 		if (tp->txq_cnt)
11089 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11090 	}
11091 
11092 	for (i = 0; i < tp->irq_max; i++)
11093 		tp->napi[i].irq_vec = msix_ent[i].vector;
11094 
11095 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11096 		pci_disable_msix(tp->pdev);
11097 		return false;
11098 	}
11099 
11100 	if (tp->irq_cnt == 1)
11101 		return true;
11102 
11103 	tg3_flag_set(tp, ENABLE_RSS);
11104 
11105 	if (tp->txq_cnt > 1)
11106 		tg3_flag_set(tp, ENABLE_TSS);
11107 
11108 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11109 
11110 	return true;
11111 }
11112 
11113 static void tg3_ints_init(struct tg3 *tp)
11114 {
11115 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11116 	    !tg3_flag(tp, TAGGED_STATUS)) {
11117 		/* All MSI supporting chips should support tagged
11118 		 * status.  Assert that this is the case.
11119 		 */
11120 		netdev_warn(tp->dev,
11121 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11122 		goto defcfg;
11123 	}
11124 
11125 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11126 		tg3_flag_set(tp, USING_MSIX);
11127 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11128 		tg3_flag_set(tp, USING_MSI);
11129 
11130 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11131 		u32 msi_mode = tr32(MSGINT_MODE);
11132 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11133 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11134 		if (!tg3_flag(tp, 1SHOT_MSI))
11135 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11136 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11137 	}
11138 defcfg:
11139 	if (!tg3_flag(tp, USING_MSIX)) {
11140 		tp->irq_cnt = 1;
11141 		tp->napi[0].irq_vec = tp->pdev->irq;
11142 	}
11143 
11144 	if (tp->irq_cnt == 1) {
11145 		tp->txq_cnt = 1;
11146 		tp->rxq_cnt = 1;
11147 		netif_set_real_num_tx_queues(tp->dev, 1);
11148 		netif_set_real_num_rx_queues(tp->dev, 1);
11149 	}
11150 }
11151 
11152 static void tg3_ints_fini(struct tg3 *tp)
11153 {
11154 	if (tg3_flag(tp, USING_MSIX))
11155 		pci_disable_msix(tp->pdev);
11156 	else if (tg3_flag(tp, USING_MSI))
11157 		pci_disable_msi(tp->pdev);
11158 	tg3_flag_clear(tp, USING_MSI);
11159 	tg3_flag_clear(tp, USING_MSIX);
11160 	tg3_flag_clear(tp, ENABLE_RSS);
11161 	tg3_flag_clear(tp, ENABLE_TSS);
11162 }
11163 
11164 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11165 		     bool init)
11166 {
11167 	struct net_device *dev = tp->dev;
11168 	int i, err;
11169 
11170 	/*
11171 	 * Setup interrupts first so we know how
11172 	 * many NAPI resources to allocate
11173 	 */
11174 	tg3_ints_init(tp);
11175 
11176 	tg3_rss_check_indir_tbl(tp);
11177 
11178 	/* The placement of this call is tied
11179 	 * to the setup and use of Host TX descriptors.
11180 	 */
11181 	err = tg3_alloc_consistent(tp);
11182 	if (err)
11183 		goto err_out1;
11184 
11185 	tg3_napi_init(tp);
11186 
11187 	tg3_napi_enable(tp);
11188 
11189 	for (i = 0; i < tp->irq_cnt; i++) {
11190 		struct tg3_napi *tnapi = &tp->napi[i];
11191 		err = tg3_request_irq(tp, i);
11192 		if (err) {
11193 			for (i--; i >= 0; i--) {
11194 				tnapi = &tp->napi[i];
11195 				free_irq(tnapi->irq_vec, tnapi);
11196 			}
11197 			goto err_out2;
11198 		}
11199 	}
11200 
11201 	tg3_full_lock(tp, 0);
11202 
11203 	err = tg3_init_hw(tp, reset_phy);
11204 	if (err) {
11205 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11206 		tg3_free_rings(tp);
11207 	}
11208 
11209 	tg3_full_unlock(tp);
11210 
11211 	if (err)
11212 		goto err_out3;
11213 
11214 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11215 		err = tg3_test_msi(tp);
11216 
11217 		if (err) {
11218 			tg3_full_lock(tp, 0);
11219 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11220 			tg3_free_rings(tp);
11221 			tg3_full_unlock(tp);
11222 
11223 			goto err_out2;
11224 		}
11225 
11226 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11227 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11228 
11229 			tw32(PCIE_TRANSACTION_CFG,
11230 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11231 		}
11232 	}
11233 
11234 	tg3_phy_start(tp);
11235 
11236 	tg3_hwmon_open(tp);
11237 
11238 	tg3_full_lock(tp, 0);
11239 
11240 	tg3_timer_start(tp);
11241 	tg3_flag_set(tp, INIT_COMPLETE);
11242 	tg3_enable_ints(tp);
11243 
11244 	if (init)
11245 		tg3_ptp_init(tp);
11246 	else
11247 		tg3_ptp_resume(tp);
11248 
11249 
11250 	tg3_full_unlock(tp);
11251 
11252 	netif_tx_start_all_queues(dev);
11253 
11254 	/*
11255 	 * Reset loopback feature if it was turned on while the device was down
11256 	 * make sure that it's installed properly now.
11257 	 */
11258 	if (dev->features & NETIF_F_LOOPBACK)
11259 		tg3_set_loopback(dev, dev->features);
11260 
11261 	return 0;
11262 
11263 err_out3:
11264 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11265 		struct tg3_napi *tnapi = &tp->napi[i];
11266 		free_irq(tnapi->irq_vec, tnapi);
11267 	}
11268 
11269 err_out2:
11270 	tg3_napi_disable(tp);
11271 	tg3_napi_fini(tp);
11272 	tg3_free_consistent(tp);
11273 
11274 err_out1:
11275 	tg3_ints_fini(tp);
11276 
11277 	return err;
11278 }
11279 
11280 static void tg3_stop(struct tg3 *tp)
11281 {
11282 	int i;
11283 
11284 	tg3_reset_task_cancel(tp);
11285 	tg3_netif_stop(tp);
11286 
11287 	tg3_timer_stop(tp);
11288 
11289 	tg3_hwmon_close(tp);
11290 
11291 	tg3_phy_stop(tp);
11292 
11293 	tg3_full_lock(tp, 1);
11294 
11295 	tg3_disable_ints(tp);
11296 
11297 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11298 	tg3_free_rings(tp);
11299 	tg3_flag_clear(tp, INIT_COMPLETE);
11300 
11301 	tg3_full_unlock(tp);
11302 
11303 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11304 		struct tg3_napi *tnapi = &tp->napi[i];
11305 		free_irq(tnapi->irq_vec, tnapi);
11306 	}
11307 
11308 	tg3_ints_fini(tp);
11309 
11310 	tg3_napi_fini(tp);
11311 
11312 	tg3_free_consistent(tp);
11313 }
11314 
11315 static int tg3_open(struct net_device *dev)
11316 {
11317 	struct tg3 *tp = netdev_priv(dev);
11318 	int err;
11319 
11320 	if (tp->fw_needed) {
11321 		err = tg3_request_firmware(tp);
11322 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11323 			if (err) {
11324 				netdev_warn(tp->dev, "EEE capability disabled\n");
11325 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11326 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11327 				netdev_warn(tp->dev, "EEE capability restored\n");
11328 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11329 			}
11330 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11331 			if (err)
11332 				return err;
11333 		} else if (err) {
11334 			netdev_warn(tp->dev, "TSO capability disabled\n");
11335 			tg3_flag_clear(tp, TSO_CAPABLE);
11336 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11337 			netdev_notice(tp->dev, "TSO capability restored\n");
11338 			tg3_flag_set(tp, TSO_CAPABLE);
11339 		}
11340 	}
11341 
11342 	tg3_carrier_off(tp);
11343 
11344 	err = tg3_power_up(tp);
11345 	if (err)
11346 		return err;
11347 
11348 	tg3_full_lock(tp, 0);
11349 
11350 	tg3_disable_ints(tp);
11351 	tg3_flag_clear(tp, INIT_COMPLETE);
11352 
11353 	tg3_full_unlock(tp);
11354 
11355 	err = tg3_start(tp,
11356 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11357 			true, true);
11358 	if (err) {
11359 		tg3_frob_aux_power(tp, false);
11360 		pci_set_power_state(tp->pdev, PCI_D3hot);
11361 	}
11362 
11363 	if (tg3_flag(tp, PTP_CAPABLE)) {
11364 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11365 						   &tp->pdev->dev);
11366 		if (IS_ERR(tp->ptp_clock))
11367 			tp->ptp_clock = NULL;
11368 	}
11369 
11370 	return err;
11371 }
11372 
11373 static int tg3_close(struct net_device *dev)
11374 {
11375 	struct tg3 *tp = netdev_priv(dev);
11376 
11377 	tg3_ptp_fini(tp);
11378 
11379 	tg3_stop(tp);
11380 
11381 	/* Clear stats across close / open calls */
11382 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11383 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11384 
11385 	tg3_power_down(tp);
11386 
11387 	tg3_carrier_off(tp);
11388 
11389 	return 0;
11390 }
11391 
11392 static inline u64 get_stat64(tg3_stat64_t *val)
11393 {
11394        return ((u64)val->high << 32) | ((u64)val->low);
11395 }
11396 
11397 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11398 {
11399 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11400 
11401 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11402 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11403 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11404 		u32 val;
11405 
11406 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11407 			tg3_writephy(tp, MII_TG3_TEST1,
11408 				     val | MII_TG3_TEST1_CRC_EN);
11409 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11410 		} else
11411 			val = 0;
11412 
11413 		tp->phy_crc_errors += val;
11414 
11415 		return tp->phy_crc_errors;
11416 	}
11417 
11418 	return get_stat64(&hw_stats->rx_fcs_errors);
11419 }
11420 
11421 #define ESTAT_ADD(member) \
11422 	estats->member =	old_estats->member + \
11423 				get_stat64(&hw_stats->member)
11424 
11425 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11426 {
11427 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11428 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11429 
11430 	ESTAT_ADD(rx_octets);
11431 	ESTAT_ADD(rx_fragments);
11432 	ESTAT_ADD(rx_ucast_packets);
11433 	ESTAT_ADD(rx_mcast_packets);
11434 	ESTAT_ADD(rx_bcast_packets);
11435 	ESTAT_ADD(rx_fcs_errors);
11436 	ESTAT_ADD(rx_align_errors);
11437 	ESTAT_ADD(rx_xon_pause_rcvd);
11438 	ESTAT_ADD(rx_xoff_pause_rcvd);
11439 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11440 	ESTAT_ADD(rx_xoff_entered);
11441 	ESTAT_ADD(rx_frame_too_long_errors);
11442 	ESTAT_ADD(rx_jabbers);
11443 	ESTAT_ADD(rx_undersize_packets);
11444 	ESTAT_ADD(rx_in_length_errors);
11445 	ESTAT_ADD(rx_out_length_errors);
11446 	ESTAT_ADD(rx_64_or_less_octet_packets);
11447 	ESTAT_ADD(rx_65_to_127_octet_packets);
11448 	ESTAT_ADD(rx_128_to_255_octet_packets);
11449 	ESTAT_ADD(rx_256_to_511_octet_packets);
11450 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11451 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11452 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11453 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11454 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11455 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11456 
11457 	ESTAT_ADD(tx_octets);
11458 	ESTAT_ADD(tx_collisions);
11459 	ESTAT_ADD(tx_xon_sent);
11460 	ESTAT_ADD(tx_xoff_sent);
11461 	ESTAT_ADD(tx_flow_control);
11462 	ESTAT_ADD(tx_mac_errors);
11463 	ESTAT_ADD(tx_single_collisions);
11464 	ESTAT_ADD(tx_mult_collisions);
11465 	ESTAT_ADD(tx_deferred);
11466 	ESTAT_ADD(tx_excessive_collisions);
11467 	ESTAT_ADD(tx_late_collisions);
11468 	ESTAT_ADD(tx_collide_2times);
11469 	ESTAT_ADD(tx_collide_3times);
11470 	ESTAT_ADD(tx_collide_4times);
11471 	ESTAT_ADD(tx_collide_5times);
11472 	ESTAT_ADD(tx_collide_6times);
11473 	ESTAT_ADD(tx_collide_7times);
11474 	ESTAT_ADD(tx_collide_8times);
11475 	ESTAT_ADD(tx_collide_9times);
11476 	ESTAT_ADD(tx_collide_10times);
11477 	ESTAT_ADD(tx_collide_11times);
11478 	ESTAT_ADD(tx_collide_12times);
11479 	ESTAT_ADD(tx_collide_13times);
11480 	ESTAT_ADD(tx_collide_14times);
11481 	ESTAT_ADD(tx_collide_15times);
11482 	ESTAT_ADD(tx_ucast_packets);
11483 	ESTAT_ADD(tx_mcast_packets);
11484 	ESTAT_ADD(tx_bcast_packets);
11485 	ESTAT_ADD(tx_carrier_sense_errors);
11486 	ESTAT_ADD(tx_discards);
11487 	ESTAT_ADD(tx_errors);
11488 
11489 	ESTAT_ADD(dma_writeq_full);
11490 	ESTAT_ADD(dma_write_prioq_full);
11491 	ESTAT_ADD(rxbds_empty);
11492 	ESTAT_ADD(rx_discards);
11493 	ESTAT_ADD(rx_errors);
11494 	ESTAT_ADD(rx_threshold_hit);
11495 
11496 	ESTAT_ADD(dma_readq_full);
11497 	ESTAT_ADD(dma_read_prioq_full);
11498 	ESTAT_ADD(tx_comp_queue_full);
11499 
11500 	ESTAT_ADD(ring_set_send_prod_index);
11501 	ESTAT_ADD(ring_status_update);
11502 	ESTAT_ADD(nic_irqs);
11503 	ESTAT_ADD(nic_avoided_irqs);
11504 	ESTAT_ADD(nic_tx_threshold_hit);
11505 
11506 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11507 }
11508 
11509 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11510 {
11511 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11512 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11513 
11514 	stats->rx_packets = old_stats->rx_packets +
11515 		get_stat64(&hw_stats->rx_ucast_packets) +
11516 		get_stat64(&hw_stats->rx_mcast_packets) +
11517 		get_stat64(&hw_stats->rx_bcast_packets);
11518 
11519 	stats->tx_packets = old_stats->tx_packets +
11520 		get_stat64(&hw_stats->tx_ucast_packets) +
11521 		get_stat64(&hw_stats->tx_mcast_packets) +
11522 		get_stat64(&hw_stats->tx_bcast_packets);
11523 
11524 	stats->rx_bytes = old_stats->rx_bytes +
11525 		get_stat64(&hw_stats->rx_octets);
11526 	stats->tx_bytes = old_stats->tx_bytes +
11527 		get_stat64(&hw_stats->tx_octets);
11528 
11529 	stats->rx_errors = old_stats->rx_errors +
11530 		get_stat64(&hw_stats->rx_errors);
11531 	stats->tx_errors = old_stats->tx_errors +
11532 		get_stat64(&hw_stats->tx_errors) +
11533 		get_stat64(&hw_stats->tx_mac_errors) +
11534 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11535 		get_stat64(&hw_stats->tx_discards);
11536 
11537 	stats->multicast = old_stats->multicast +
11538 		get_stat64(&hw_stats->rx_mcast_packets);
11539 	stats->collisions = old_stats->collisions +
11540 		get_stat64(&hw_stats->tx_collisions);
11541 
11542 	stats->rx_length_errors = old_stats->rx_length_errors +
11543 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11544 		get_stat64(&hw_stats->rx_undersize_packets);
11545 
11546 	stats->rx_over_errors = old_stats->rx_over_errors +
11547 		get_stat64(&hw_stats->rxbds_empty);
11548 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11549 		get_stat64(&hw_stats->rx_align_errors);
11550 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11551 		get_stat64(&hw_stats->tx_discards);
11552 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11553 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11554 
11555 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11556 		tg3_calc_crc_errors(tp);
11557 
11558 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11559 		get_stat64(&hw_stats->rx_discards);
11560 
11561 	stats->rx_dropped = tp->rx_dropped;
11562 	stats->tx_dropped = tp->tx_dropped;
11563 }
11564 
11565 static int tg3_get_regs_len(struct net_device *dev)
11566 {
11567 	return TG3_REG_BLK_SIZE;
11568 }
11569 
11570 static void tg3_get_regs(struct net_device *dev,
11571 		struct ethtool_regs *regs, void *_p)
11572 {
11573 	struct tg3 *tp = netdev_priv(dev);
11574 
11575 	regs->version = 0;
11576 
11577 	memset(_p, 0, TG3_REG_BLK_SIZE);
11578 
11579 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11580 		return;
11581 
11582 	tg3_full_lock(tp, 0);
11583 
11584 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11585 
11586 	tg3_full_unlock(tp);
11587 }
11588 
11589 static int tg3_get_eeprom_len(struct net_device *dev)
11590 {
11591 	struct tg3 *tp = netdev_priv(dev);
11592 
11593 	return tp->nvram_size;
11594 }
11595 
11596 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11597 {
11598 	struct tg3 *tp = netdev_priv(dev);
11599 	int ret;
11600 	u8  *pd;
11601 	u32 i, offset, len, b_offset, b_count;
11602 	__be32 val;
11603 
11604 	if (tg3_flag(tp, NO_NVRAM))
11605 		return -EINVAL;
11606 
11607 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11608 		return -EAGAIN;
11609 
11610 	offset = eeprom->offset;
11611 	len = eeprom->len;
11612 	eeprom->len = 0;
11613 
11614 	eeprom->magic = TG3_EEPROM_MAGIC;
11615 
11616 	if (offset & 3) {
11617 		/* adjustments to start on required 4 byte boundary */
11618 		b_offset = offset & 3;
11619 		b_count = 4 - b_offset;
11620 		if (b_count > len) {
11621 			/* i.e. offset=1 len=2 */
11622 			b_count = len;
11623 		}
11624 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11625 		if (ret)
11626 			return ret;
11627 		memcpy(data, ((char *)&val) + b_offset, b_count);
11628 		len -= b_count;
11629 		offset += b_count;
11630 		eeprom->len += b_count;
11631 	}
11632 
11633 	/* read bytes up to the last 4 byte boundary */
11634 	pd = &data[eeprom->len];
11635 	for (i = 0; i < (len - (len & 3)); i += 4) {
11636 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11637 		if (ret) {
11638 			eeprom->len += i;
11639 			return ret;
11640 		}
11641 		memcpy(pd + i, &val, 4);
11642 	}
11643 	eeprom->len += i;
11644 
11645 	if (len & 3) {
11646 		/* read last bytes not ending on 4 byte boundary */
11647 		pd = &data[eeprom->len];
11648 		b_count = len & 3;
11649 		b_offset = offset + len - b_count;
11650 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11651 		if (ret)
11652 			return ret;
11653 		memcpy(pd, &val, b_count);
11654 		eeprom->len += b_count;
11655 	}
11656 	return 0;
11657 }
11658 
11659 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11660 {
11661 	struct tg3 *tp = netdev_priv(dev);
11662 	int ret;
11663 	u32 offset, len, b_offset, odd_len;
11664 	u8 *buf;
11665 	__be32 start, end;
11666 
11667 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11668 		return -EAGAIN;
11669 
11670 	if (tg3_flag(tp, NO_NVRAM) ||
11671 	    eeprom->magic != TG3_EEPROM_MAGIC)
11672 		return -EINVAL;
11673 
11674 	offset = eeprom->offset;
11675 	len = eeprom->len;
11676 
11677 	if ((b_offset = (offset & 3))) {
11678 		/* adjustments to start on required 4 byte boundary */
11679 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11680 		if (ret)
11681 			return ret;
11682 		len += b_offset;
11683 		offset &= ~3;
11684 		if (len < 4)
11685 			len = 4;
11686 	}
11687 
11688 	odd_len = 0;
11689 	if (len & 3) {
11690 		/* adjustments to end on required 4 byte boundary */
11691 		odd_len = 1;
11692 		len = (len + 3) & ~3;
11693 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11694 		if (ret)
11695 			return ret;
11696 	}
11697 
11698 	buf = data;
11699 	if (b_offset || odd_len) {
11700 		buf = kmalloc(len, GFP_KERNEL);
11701 		if (!buf)
11702 			return -ENOMEM;
11703 		if (b_offset)
11704 			memcpy(buf, &start, 4);
11705 		if (odd_len)
11706 			memcpy(buf+len-4, &end, 4);
11707 		memcpy(buf + b_offset, data, eeprom->len);
11708 	}
11709 
11710 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11711 
11712 	if (buf != data)
11713 		kfree(buf);
11714 
11715 	return ret;
11716 }
11717 
11718 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11719 {
11720 	struct tg3 *tp = netdev_priv(dev);
11721 
11722 	if (tg3_flag(tp, USE_PHYLIB)) {
11723 		struct phy_device *phydev;
11724 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11725 			return -EAGAIN;
11726 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11727 		return phy_ethtool_gset(phydev, cmd);
11728 	}
11729 
11730 	cmd->supported = (SUPPORTED_Autoneg);
11731 
11732 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11733 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11734 				   SUPPORTED_1000baseT_Full);
11735 
11736 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11737 		cmd->supported |= (SUPPORTED_100baseT_Half |
11738 				  SUPPORTED_100baseT_Full |
11739 				  SUPPORTED_10baseT_Half |
11740 				  SUPPORTED_10baseT_Full |
11741 				  SUPPORTED_TP);
11742 		cmd->port = PORT_TP;
11743 	} else {
11744 		cmd->supported |= SUPPORTED_FIBRE;
11745 		cmd->port = PORT_FIBRE;
11746 	}
11747 
11748 	cmd->advertising = tp->link_config.advertising;
11749 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11750 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11751 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11752 				cmd->advertising |= ADVERTISED_Pause;
11753 			} else {
11754 				cmd->advertising |= ADVERTISED_Pause |
11755 						    ADVERTISED_Asym_Pause;
11756 			}
11757 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11758 			cmd->advertising |= ADVERTISED_Asym_Pause;
11759 		}
11760 	}
11761 	if (netif_running(dev) && tp->link_up) {
11762 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11763 		cmd->duplex = tp->link_config.active_duplex;
11764 		cmd->lp_advertising = tp->link_config.rmt_adv;
11765 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11766 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11767 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11768 			else
11769 				cmd->eth_tp_mdix = ETH_TP_MDI;
11770 		}
11771 	} else {
11772 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11773 		cmd->duplex = DUPLEX_UNKNOWN;
11774 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11775 	}
11776 	cmd->phy_address = tp->phy_addr;
11777 	cmd->transceiver = XCVR_INTERNAL;
11778 	cmd->autoneg = tp->link_config.autoneg;
11779 	cmd->maxtxpkt = 0;
11780 	cmd->maxrxpkt = 0;
11781 	return 0;
11782 }
11783 
11784 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11785 {
11786 	struct tg3 *tp = netdev_priv(dev);
11787 	u32 speed = ethtool_cmd_speed(cmd);
11788 
11789 	if (tg3_flag(tp, USE_PHYLIB)) {
11790 		struct phy_device *phydev;
11791 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11792 			return -EAGAIN;
11793 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11794 		return phy_ethtool_sset(phydev, cmd);
11795 	}
11796 
11797 	if (cmd->autoneg != AUTONEG_ENABLE &&
11798 	    cmd->autoneg != AUTONEG_DISABLE)
11799 		return -EINVAL;
11800 
11801 	if (cmd->autoneg == AUTONEG_DISABLE &&
11802 	    cmd->duplex != DUPLEX_FULL &&
11803 	    cmd->duplex != DUPLEX_HALF)
11804 		return -EINVAL;
11805 
11806 	if (cmd->autoneg == AUTONEG_ENABLE) {
11807 		u32 mask = ADVERTISED_Autoneg |
11808 			   ADVERTISED_Pause |
11809 			   ADVERTISED_Asym_Pause;
11810 
11811 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11812 			mask |= ADVERTISED_1000baseT_Half |
11813 				ADVERTISED_1000baseT_Full;
11814 
11815 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11816 			mask |= ADVERTISED_100baseT_Half |
11817 				ADVERTISED_100baseT_Full |
11818 				ADVERTISED_10baseT_Half |
11819 				ADVERTISED_10baseT_Full |
11820 				ADVERTISED_TP;
11821 		else
11822 			mask |= ADVERTISED_FIBRE;
11823 
11824 		if (cmd->advertising & ~mask)
11825 			return -EINVAL;
11826 
11827 		mask &= (ADVERTISED_1000baseT_Half |
11828 			 ADVERTISED_1000baseT_Full |
11829 			 ADVERTISED_100baseT_Half |
11830 			 ADVERTISED_100baseT_Full |
11831 			 ADVERTISED_10baseT_Half |
11832 			 ADVERTISED_10baseT_Full);
11833 
11834 		cmd->advertising &= mask;
11835 	} else {
11836 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11837 			if (speed != SPEED_1000)
11838 				return -EINVAL;
11839 
11840 			if (cmd->duplex != DUPLEX_FULL)
11841 				return -EINVAL;
11842 		} else {
11843 			if (speed != SPEED_100 &&
11844 			    speed != SPEED_10)
11845 				return -EINVAL;
11846 		}
11847 	}
11848 
11849 	tg3_full_lock(tp, 0);
11850 
11851 	tp->link_config.autoneg = cmd->autoneg;
11852 	if (cmd->autoneg == AUTONEG_ENABLE) {
11853 		tp->link_config.advertising = (cmd->advertising |
11854 					      ADVERTISED_Autoneg);
11855 		tp->link_config.speed = SPEED_UNKNOWN;
11856 		tp->link_config.duplex = DUPLEX_UNKNOWN;
11857 	} else {
11858 		tp->link_config.advertising = 0;
11859 		tp->link_config.speed = speed;
11860 		tp->link_config.duplex = cmd->duplex;
11861 	}
11862 
11863 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11864 
11865 	tg3_warn_mgmt_link_flap(tp);
11866 
11867 	if (netif_running(dev))
11868 		tg3_setup_phy(tp, true);
11869 
11870 	tg3_full_unlock(tp);
11871 
11872 	return 0;
11873 }
11874 
11875 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11876 {
11877 	struct tg3 *tp = netdev_priv(dev);
11878 
11879 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11880 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11881 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11882 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11883 }
11884 
11885 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11886 {
11887 	struct tg3 *tp = netdev_priv(dev);
11888 
11889 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11890 		wol->supported = WAKE_MAGIC;
11891 	else
11892 		wol->supported = 0;
11893 	wol->wolopts = 0;
11894 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11895 		wol->wolopts = WAKE_MAGIC;
11896 	memset(&wol->sopass, 0, sizeof(wol->sopass));
11897 }
11898 
11899 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11900 {
11901 	struct tg3 *tp = netdev_priv(dev);
11902 	struct device *dp = &tp->pdev->dev;
11903 
11904 	if (wol->wolopts & ~WAKE_MAGIC)
11905 		return -EINVAL;
11906 	if ((wol->wolopts & WAKE_MAGIC) &&
11907 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11908 		return -EINVAL;
11909 
11910 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11911 
11912 	spin_lock_bh(&tp->lock);
11913 	if (device_may_wakeup(dp))
11914 		tg3_flag_set(tp, WOL_ENABLE);
11915 	else
11916 		tg3_flag_clear(tp, WOL_ENABLE);
11917 	spin_unlock_bh(&tp->lock);
11918 
11919 	return 0;
11920 }
11921 
11922 static u32 tg3_get_msglevel(struct net_device *dev)
11923 {
11924 	struct tg3 *tp = netdev_priv(dev);
11925 	return tp->msg_enable;
11926 }
11927 
11928 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11929 {
11930 	struct tg3 *tp = netdev_priv(dev);
11931 	tp->msg_enable = value;
11932 }
11933 
11934 static int tg3_nway_reset(struct net_device *dev)
11935 {
11936 	struct tg3 *tp = netdev_priv(dev);
11937 	int r;
11938 
11939 	if (!netif_running(dev))
11940 		return -EAGAIN;
11941 
11942 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11943 		return -EINVAL;
11944 
11945 	tg3_warn_mgmt_link_flap(tp);
11946 
11947 	if (tg3_flag(tp, USE_PHYLIB)) {
11948 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11949 			return -EAGAIN;
11950 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11951 	} else {
11952 		u32 bmcr;
11953 
11954 		spin_lock_bh(&tp->lock);
11955 		r = -EINVAL;
11956 		tg3_readphy(tp, MII_BMCR, &bmcr);
11957 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11958 		    ((bmcr & BMCR_ANENABLE) ||
11959 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11960 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11961 						   BMCR_ANENABLE);
11962 			r = 0;
11963 		}
11964 		spin_unlock_bh(&tp->lock);
11965 	}
11966 
11967 	return r;
11968 }
11969 
11970 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11971 {
11972 	struct tg3 *tp = netdev_priv(dev);
11973 
11974 	ering->rx_max_pending = tp->rx_std_ring_mask;
11975 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11976 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11977 	else
11978 		ering->rx_jumbo_max_pending = 0;
11979 
11980 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11981 
11982 	ering->rx_pending = tp->rx_pending;
11983 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11984 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11985 	else
11986 		ering->rx_jumbo_pending = 0;
11987 
11988 	ering->tx_pending = tp->napi[0].tx_pending;
11989 }
11990 
11991 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11992 {
11993 	struct tg3 *tp = netdev_priv(dev);
11994 	int i, irq_sync = 0, err = 0;
11995 
11996 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11997 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11998 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11999 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12000 	    (tg3_flag(tp, TSO_BUG) &&
12001 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12002 		return -EINVAL;
12003 
12004 	if (netif_running(dev)) {
12005 		tg3_phy_stop(tp);
12006 		tg3_netif_stop(tp);
12007 		irq_sync = 1;
12008 	}
12009 
12010 	tg3_full_lock(tp, irq_sync);
12011 
12012 	tp->rx_pending = ering->rx_pending;
12013 
12014 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12015 	    tp->rx_pending > 63)
12016 		tp->rx_pending = 63;
12017 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12018 
12019 	for (i = 0; i < tp->irq_max; i++)
12020 		tp->napi[i].tx_pending = ering->tx_pending;
12021 
12022 	if (netif_running(dev)) {
12023 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12024 		err = tg3_restart_hw(tp, false);
12025 		if (!err)
12026 			tg3_netif_start(tp);
12027 	}
12028 
12029 	tg3_full_unlock(tp);
12030 
12031 	if (irq_sync && !err)
12032 		tg3_phy_start(tp);
12033 
12034 	return err;
12035 }
12036 
12037 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12038 {
12039 	struct tg3 *tp = netdev_priv(dev);
12040 
12041 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12042 
12043 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12044 		epause->rx_pause = 1;
12045 	else
12046 		epause->rx_pause = 0;
12047 
12048 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12049 		epause->tx_pause = 1;
12050 	else
12051 		epause->tx_pause = 0;
12052 }
12053 
12054 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12055 {
12056 	struct tg3 *tp = netdev_priv(dev);
12057 	int err = 0;
12058 
12059 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12060 		tg3_warn_mgmt_link_flap(tp);
12061 
12062 	if (tg3_flag(tp, USE_PHYLIB)) {
12063 		u32 newadv;
12064 		struct phy_device *phydev;
12065 
12066 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12067 
12068 		if (!(phydev->supported & SUPPORTED_Pause) ||
12069 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12070 		     (epause->rx_pause != epause->tx_pause)))
12071 			return -EINVAL;
12072 
12073 		tp->link_config.flowctrl = 0;
12074 		if (epause->rx_pause) {
12075 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12076 
12077 			if (epause->tx_pause) {
12078 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12079 				newadv = ADVERTISED_Pause;
12080 			} else
12081 				newadv = ADVERTISED_Pause |
12082 					 ADVERTISED_Asym_Pause;
12083 		} else if (epause->tx_pause) {
12084 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12085 			newadv = ADVERTISED_Asym_Pause;
12086 		} else
12087 			newadv = 0;
12088 
12089 		if (epause->autoneg)
12090 			tg3_flag_set(tp, PAUSE_AUTONEG);
12091 		else
12092 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12093 
12094 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12095 			u32 oldadv = phydev->advertising &
12096 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12097 			if (oldadv != newadv) {
12098 				phydev->advertising &=
12099 					~(ADVERTISED_Pause |
12100 					  ADVERTISED_Asym_Pause);
12101 				phydev->advertising |= newadv;
12102 				if (phydev->autoneg) {
12103 					/*
12104 					 * Always renegotiate the link to
12105 					 * inform our link partner of our
12106 					 * flow control settings, even if the
12107 					 * flow control is forced.  Let
12108 					 * tg3_adjust_link() do the final
12109 					 * flow control setup.
12110 					 */
12111 					return phy_start_aneg(phydev);
12112 				}
12113 			}
12114 
12115 			if (!epause->autoneg)
12116 				tg3_setup_flow_control(tp, 0, 0);
12117 		} else {
12118 			tp->link_config.advertising &=
12119 					~(ADVERTISED_Pause |
12120 					  ADVERTISED_Asym_Pause);
12121 			tp->link_config.advertising |= newadv;
12122 		}
12123 	} else {
12124 		int irq_sync = 0;
12125 
12126 		if (netif_running(dev)) {
12127 			tg3_netif_stop(tp);
12128 			irq_sync = 1;
12129 		}
12130 
12131 		tg3_full_lock(tp, irq_sync);
12132 
12133 		if (epause->autoneg)
12134 			tg3_flag_set(tp, PAUSE_AUTONEG);
12135 		else
12136 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12137 		if (epause->rx_pause)
12138 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12139 		else
12140 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12141 		if (epause->tx_pause)
12142 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12143 		else
12144 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12145 
12146 		if (netif_running(dev)) {
12147 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12148 			err = tg3_restart_hw(tp, false);
12149 			if (!err)
12150 				tg3_netif_start(tp);
12151 		}
12152 
12153 		tg3_full_unlock(tp);
12154 	}
12155 
12156 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12157 
12158 	return err;
12159 }
12160 
12161 static int tg3_get_sset_count(struct net_device *dev, int sset)
12162 {
12163 	switch (sset) {
12164 	case ETH_SS_TEST:
12165 		return TG3_NUM_TEST;
12166 	case ETH_SS_STATS:
12167 		return TG3_NUM_STATS;
12168 	default:
12169 		return -EOPNOTSUPP;
12170 	}
12171 }
12172 
12173 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12174 			 u32 *rules __always_unused)
12175 {
12176 	struct tg3 *tp = netdev_priv(dev);
12177 
12178 	if (!tg3_flag(tp, SUPPORT_MSIX))
12179 		return -EOPNOTSUPP;
12180 
12181 	switch (info->cmd) {
12182 	case ETHTOOL_GRXRINGS:
12183 		if (netif_running(tp->dev))
12184 			info->data = tp->rxq_cnt;
12185 		else {
12186 			info->data = num_online_cpus();
12187 			if (info->data > TG3_RSS_MAX_NUM_QS)
12188 				info->data = TG3_RSS_MAX_NUM_QS;
12189 		}
12190 
12191 		/* The first interrupt vector only
12192 		 * handles link interrupts.
12193 		 */
12194 		info->data -= 1;
12195 		return 0;
12196 
12197 	default:
12198 		return -EOPNOTSUPP;
12199 	}
12200 }
12201 
12202 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12203 {
12204 	u32 size = 0;
12205 	struct tg3 *tp = netdev_priv(dev);
12206 
12207 	if (tg3_flag(tp, SUPPORT_MSIX))
12208 		size = TG3_RSS_INDIR_TBL_SIZE;
12209 
12210 	return size;
12211 }
12212 
12213 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12214 {
12215 	struct tg3 *tp = netdev_priv(dev);
12216 	int i;
12217 
12218 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12219 		indir[i] = tp->rss_ind_tbl[i];
12220 
12221 	return 0;
12222 }
12223 
12224 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12225 {
12226 	struct tg3 *tp = netdev_priv(dev);
12227 	size_t i;
12228 
12229 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12230 		tp->rss_ind_tbl[i] = indir[i];
12231 
12232 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12233 		return 0;
12234 
12235 	/* It is legal to write the indirection
12236 	 * table while the device is running.
12237 	 */
12238 	tg3_full_lock(tp, 0);
12239 	tg3_rss_write_indir_tbl(tp);
12240 	tg3_full_unlock(tp);
12241 
12242 	return 0;
12243 }
12244 
12245 static void tg3_get_channels(struct net_device *dev,
12246 			     struct ethtool_channels *channel)
12247 {
12248 	struct tg3 *tp = netdev_priv(dev);
12249 	u32 deflt_qs = netif_get_num_default_rss_queues();
12250 
12251 	channel->max_rx = tp->rxq_max;
12252 	channel->max_tx = tp->txq_max;
12253 
12254 	if (netif_running(dev)) {
12255 		channel->rx_count = tp->rxq_cnt;
12256 		channel->tx_count = tp->txq_cnt;
12257 	} else {
12258 		if (tp->rxq_req)
12259 			channel->rx_count = tp->rxq_req;
12260 		else
12261 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12262 
12263 		if (tp->txq_req)
12264 			channel->tx_count = tp->txq_req;
12265 		else
12266 			channel->tx_count = min(deflt_qs, tp->txq_max);
12267 	}
12268 }
12269 
12270 static int tg3_set_channels(struct net_device *dev,
12271 			    struct ethtool_channels *channel)
12272 {
12273 	struct tg3 *tp = netdev_priv(dev);
12274 
12275 	if (!tg3_flag(tp, SUPPORT_MSIX))
12276 		return -EOPNOTSUPP;
12277 
12278 	if (channel->rx_count > tp->rxq_max ||
12279 	    channel->tx_count > tp->txq_max)
12280 		return -EINVAL;
12281 
12282 	tp->rxq_req = channel->rx_count;
12283 	tp->txq_req = channel->tx_count;
12284 
12285 	if (!netif_running(dev))
12286 		return 0;
12287 
12288 	tg3_stop(tp);
12289 
12290 	tg3_carrier_off(tp);
12291 
12292 	tg3_start(tp, true, false, false);
12293 
12294 	return 0;
12295 }
12296 
12297 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12298 {
12299 	switch (stringset) {
12300 	case ETH_SS_STATS:
12301 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12302 		break;
12303 	case ETH_SS_TEST:
12304 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12305 		break;
12306 	default:
12307 		WARN_ON(1);	/* we need a WARN() */
12308 		break;
12309 	}
12310 }
12311 
12312 static int tg3_set_phys_id(struct net_device *dev,
12313 			    enum ethtool_phys_id_state state)
12314 {
12315 	struct tg3 *tp = netdev_priv(dev);
12316 
12317 	if (!netif_running(tp->dev))
12318 		return -EAGAIN;
12319 
12320 	switch (state) {
12321 	case ETHTOOL_ID_ACTIVE:
12322 		return 1;	/* cycle on/off once per second */
12323 
12324 	case ETHTOOL_ID_ON:
12325 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12326 		     LED_CTRL_1000MBPS_ON |
12327 		     LED_CTRL_100MBPS_ON |
12328 		     LED_CTRL_10MBPS_ON |
12329 		     LED_CTRL_TRAFFIC_OVERRIDE |
12330 		     LED_CTRL_TRAFFIC_BLINK |
12331 		     LED_CTRL_TRAFFIC_LED);
12332 		break;
12333 
12334 	case ETHTOOL_ID_OFF:
12335 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12336 		     LED_CTRL_TRAFFIC_OVERRIDE);
12337 		break;
12338 
12339 	case ETHTOOL_ID_INACTIVE:
12340 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12341 		break;
12342 	}
12343 
12344 	return 0;
12345 }
12346 
12347 static void tg3_get_ethtool_stats(struct net_device *dev,
12348 				   struct ethtool_stats *estats, u64 *tmp_stats)
12349 {
12350 	struct tg3 *tp = netdev_priv(dev);
12351 
12352 	if (tp->hw_stats)
12353 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12354 	else
12355 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12356 }
12357 
12358 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12359 {
12360 	int i;
12361 	__be32 *buf;
12362 	u32 offset = 0, len = 0;
12363 	u32 magic, val;
12364 
12365 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12366 		return NULL;
12367 
12368 	if (magic == TG3_EEPROM_MAGIC) {
12369 		for (offset = TG3_NVM_DIR_START;
12370 		     offset < TG3_NVM_DIR_END;
12371 		     offset += TG3_NVM_DIRENT_SIZE) {
12372 			if (tg3_nvram_read(tp, offset, &val))
12373 				return NULL;
12374 
12375 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12376 			    TG3_NVM_DIRTYPE_EXTVPD)
12377 				break;
12378 		}
12379 
12380 		if (offset != TG3_NVM_DIR_END) {
12381 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12382 			if (tg3_nvram_read(tp, offset + 4, &offset))
12383 				return NULL;
12384 
12385 			offset = tg3_nvram_logical_addr(tp, offset);
12386 		}
12387 	}
12388 
12389 	if (!offset || !len) {
12390 		offset = TG3_NVM_VPD_OFF;
12391 		len = TG3_NVM_VPD_LEN;
12392 	}
12393 
12394 	buf = kmalloc(len, GFP_KERNEL);
12395 	if (buf == NULL)
12396 		return NULL;
12397 
12398 	if (magic == TG3_EEPROM_MAGIC) {
12399 		for (i = 0; i < len; i += 4) {
12400 			/* The data is in little-endian format in NVRAM.
12401 			 * Use the big-endian read routines to preserve
12402 			 * the byte order as it exists in NVRAM.
12403 			 */
12404 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12405 				goto error;
12406 		}
12407 	} else {
12408 		u8 *ptr;
12409 		ssize_t cnt;
12410 		unsigned int pos = 0;
12411 
12412 		ptr = (u8 *)&buf[0];
12413 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12414 			cnt = pci_read_vpd(tp->pdev, pos,
12415 					   len - pos, ptr);
12416 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12417 				cnt = 0;
12418 			else if (cnt < 0)
12419 				goto error;
12420 		}
12421 		if (pos != len)
12422 			goto error;
12423 	}
12424 
12425 	*vpdlen = len;
12426 
12427 	return buf;
12428 
12429 error:
12430 	kfree(buf);
12431 	return NULL;
12432 }
12433 
12434 #define NVRAM_TEST_SIZE 0x100
12435 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12436 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12437 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12438 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12439 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12440 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12441 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12442 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12443 
12444 static int tg3_test_nvram(struct tg3 *tp)
12445 {
12446 	u32 csum, magic, len;
12447 	__be32 *buf;
12448 	int i, j, k, err = 0, size;
12449 
12450 	if (tg3_flag(tp, NO_NVRAM))
12451 		return 0;
12452 
12453 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12454 		return -EIO;
12455 
12456 	if (magic == TG3_EEPROM_MAGIC)
12457 		size = NVRAM_TEST_SIZE;
12458 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12459 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12460 		    TG3_EEPROM_SB_FORMAT_1) {
12461 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12462 			case TG3_EEPROM_SB_REVISION_0:
12463 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12464 				break;
12465 			case TG3_EEPROM_SB_REVISION_2:
12466 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12467 				break;
12468 			case TG3_EEPROM_SB_REVISION_3:
12469 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12470 				break;
12471 			case TG3_EEPROM_SB_REVISION_4:
12472 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12473 				break;
12474 			case TG3_EEPROM_SB_REVISION_5:
12475 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12476 				break;
12477 			case TG3_EEPROM_SB_REVISION_6:
12478 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12479 				break;
12480 			default:
12481 				return -EIO;
12482 			}
12483 		} else
12484 			return 0;
12485 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12486 		size = NVRAM_SELFBOOT_HW_SIZE;
12487 	else
12488 		return -EIO;
12489 
12490 	buf = kmalloc(size, GFP_KERNEL);
12491 	if (buf == NULL)
12492 		return -ENOMEM;
12493 
12494 	err = -EIO;
12495 	for (i = 0, j = 0; i < size; i += 4, j++) {
12496 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12497 		if (err)
12498 			break;
12499 	}
12500 	if (i < size)
12501 		goto out;
12502 
12503 	/* Selfboot format */
12504 	magic = be32_to_cpu(buf[0]);
12505 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12506 	    TG3_EEPROM_MAGIC_FW) {
12507 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12508 
12509 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12510 		    TG3_EEPROM_SB_REVISION_2) {
12511 			/* For rev 2, the csum doesn't include the MBA. */
12512 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12513 				csum8 += buf8[i];
12514 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12515 				csum8 += buf8[i];
12516 		} else {
12517 			for (i = 0; i < size; i++)
12518 				csum8 += buf8[i];
12519 		}
12520 
12521 		if (csum8 == 0) {
12522 			err = 0;
12523 			goto out;
12524 		}
12525 
12526 		err = -EIO;
12527 		goto out;
12528 	}
12529 
12530 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12531 	    TG3_EEPROM_MAGIC_HW) {
12532 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12533 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12534 		u8 *buf8 = (u8 *) buf;
12535 
12536 		/* Separate the parity bits and the data bytes.  */
12537 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12538 			if ((i == 0) || (i == 8)) {
12539 				int l;
12540 				u8 msk;
12541 
12542 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12543 					parity[k++] = buf8[i] & msk;
12544 				i++;
12545 			} else if (i == 16) {
12546 				int l;
12547 				u8 msk;
12548 
12549 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12550 					parity[k++] = buf8[i] & msk;
12551 				i++;
12552 
12553 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12554 					parity[k++] = buf8[i] & msk;
12555 				i++;
12556 			}
12557 			data[j++] = buf8[i];
12558 		}
12559 
12560 		err = -EIO;
12561 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12562 			u8 hw8 = hweight8(data[i]);
12563 
12564 			if ((hw8 & 0x1) && parity[i])
12565 				goto out;
12566 			else if (!(hw8 & 0x1) && !parity[i])
12567 				goto out;
12568 		}
12569 		err = 0;
12570 		goto out;
12571 	}
12572 
12573 	err = -EIO;
12574 
12575 	/* Bootstrap checksum at offset 0x10 */
12576 	csum = calc_crc((unsigned char *) buf, 0x10);
12577 	if (csum != le32_to_cpu(buf[0x10/4]))
12578 		goto out;
12579 
12580 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12581 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12582 	if (csum != le32_to_cpu(buf[0xfc/4]))
12583 		goto out;
12584 
12585 	kfree(buf);
12586 
12587 	buf = tg3_vpd_readblock(tp, &len);
12588 	if (!buf)
12589 		return -ENOMEM;
12590 
12591 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12592 	if (i > 0) {
12593 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12594 		if (j < 0)
12595 			goto out;
12596 
12597 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12598 			goto out;
12599 
12600 		i += PCI_VPD_LRDT_TAG_SIZE;
12601 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12602 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12603 		if (j > 0) {
12604 			u8 csum8 = 0;
12605 
12606 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12607 
12608 			for (i = 0; i <= j; i++)
12609 				csum8 += ((u8 *)buf)[i];
12610 
12611 			if (csum8)
12612 				goto out;
12613 		}
12614 	}
12615 
12616 	err = 0;
12617 
12618 out:
12619 	kfree(buf);
12620 	return err;
12621 }
12622 
12623 #define TG3_SERDES_TIMEOUT_SEC	2
12624 #define TG3_COPPER_TIMEOUT_SEC	6
12625 
12626 static int tg3_test_link(struct tg3 *tp)
12627 {
12628 	int i, max;
12629 
12630 	if (!netif_running(tp->dev))
12631 		return -ENODEV;
12632 
12633 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12634 		max = TG3_SERDES_TIMEOUT_SEC;
12635 	else
12636 		max = TG3_COPPER_TIMEOUT_SEC;
12637 
12638 	for (i = 0; i < max; i++) {
12639 		if (tp->link_up)
12640 			return 0;
12641 
12642 		if (msleep_interruptible(1000))
12643 			break;
12644 	}
12645 
12646 	return -EIO;
12647 }
12648 
12649 /* Only test the commonly used registers */
12650 static int tg3_test_registers(struct tg3 *tp)
12651 {
12652 	int i, is_5705, is_5750;
12653 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12654 	static struct {
12655 		u16 offset;
12656 		u16 flags;
12657 #define TG3_FL_5705	0x1
12658 #define TG3_FL_NOT_5705	0x2
12659 #define TG3_FL_NOT_5788	0x4
12660 #define TG3_FL_NOT_5750	0x8
12661 		u32 read_mask;
12662 		u32 write_mask;
12663 	} reg_tbl[] = {
12664 		/* MAC Control Registers */
12665 		{ MAC_MODE, TG3_FL_NOT_5705,
12666 			0x00000000, 0x00ef6f8c },
12667 		{ MAC_MODE, TG3_FL_5705,
12668 			0x00000000, 0x01ef6b8c },
12669 		{ MAC_STATUS, TG3_FL_NOT_5705,
12670 			0x03800107, 0x00000000 },
12671 		{ MAC_STATUS, TG3_FL_5705,
12672 			0x03800100, 0x00000000 },
12673 		{ MAC_ADDR_0_HIGH, 0x0000,
12674 			0x00000000, 0x0000ffff },
12675 		{ MAC_ADDR_0_LOW, 0x0000,
12676 			0x00000000, 0xffffffff },
12677 		{ MAC_RX_MTU_SIZE, 0x0000,
12678 			0x00000000, 0x0000ffff },
12679 		{ MAC_TX_MODE, 0x0000,
12680 			0x00000000, 0x00000070 },
12681 		{ MAC_TX_LENGTHS, 0x0000,
12682 			0x00000000, 0x00003fff },
12683 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12684 			0x00000000, 0x000007fc },
12685 		{ MAC_RX_MODE, TG3_FL_5705,
12686 			0x00000000, 0x000007dc },
12687 		{ MAC_HASH_REG_0, 0x0000,
12688 			0x00000000, 0xffffffff },
12689 		{ MAC_HASH_REG_1, 0x0000,
12690 			0x00000000, 0xffffffff },
12691 		{ MAC_HASH_REG_2, 0x0000,
12692 			0x00000000, 0xffffffff },
12693 		{ MAC_HASH_REG_3, 0x0000,
12694 			0x00000000, 0xffffffff },
12695 
12696 		/* Receive Data and Receive BD Initiator Control Registers. */
12697 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12698 			0x00000000, 0xffffffff },
12699 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12700 			0x00000000, 0xffffffff },
12701 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12702 			0x00000000, 0x00000003 },
12703 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12704 			0x00000000, 0xffffffff },
12705 		{ RCVDBDI_STD_BD+0, 0x0000,
12706 			0x00000000, 0xffffffff },
12707 		{ RCVDBDI_STD_BD+4, 0x0000,
12708 			0x00000000, 0xffffffff },
12709 		{ RCVDBDI_STD_BD+8, 0x0000,
12710 			0x00000000, 0xffff0002 },
12711 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12712 			0x00000000, 0xffffffff },
12713 
12714 		/* Receive BD Initiator Control Registers. */
12715 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12716 			0x00000000, 0xffffffff },
12717 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12718 			0x00000000, 0x000003ff },
12719 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12720 			0x00000000, 0xffffffff },
12721 
12722 		/* Host Coalescing Control Registers. */
12723 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12724 			0x00000000, 0x00000004 },
12725 		{ HOSTCC_MODE, TG3_FL_5705,
12726 			0x00000000, 0x000000f6 },
12727 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12728 			0x00000000, 0xffffffff },
12729 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12730 			0x00000000, 0x000003ff },
12731 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12732 			0x00000000, 0xffffffff },
12733 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12734 			0x00000000, 0x000003ff },
12735 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12736 			0x00000000, 0xffffffff },
12737 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12738 			0x00000000, 0x000000ff },
12739 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12740 			0x00000000, 0xffffffff },
12741 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12742 			0x00000000, 0x000000ff },
12743 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12744 			0x00000000, 0xffffffff },
12745 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12746 			0x00000000, 0xffffffff },
12747 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12748 			0x00000000, 0xffffffff },
12749 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12750 			0x00000000, 0x000000ff },
12751 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12752 			0x00000000, 0xffffffff },
12753 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12754 			0x00000000, 0x000000ff },
12755 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12756 			0x00000000, 0xffffffff },
12757 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12758 			0x00000000, 0xffffffff },
12759 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12760 			0x00000000, 0xffffffff },
12761 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12762 			0x00000000, 0xffffffff },
12763 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12764 			0x00000000, 0xffffffff },
12765 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12766 			0xffffffff, 0x00000000 },
12767 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12768 			0xffffffff, 0x00000000 },
12769 
12770 		/* Buffer Manager Control Registers. */
12771 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12772 			0x00000000, 0x007fff80 },
12773 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12774 			0x00000000, 0x007fffff },
12775 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12776 			0x00000000, 0x0000003f },
12777 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12778 			0x00000000, 0x000001ff },
12779 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12780 			0x00000000, 0x000001ff },
12781 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12782 			0xffffffff, 0x00000000 },
12783 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12784 			0xffffffff, 0x00000000 },
12785 
12786 		/* Mailbox Registers */
12787 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12788 			0x00000000, 0x000001ff },
12789 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12790 			0x00000000, 0x000001ff },
12791 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12792 			0x00000000, 0x000007ff },
12793 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12794 			0x00000000, 0x000001ff },
12795 
12796 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12797 	};
12798 
12799 	is_5705 = is_5750 = 0;
12800 	if (tg3_flag(tp, 5705_PLUS)) {
12801 		is_5705 = 1;
12802 		if (tg3_flag(tp, 5750_PLUS))
12803 			is_5750 = 1;
12804 	}
12805 
12806 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12807 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12808 			continue;
12809 
12810 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12811 			continue;
12812 
12813 		if (tg3_flag(tp, IS_5788) &&
12814 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
12815 			continue;
12816 
12817 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12818 			continue;
12819 
12820 		offset = (u32) reg_tbl[i].offset;
12821 		read_mask = reg_tbl[i].read_mask;
12822 		write_mask = reg_tbl[i].write_mask;
12823 
12824 		/* Save the original register content */
12825 		save_val = tr32(offset);
12826 
12827 		/* Determine the read-only value. */
12828 		read_val = save_val & read_mask;
12829 
12830 		/* Write zero to the register, then make sure the read-only bits
12831 		 * are not changed and the read/write bits are all zeros.
12832 		 */
12833 		tw32(offset, 0);
12834 
12835 		val = tr32(offset);
12836 
12837 		/* Test the read-only and read/write bits. */
12838 		if (((val & read_mask) != read_val) || (val & write_mask))
12839 			goto out;
12840 
12841 		/* Write ones to all the bits defined by RdMask and WrMask, then
12842 		 * make sure the read-only bits are not changed and the
12843 		 * read/write bits are all ones.
12844 		 */
12845 		tw32(offset, read_mask | write_mask);
12846 
12847 		val = tr32(offset);
12848 
12849 		/* Test the read-only bits. */
12850 		if ((val & read_mask) != read_val)
12851 			goto out;
12852 
12853 		/* Test the read/write bits. */
12854 		if ((val & write_mask) != write_mask)
12855 			goto out;
12856 
12857 		tw32(offset, save_val);
12858 	}
12859 
12860 	return 0;
12861 
12862 out:
12863 	if (netif_msg_hw(tp))
12864 		netdev_err(tp->dev,
12865 			   "Register test failed at offset %x\n", offset);
12866 	tw32(offset, save_val);
12867 	return -EIO;
12868 }
12869 
12870 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12871 {
12872 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12873 	int i;
12874 	u32 j;
12875 
12876 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12877 		for (j = 0; j < len; j += 4) {
12878 			u32 val;
12879 
12880 			tg3_write_mem(tp, offset + j, test_pattern[i]);
12881 			tg3_read_mem(tp, offset + j, &val);
12882 			if (val != test_pattern[i])
12883 				return -EIO;
12884 		}
12885 	}
12886 	return 0;
12887 }
12888 
12889 static int tg3_test_memory(struct tg3 *tp)
12890 {
12891 	static struct mem_entry {
12892 		u32 offset;
12893 		u32 len;
12894 	} mem_tbl_570x[] = {
12895 		{ 0x00000000, 0x00b50},
12896 		{ 0x00002000, 0x1c000},
12897 		{ 0xffffffff, 0x00000}
12898 	}, mem_tbl_5705[] = {
12899 		{ 0x00000100, 0x0000c},
12900 		{ 0x00000200, 0x00008},
12901 		{ 0x00004000, 0x00800},
12902 		{ 0x00006000, 0x01000},
12903 		{ 0x00008000, 0x02000},
12904 		{ 0x00010000, 0x0e000},
12905 		{ 0xffffffff, 0x00000}
12906 	}, mem_tbl_5755[] = {
12907 		{ 0x00000200, 0x00008},
12908 		{ 0x00004000, 0x00800},
12909 		{ 0x00006000, 0x00800},
12910 		{ 0x00008000, 0x02000},
12911 		{ 0x00010000, 0x0c000},
12912 		{ 0xffffffff, 0x00000}
12913 	}, mem_tbl_5906[] = {
12914 		{ 0x00000200, 0x00008},
12915 		{ 0x00004000, 0x00400},
12916 		{ 0x00006000, 0x00400},
12917 		{ 0x00008000, 0x01000},
12918 		{ 0x00010000, 0x01000},
12919 		{ 0xffffffff, 0x00000}
12920 	}, mem_tbl_5717[] = {
12921 		{ 0x00000200, 0x00008},
12922 		{ 0x00010000, 0x0a000},
12923 		{ 0x00020000, 0x13c00},
12924 		{ 0xffffffff, 0x00000}
12925 	}, mem_tbl_57765[] = {
12926 		{ 0x00000200, 0x00008},
12927 		{ 0x00004000, 0x00800},
12928 		{ 0x00006000, 0x09800},
12929 		{ 0x00010000, 0x0a000},
12930 		{ 0xffffffff, 0x00000}
12931 	};
12932 	struct mem_entry *mem_tbl;
12933 	int err = 0;
12934 	int i;
12935 
12936 	if (tg3_flag(tp, 5717_PLUS))
12937 		mem_tbl = mem_tbl_5717;
12938 	else if (tg3_flag(tp, 57765_CLASS) ||
12939 		 tg3_asic_rev(tp) == ASIC_REV_5762)
12940 		mem_tbl = mem_tbl_57765;
12941 	else if (tg3_flag(tp, 5755_PLUS))
12942 		mem_tbl = mem_tbl_5755;
12943 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12944 		mem_tbl = mem_tbl_5906;
12945 	else if (tg3_flag(tp, 5705_PLUS))
12946 		mem_tbl = mem_tbl_5705;
12947 	else
12948 		mem_tbl = mem_tbl_570x;
12949 
12950 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12951 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12952 		if (err)
12953 			break;
12954 	}
12955 
12956 	return err;
12957 }
12958 
12959 #define TG3_TSO_MSS		500
12960 
12961 #define TG3_TSO_IP_HDR_LEN	20
12962 #define TG3_TSO_TCP_HDR_LEN	20
12963 #define TG3_TSO_TCP_OPT_LEN	12
12964 
12965 static const u8 tg3_tso_header[] = {
12966 0x08, 0x00,
12967 0x45, 0x00, 0x00, 0x00,
12968 0x00, 0x00, 0x40, 0x00,
12969 0x40, 0x06, 0x00, 0x00,
12970 0x0a, 0x00, 0x00, 0x01,
12971 0x0a, 0x00, 0x00, 0x02,
12972 0x0d, 0x00, 0xe0, 0x00,
12973 0x00, 0x00, 0x01, 0x00,
12974 0x00, 0x00, 0x02, 0x00,
12975 0x80, 0x10, 0x10, 0x00,
12976 0x14, 0x09, 0x00, 0x00,
12977 0x01, 0x01, 0x08, 0x0a,
12978 0x11, 0x11, 0x11, 0x11,
12979 0x11, 0x11, 0x11, 0x11,
12980 };
12981 
12982 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12983 {
12984 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12985 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12986 	u32 budget;
12987 	struct sk_buff *skb;
12988 	u8 *tx_data, *rx_data;
12989 	dma_addr_t map;
12990 	int num_pkts, tx_len, rx_len, i, err;
12991 	struct tg3_rx_buffer_desc *desc;
12992 	struct tg3_napi *tnapi, *rnapi;
12993 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12994 
12995 	tnapi = &tp->napi[0];
12996 	rnapi = &tp->napi[0];
12997 	if (tp->irq_cnt > 1) {
12998 		if (tg3_flag(tp, ENABLE_RSS))
12999 			rnapi = &tp->napi[1];
13000 		if (tg3_flag(tp, ENABLE_TSS))
13001 			tnapi = &tp->napi[1];
13002 	}
13003 	coal_now = tnapi->coal_now | rnapi->coal_now;
13004 
13005 	err = -EIO;
13006 
13007 	tx_len = pktsz;
13008 	skb = netdev_alloc_skb(tp->dev, tx_len);
13009 	if (!skb)
13010 		return -ENOMEM;
13011 
13012 	tx_data = skb_put(skb, tx_len);
13013 	memcpy(tx_data, tp->dev->dev_addr, 6);
13014 	memset(tx_data + 6, 0x0, 8);
13015 
13016 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13017 
13018 	if (tso_loopback) {
13019 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13020 
13021 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13022 			      TG3_TSO_TCP_OPT_LEN;
13023 
13024 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13025 		       sizeof(tg3_tso_header));
13026 		mss = TG3_TSO_MSS;
13027 
13028 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13029 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13030 
13031 		/* Set the total length field in the IP header */
13032 		iph->tot_len = htons((u16)(mss + hdr_len));
13033 
13034 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13035 			      TXD_FLAG_CPU_POST_DMA);
13036 
13037 		if (tg3_flag(tp, HW_TSO_1) ||
13038 		    tg3_flag(tp, HW_TSO_2) ||
13039 		    tg3_flag(tp, HW_TSO_3)) {
13040 			struct tcphdr *th;
13041 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13042 			th = (struct tcphdr *)&tx_data[val];
13043 			th->check = 0;
13044 		} else
13045 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13046 
13047 		if (tg3_flag(tp, HW_TSO_3)) {
13048 			mss |= (hdr_len & 0xc) << 12;
13049 			if (hdr_len & 0x10)
13050 				base_flags |= 0x00000010;
13051 			base_flags |= (hdr_len & 0x3e0) << 5;
13052 		} else if (tg3_flag(tp, HW_TSO_2))
13053 			mss |= hdr_len << 9;
13054 		else if (tg3_flag(tp, HW_TSO_1) ||
13055 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13056 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13057 		} else {
13058 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13059 		}
13060 
13061 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13062 	} else {
13063 		num_pkts = 1;
13064 		data_off = ETH_HLEN;
13065 
13066 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13067 		    tx_len > VLAN_ETH_FRAME_LEN)
13068 			base_flags |= TXD_FLAG_JMB_PKT;
13069 	}
13070 
13071 	for (i = data_off; i < tx_len; i++)
13072 		tx_data[i] = (u8) (i & 0xff);
13073 
13074 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13075 	if (pci_dma_mapping_error(tp->pdev, map)) {
13076 		dev_kfree_skb(skb);
13077 		return -EIO;
13078 	}
13079 
13080 	val = tnapi->tx_prod;
13081 	tnapi->tx_buffers[val].skb = skb;
13082 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13083 
13084 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13085 	       rnapi->coal_now);
13086 
13087 	udelay(10);
13088 
13089 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13090 
13091 	budget = tg3_tx_avail(tnapi);
13092 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13093 			    base_flags | TXD_FLAG_END, mss, 0)) {
13094 		tnapi->tx_buffers[val].skb = NULL;
13095 		dev_kfree_skb(skb);
13096 		return -EIO;
13097 	}
13098 
13099 	tnapi->tx_prod++;
13100 
13101 	/* Sync BD data before updating mailbox */
13102 	wmb();
13103 
13104 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13105 	tr32_mailbox(tnapi->prodmbox);
13106 
13107 	udelay(10);
13108 
13109 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13110 	for (i = 0; i < 35; i++) {
13111 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13112 		       coal_now);
13113 
13114 		udelay(10);
13115 
13116 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13117 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13118 		if ((tx_idx == tnapi->tx_prod) &&
13119 		    (rx_idx == (rx_start_idx + num_pkts)))
13120 			break;
13121 	}
13122 
13123 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13124 	dev_kfree_skb(skb);
13125 
13126 	if (tx_idx != tnapi->tx_prod)
13127 		goto out;
13128 
13129 	if (rx_idx != rx_start_idx + num_pkts)
13130 		goto out;
13131 
13132 	val = data_off;
13133 	while (rx_idx != rx_start_idx) {
13134 		desc = &rnapi->rx_rcb[rx_start_idx++];
13135 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13136 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13137 
13138 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13139 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13140 			goto out;
13141 
13142 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13143 			 - ETH_FCS_LEN;
13144 
13145 		if (!tso_loopback) {
13146 			if (rx_len != tx_len)
13147 				goto out;
13148 
13149 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13150 				if (opaque_key != RXD_OPAQUE_RING_STD)
13151 					goto out;
13152 			} else {
13153 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13154 					goto out;
13155 			}
13156 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13157 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13158 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13159 			goto out;
13160 		}
13161 
13162 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13163 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13164 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13165 					     mapping);
13166 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13167 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13168 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13169 					     mapping);
13170 		} else
13171 			goto out;
13172 
13173 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13174 					    PCI_DMA_FROMDEVICE);
13175 
13176 		rx_data += TG3_RX_OFFSET(tp);
13177 		for (i = data_off; i < rx_len; i++, val++) {
13178 			if (*(rx_data + i) != (u8) (val & 0xff))
13179 				goto out;
13180 		}
13181 	}
13182 
13183 	err = 0;
13184 
13185 	/* tg3_free_rings will unmap and free the rx_data */
13186 out:
13187 	return err;
13188 }
13189 
13190 #define TG3_STD_LOOPBACK_FAILED		1
13191 #define TG3_JMB_LOOPBACK_FAILED		2
13192 #define TG3_TSO_LOOPBACK_FAILED		4
13193 #define TG3_LOOPBACK_FAILED \
13194 	(TG3_STD_LOOPBACK_FAILED | \
13195 	 TG3_JMB_LOOPBACK_FAILED | \
13196 	 TG3_TSO_LOOPBACK_FAILED)
13197 
13198 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13199 {
13200 	int err = -EIO;
13201 	u32 eee_cap;
13202 	u32 jmb_pkt_sz = 9000;
13203 
13204 	if (tp->dma_limit)
13205 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13206 
13207 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13208 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13209 
13210 	if (!netif_running(tp->dev)) {
13211 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13212 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13213 		if (do_extlpbk)
13214 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13215 		goto done;
13216 	}
13217 
13218 	err = tg3_reset_hw(tp, true);
13219 	if (err) {
13220 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13221 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13222 		if (do_extlpbk)
13223 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13224 		goto done;
13225 	}
13226 
13227 	if (tg3_flag(tp, ENABLE_RSS)) {
13228 		int i;
13229 
13230 		/* Reroute all rx packets to the 1st queue */
13231 		for (i = MAC_RSS_INDIR_TBL_0;
13232 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13233 			tw32(i, 0x0);
13234 	}
13235 
13236 	/* HW errata - mac loopback fails in some cases on 5780.
13237 	 * Normal traffic and PHY loopback are not affected by
13238 	 * errata.  Also, the MAC loopback test is deprecated for
13239 	 * all newer ASIC revisions.
13240 	 */
13241 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13242 	    !tg3_flag(tp, CPMU_PRESENT)) {
13243 		tg3_mac_loopback(tp, true);
13244 
13245 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13246 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13247 
13248 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13249 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13250 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13251 
13252 		tg3_mac_loopback(tp, false);
13253 	}
13254 
13255 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13256 	    !tg3_flag(tp, USE_PHYLIB)) {
13257 		int i;
13258 
13259 		tg3_phy_lpbk_set(tp, 0, false);
13260 
13261 		/* Wait for link */
13262 		for (i = 0; i < 100; i++) {
13263 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13264 				break;
13265 			mdelay(1);
13266 		}
13267 
13268 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13269 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13270 		if (tg3_flag(tp, TSO_CAPABLE) &&
13271 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13272 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13273 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13274 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13275 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13276 
13277 		if (do_extlpbk) {
13278 			tg3_phy_lpbk_set(tp, 0, true);
13279 
13280 			/* All link indications report up, but the hardware
13281 			 * isn't really ready for about 20 msec.  Double it
13282 			 * to be sure.
13283 			 */
13284 			mdelay(40);
13285 
13286 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13287 				data[TG3_EXT_LOOPB_TEST] |=
13288 							TG3_STD_LOOPBACK_FAILED;
13289 			if (tg3_flag(tp, TSO_CAPABLE) &&
13290 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13291 				data[TG3_EXT_LOOPB_TEST] |=
13292 							TG3_TSO_LOOPBACK_FAILED;
13293 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13294 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13295 				data[TG3_EXT_LOOPB_TEST] |=
13296 							TG3_JMB_LOOPBACK_FAILED;
13297 		}
13298 
13299 		/* Re-enable gphy autopowerdown. */
13300 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13301 			tg3_phy_toggle_apd(tp, true);
13302 	}
13303 
13304 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13305 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13306 
13307 done:
13308 	tp->phy_flags |= eee_cap;
13309 
13310 	return err;
13311 }
13312 
13313 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13314 			  u64 *data)
13315 {
13316 	struct tg3 *tp = netdev_priv(dev);
13317 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13318 
13319 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13320 	    tg3_power_up(tp)) {
13321 		etest->flags |= ETH_TEST_FL_FAILED;
13322 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13323 		return;
13324 	}
13325 
13326 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13327 
13328 	if (tg3_test_nvram(tp) != 0) {
13329 		etest->flags |= ETH_TEST_FL_FAILED;
13330 		data[TG3_NVRAM_TEST] = 1;
13331 	}
13332 	if (!doextlpbk && tg3_test_link(tp)) {
13333 		etest->flags |= ETH_TEST_FL_FAILED;
13334 		data[TG3_LINK_TEST] = 1;
13335 	}
13336 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13337 		int err, err2 = 0, irq_sync = 0;
13338 
13339 		if (netif_running(dev)) {
13340 			tg3_phy_stop(tp);
13341 			tg3_netif_stop(tp);
13342 			irq_sync = 1;
13343 		}
13344 
13345 		tg3_full_lock(tp, irq_sync);
13346 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13347 		err = tg3_nvram_lock(tp);
13348 		tg3_halt_cpu(tp, RX_CPU_BASE);
13349 		if (!tg3_flag(tp, 5705_PLUS))
13350 			tg3_halt_cpu(tp, TX_CPU_BASE);
13351 		if (!err)
13352 			tg3_nvram_unlock(tp);
13353 
13354 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13355 			tg3_phy_reset(tp);
13356 
13357 		if (tg3_test_registers(tp) != 0) {
13358 			etest->flags |= ETH_TEST_FL_FAILED;
13359 			data[TG3_REGISTER_TEST] = 1;
13360 		}
13361 
13362 		if (tg3_test_memory(tp) != 0) {
13363 			etest->flags |= ETH_TEST_FL_FAILED;
13364 			data[TG3_MEMORY_TEST] = 1;
13365 		}
13366 
13367 		if (doextlpbk)
13368 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13369 
13370 		if (tg3_test_loopback(tp, data, doextlpbk))
13371 			etest->flags |= ETH_TEST_FL_FAILED;
13372 
13373 		tg3_full_unlock(tp);
13374 
13375 		if (tg3_test_interrupt(tp) != 0) {
13376 			etest->flags |= ETH_TEST_FL_FAILED;
13377 			data[TG3_INTERRUPT_TEST] = 1;
13378 		}
13379 
13380 		tg3_full_lock(tp, 0);
13381 
13382 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13383 		if (netif_running(dev)) {
13384 			tg3_flag_set(tp, INIT_COMPLETE);
13385 			err2 = tg3_restart_hw(tp, true);
13386 			if (!err2)
13387 				tg3_netif_start(tp);
13388 		}
13389 
13390 		tg3_full_unlock(tp);
13391 
13392 		if (irq_sync && !err2)
13393 			tg3_phy_start(tp);
13394 	}
13395 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13396 		tg3_power_down(tp);
13397 
13398 }
13399 
13400 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13401 			      struct ifreq *ifr, int cmd)
13402 {
13403 	struct tg3 *tp = netdev_priv(dev);
13404 	struct hwtstamp_config stmpconf;
13405 
13406 	if (!tg3_flag(tp, PTP_CAPABLE))
13407 		return -EINVAL;
13408 
13409 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13410 		return -EFAULT;
13411 
13412 	if (stmpconf.flags)
13413 		return -EINVAL;
13414 
13415 	switch (stmpconf.tx_type) {
13416 	case HWTSTAMP_TX_ON:
13417 		tg3_flag_set(tp, TX_TSTAMP_EN);
13418 		break;
13419 	case HWTSTAMP_TX_OFF:
13420 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13421 		break;
13422 	default:
13423 		return -ERANGE;
13424 	}
13425 
13426 	switch (stmpconf.rx_filter) {
13427 	case HWTSTAMP_FILTER_NONE:
13428 		tp->rxptpctl = 0;
13429 		break;
13430 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13431 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13432 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13433 		break;
13434 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13435 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13436 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13437 		break;
13438 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13439 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13440 			       TG3_RX_PTP_CTL_DELAY_REQ;
13441 		break;
13442 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13443 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13444 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13445 		break;
13446 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13447 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13448 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13449 		break;
13450 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13451 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13452 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13453 		break;
13454 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13455 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13456 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13457 		break;
13458 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13459 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13460 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13461 		break;
13462 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13463 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13464 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13465 		break;
13466 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13467 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13468 			       TG3_RX_PTP_CTL_DELAY_REQ;
13469 		break;
13470 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13471 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13472 			       TG3_RX_PTP_CTL_DELAY_REQ;
13473 		break;
13474 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13475 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13476 			       TG3_RX_PTP_CTL_DELAY_REQ;
13477 		break;
13478 	default:
13479 		return -ERANGE;
13480 	}
13481 
13482 	if (netif_running(dev) && tp->rxptpctl)
13483 		tw32(TG3_RX_PTP_CTL,
13484 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13485 
13486 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13487 		-EFAULT : 0;
13488 }
13489 
13490 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13491 {
13492 	struct mii_ioctl_data *data = if_mii(ifr);
13493 	struct tg3 *tp = netdev_priv(dev);
13494 	int err;
13495 
13496 	if (tg3_flag(tp, USE_PHYLIB)) {
13497 		struct phy_device *phydev;
13498 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13499 			return -EAGAIN;
13500 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13501 		return phy_mii_ioctl(phydev, ifr, cmd);
13502 	}
13503 
13504 	switch (cmd) {
13505 	case SIOCGMIIPHY:
13506 		data->phy_id = tp->phy_addr;
13507 
13508 		/* fallthru */
13509 	case SIOCGMIIREG: {
13510 		u32 mii_regval;
13511 
13512 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13513 			break;			/* We have no PHY */
13514 
13515 		if (!netif_running(dev))
13516 			return -EAGAIN;
13517 
13518 		spin_lock_bh(&tp->lock);
13519 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13520 				    data->reg_num & 0x1f, &mii_regval);
13521 		spin_unlock_bh(&tp->lock);
13522 
13523 		data->val_out = mii_regval;
13524 
13525 		return err;
13526 	}
13527 
13528 	case SIOCSMIIREG:
13529 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13530 			break;			/* We have no PHY */
13531 
13532 		if (!netif_running(dev))
13533 			return -EAGAIN;
13534 
13535 		spin_lock_bh(&tp->lock);
13536 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13537 				     data->reg_num & 0x1f, data->val_in);
13538 		spin_unlock_bh(&tp->lock);
13539 
13540 		return err;
13541 
13542 	case SIOCSHWTSTAMP:
13543 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13544 
13545 	default:
13546 		/* do nothing */
13547 		break;
13548 	}
13549 	return -EOPNOTSUPP;
13550 }
13551 
13552 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13553 {
13554 	struct tg3 *tp = netdev_priv(dev);
13555 
13556 	memcpy(ec, &tp->coal, sizeof(*ec));
13557 	return 0;
13558 }
13559 
13560 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13561 {
13562 	struct tg3 *tp = netdev_priv(dev);
13563 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13564 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13565 
13566 	if (!tg3_flag(tp, 5705_PLUS)) {
13567 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13568 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13569 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13570 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13571 	}
13572 
13573 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13574 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13575 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13576 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13577 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13578 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13579 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13580 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13581 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13582 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13583 		return -EINVAL;
13584 
13585 	/* No rx interrupts will be generated if both are zero */
13586 	if ((ec->rx_coalesce_usecs == 0) &&
13587 	    (ec->rx_max_coalesced_frames == 0))
13588 		return -EINVAL;
13589 
13590 	/* No tx interrupts will be generated if both are zero */
13591 	if ((ec->tx_coalesce_usecs == 0) &&
13592 	    (ec->tx_max_coalesced_frames == 0))
13593 		return -EINVAL;
13594 
13595 	/* Only copy relevant parameters, ignore all others. */
13596 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13597 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13598 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13599 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13600 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13601 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13602 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13603 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13604 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13605 
13606 	if (netif_running(dev)) {
13607 		tg3_full_lock(tp, 0);
13608 		__tg3_set_coalesce(tp, &tp->coal);
13609 		tg3_full_unlock(tp);
13610 	}
13611 	return 0;
13612 }
13613 
13614 static const struct ethtool_ops tg3_ethtool_ops = {
13615 	.get_settings		= tg3_get_settings,
13616 	.set_settings		= tg3_set_settings,
13617 	.get_drvinfo		= tg3_get_drvinfo,
13618 	.get_regs_len		= tg3_get_regs_len,
13619 	.get_regs		= tg3_get_regs,
13620 	.get_wol		= tg3_get_wol,
13621 	.set_wol		= tg3_set_wol,
13622 	.get_msglevel		= tg3_get_msglevel,
13623 	.set_msglevel		= tg3_set_msglevel,
13624 	.nway_reset		= tg3_nway_reset,
13625 	.get_link		= ethtool_op_get_link,
13626 	.get_eeprom_len		= tg3_get_eeprom_len,
13627 	.get_eeprom		= tg3_get_eeprom,
13628 	.set_eeprom		= tg3_set_eeprom,
13629 	.get_ringparam		= tg3_get_ringparam,
13630 	.set_ringparam		= tg3_set_ringparam,
13631 	.get_pauseparam		= tg3_get_pauseparam,
13632 	.set_pauseparam		= tg3_set_pauseparam,
13633 	.self_test		= tg3_self_test,
13634 	.get_strings		= tg3_get_strings,
13635 	.set_phys_id		= tg3_set_phys_id,
13636 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13637 	.get_coalesce		= tg3_get_coalesce,
13638 	.set_coalesce		= tg3_set_coalesce,
13639 	.get_sset_count		= tg3_get_sset_count,
13640 	.get_rxnfc		= tg3_get_rxnfc,
13641 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13642 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13643 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13644 	.get_channels		= tg3_get_channels,
13645 	.set_channels		= tg3_set_channels,
13646 	.get_ts_info		= tg3_get_ts_info,
13647 };
13648 
13649 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13650 						struct rtnl_link_stats64 *stats)
13651 {
13652 	struct tg3 *tp = netdev_priv(dev);
13653 
13654 	spin_lock_bh(&tp->lock);
13655 	if (!tp->hw_stats) {
13656 		spin_unlock_bh(&tp->lock);
13657 		return &tp->net_stats_prev;
13658 	}
13659 
13660 	tg3_get_nstats(tp, stats);
13661 	spin_unlock_bh(&tp->lock);
13662 
13663 	return stats;
13664 }
13665 
13666 static void tg3_set_rx_mode(struct net_device *dev)
13667 {
13668 	struct tg3 *tp = netdev_priv(dev);
13669 
13670 	if (!netif_running(dev))
13671 		return;
13672 
13673 	tg3_full_lock(tp, 0);
13674 	__tg3_set_rx_mode(dev);
13675 	tg3_full_unlock(tp);
13676 }
13677 
13678 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13679 			       int new_mtu)
13680 {
13681 	dev->mtu = new_mtu;
13682 
13683 	if (new_mtu > ETH_DATA_LEN) {
13684 		if (tg3_flag(tp, 5780_CLASS)) {
13685 			netdev_update_features(dev);
13686 			tg3_flag_clear(tp, TSO_CAPABLE);
13687 		} else {
13688 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13689 		}
13690 	} else {
13691 		if (tg3_flag(tp, 5780_CLASS)) {
13692 			tg3_flag_set(tp, TSO_CAPABLE);
13693 			netdev_update_features(dev);
13694 		}
13695 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13696 	}
13697 }
13698 
13699 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13700 {
13701 	struct tg3 *tp = netdev_priv(dev);
13702 	int err;
13703 	bool reset_phy = false;
13704 
13705 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13706 		return -EINVAL;
13707 
13708 	if (!netif_running(dev)) {
13709 		/* We'll just catch it later when the
13710 		 * device is up'd.
13711 		 */
13712 		tg3_set_mtu(dev, tp, new_mtu);
13713 		return 0;
13714 	}
13715 
13716 	tg3_phy_stop(tp);
13717 
13718 	tg3_netif_stop(tp);
13719 
13720 	tg3_full_lock(tp, 1);
13721 
13722 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13723 
13724 	tg3_set_mtu(dev, tp, new_mtu);
13725 
13726 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13727 	 * breaks all requests to 256 bytes.
13728 	 */
13729 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13730 		reset_phy = true;
13731 
13732 	err = tg3_restart_hw(tp, reset_phy);
13733 
13734 	if (!err)
13735 		tg3_netif_start(tp);
13736 
13737 	tg3_full_unlock(tp);
13738 
13739 	if (!err)
13740 		tg3_phy_start(tp);
13741 
13742 	return err;
13743 }
13744 
13745 static const struct net_device_ops tg3_netdev_ops = {
13746 	.ndo_open		= tg3_open,
13747 	.ndo_stop		= tg3_close,
13748 	.ndo_start_xmit		= tg3_start_xmit,
13749 	.ndo_get_stats64	= tg3_get_stats64,
13750 	.ndo_validate_addr	= eth_validate_addr,
13751 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13752 	.ndo_set_mac_address	= tg3_set_mac_addr,
13753 	.ndo_do_ioctl		= tg3_ioctl,
13754 	.ndo_tx_timeout		= tg3_tx_timeout,
13755 	.ndo_change_mtu		= tg3_change_mtu,
13756 	.ndo_fix_features	= tg3_fix_features,
13757 	.ndo_set_features	= tg3_set_features,
13758 #ifdef CONFIG_NET_POLL_CONTROLLER
13759 	.ndo_poll_controller	= tg3_poll_controller,
13760 #endif
13761 };
13762 
13763 static void tg3_get_eeprom_size(struct tg3 *tp)
13764 {
13765 	u32 cursize, val, magic;
13766 
13767 	tp->nvram_size = EEPROM_CHIP_SIZE;
13768 
13769 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13770 		return;
13771 
13772 	if ((magic != TG3_EEPROM_MAGIC) &&
13773 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13774 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13775 		return;
13776 
13777 	/*
13778 	 * Size the chip by reading offsets at increasing powers of two.
13779 	 * When we encounter our validation signature, we know the addressing
13780 	 * has wrapped around, and thus have our chip size.
13781 	 */
13782 	cursize = 0x10;
13783 
13784 	while (cursize < tp->nvram_size) {
13785 		if (tg3_nvram_read(tp, cursize, &val) != 0)
13786 			return;
13787 
13788 		if (val == magic)
13789 			break;
13790 
13791 		cursize <<= 1;
13792 	}
13793 
13794 	tp->nvram_size = cursize;
13795 }
13796 
13797 static void tg3_get_nvram_size(struct tg3 *tp)
13798 {
13799 	u32 val;
13800 
13801 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13802 		return;
13803 
13804 	/* Selfboot format */
13805 	if (val != TG3_EEPROM_MAGIC) {
13806 		tg3_get_eeprom_size(tp);
13807 		return;
13808 	}
13809 
13810 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13811 		if (val != 0) {
13812 			/* This is confusing.  We want to operate on the
13813 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13814 			 * call will read from NVRAM and byteswap the data
13815 			 * according to the byteswapping settings for all
13816 			 * other register accesses.  This ensures the data we
13817 			 * want will always reside in the lower 16-bits.
13818 			 * However, the data in NVRAM is in LE format, which
13819 			 * means the data from the NVRAM read will always be
13820 			 * opposite the endianness of the CPU.  The 16-bit
13821 			 * byteswap then brings the data to CPU endianness.
13822 			 */
13823 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13824 			return;
13825 		}
13826 	}
13827 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13828 }
13829 
13830 static void tg3_get_nvram_info(struct tg3 *tp)
13831 {
13832 	u32 nvcfg1;
13833 
13834 	nvcfg1 = tr32(NVRAM_CFG1);
13835 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13836 		tg3_flag_set(tp, FLASH);
13837 	} else {
13838 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13839 		tw32(NVRAM_CFG1, nvcfg1);
13840 	}
13841 
13842 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13843 	    tg3_flag(tp, 5780_CLASS)) {
13844 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13845 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13846 			tp->nvram_jedecnum = JEDEC_ATMEL;
13847 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13848 			tg3_flag_set(tp, NVRAM_BUFFERED);
13849 			break;
13850 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13851 			tp->nvram_jedecnum = JEDEC_ATMEL;
13852 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13853 			break;
13854 		case FLASH_VENDOR_ATMEL_EEPROM:
13855 			tp->nvram_jedecnum = JEDEC_ATMEL;
13856 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13857 			tg3_flag_set(tp, NVRAM_BUFFERED);
13858 			break;
13859 		case FLASH_VENDOR_ST:
13860 			tp->nvram_jedecnum = JEDEC_ST;
13861 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13862 			tg3_flag_set(tp, NVRAM_BUFFERED);
13863 			break;
13864 		case FLASH_VENDOR_SAIFUN:
13865 			tp->nvram_jedecnum = JEDEC_SAIFUN;
13866 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13867 			break;
13868 		case FLASH_VENDOR_SST_SMALL:
13869 		case FLASH_VENDOR_SST_LARGE:
13870 			tp->nvram_jedecnum = JEDEC_SST;
13871 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13872 			break;
13873 		}
13874 	} else {
13875 		tp->nvram_jedecnum = JEDEC_ATMEL;
13876 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13877 		tg3_flag_set(tp, NVRAM_BUFFERED);
13878 	}
13879 }
13880 
13881 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13882 {
13883 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13884 	case FLASH_5752PAGE_SIZE_256:
13885 		tp->nvram_pagesize = 256;
13886 		break;
13887 	case FLASH_5752PAGE_SIZE_512:
13888 		tp->nvram_pagesize = 512;
13889 		break;
13890 	case FLASH_5752PAGE_SIZE_1K:
13891 		tp->nvram_pagesize = 1024;
13892 		break;
13893 	case FLASH_5752PAGE_SIZE_2K:
13894 		tp->nvram_pagesize = 2048;
13895 		break;
13896 	case FLASH_5752PAGE_SIZE_4K:
13897 		tp->nvram_pagesize = 4096;
13898 		break;
13899 	case FLASH_5752PAGE_SIZE_264:
13900 		tp->nvram_pagesize = 264;
13901 		break;
13902 	case FLASH_5752PAGE_SIZE_528:
13903 		tp->nvram_pagesize = 528;
13904 		break;
13905 	}
13906 }
13907 
13908 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13909 {
13910 	u32 nvcfg1;
13911 
13912 	nvcfg1 = tr32(NVRAM_CFG1);
13913 
13914 	/* NVRAM protection for TPM */
13915 	if (nvcfg1 & (1 << 27))
13916 		tg3_flag_set(tp, PROTECTED_NVRAM);
13917 
13918 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13919 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13920 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13921 		tp->nvram_jedecnum = JEDEC_ATMEL;
13922 		tg3_flag_set(tp, NVRAM_BUFFERED);
13923 		break;
13924 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13925 		tp->nvram_jedecnum = JEDEC_ATMEL;
13926 		tg3_flag_set(tp, NVRAM_BUFFERED);
13927 		tg3_flag_set(tp, FLASH);
13928 		break;
13929 	case FLASH_5752VENDOR_ST_M45PE10:
13930 	case FLASH_5752VENDOR_ST_M45PE20:
13931 	case FLASH_5752VENDOR_ST_M45PE40:
13932 		tp->nvram_jedecnum = JEDEC_ST;
13933 		tg3_flag_set(tp, NVRAM_BUFFERED);
13934 		tg3_flag_set(tp, FLASH);
13935 		break;
13936 	}
13937 
13938 	if (tg3_flag(tp, FLASH)) {
13939 		tg3_nvram_get_pagesize(tp, nvcfg1);
13940 	} else {
13941 		/* For eeprom, set pagesize to maximum eeprom size */
13942 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13943 
13944 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13945 		tw32(NVRAM_CFG1, nvcfg1);
13946 	}
13947 }
13948 
13949 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13950 {
13951 	u32 nvcfg1, protect = 0;
13952 
13953 	nvcfg1 = tr32(NVRAM_CFG1);
13954 
13955 	/* NVRAM protection for TPM */
13956 	if (nvcfg1 & (1 << 27)) {
13957 		tg3_flag_set(tp, PROTECTED_NVRAM);
13958 		protect = 1;
13959 	}
13960 
13961 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13962 	switch (nvcfg1) {
13963 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13964 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13965 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13966 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
13967 		tp->nvram_jedecnum = JEDEC_ATMEL;
13968 		tg3_flag_set(tp, NVRAM_BUFFERED);
13969 		tg3_flag_set(tp, FLASH);
13970 		tp->nvram_pagesize = 264;
13971 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13972 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13973 			tp->nvram_size = (protect ? 0x3e200 :
13974 					  TG3_NVRAM_SIZE_512KB);
13975 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13976 			tp->nvram_size = (protect ? 0x1f200 :
13977 					  TG3_NVRAM_SIZE_256KB);
13978 		else
13979 			tp->nvram_size = (protect ? 0x1f200 :
13980 					  TG3_NVRAM_SIZE_128KB);
13981 		break;
13982 	case FLASH_5752VENDOR_ST_M45PE10:
13983 	case FLASH_5752VENDOR_ST_M45PE20:
13984 	case FLASH_5752VENDOR_ST_M45PE40:
13985 		tp->nvram_jedecnum = JEDEC_ST;
13986 		tg3_flag_set(tp, NVRAM_BUFFERED);
13987 		tg3_flag_set(tp, FLASH);
13988 		tp->nvram_pagesize = 256;
13989 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13990 			tp->nvram_size = (protect ?
13991 					  TG3_NVRAM_SIZE_64KB :
13992 					  TG3_NVRAM_SIZE_128KB);
13993 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13994 			tp->nvram_size = (protect ?
13995 					  TG3_NVRAM_SIZE_64KB :
13996 					  TG3_NVRAM_SIZE_256KB);
13997 		else
13998 			tp->nvram_size = (protect ?
13999 					  TG3_NVRAM_SIZE_128KB :
14000 					  TG3_NVRAM_SIZE_512KB);
14001 		break;
14002 	}
14003 }
14004 
14005 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14006 {
14007 	u32 nvcfg1;
14008 
14009 	nvcfg1 = tr32(NVRAM_CFG1);
14010 
14011 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14012 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14013 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14014 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14015 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14016 		tp->nvram_jedecnum = JEDEC_ATMEL;
14017 		tg3_flag_set(tp, NVRAM_BUFFERED);
14018 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14019 
14020 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14021 		tw32(NVRAM_CFG1, nvcfg1);
14022 		break;
14023 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14024 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14025 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14026 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14027 		tp->nvram_jedecnum = JEDEC_ATMEL;
14028 		tg3_flag_set(tp, NVRAM_BUFFERED);
14029 		tg3_flag_set(tp, FLASH);
14030 		tp->nvram_pagesize = 264;
14031 		break;
14032 	case FLASH_5752VENDOR_ST_M45PE10:
14033 	case FLASH_5752VENDOR_ST_M45PE20:
14034 	case FLASH_5752VENDOR_ST_M45PE40:
14035 		tp->nvram_jedecnum = JEDEC_ST;
14036 		tg3_flag_set(tp, NVRAM_BUFFERED);
14037 		tg3_flag_set(tp, FLASH);
14038 		tp->nvram_pagesize = 256;
14039 		break;
14040 	}
14041 }
14042 
14043 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14044 {
14045 	u32 nvcfg1, protect = 0;
14046 
14047 	nvcfg1 = tr32(NVRAM_CFG1);
14048 
14049 	/* NVRAM protection for TPM */
14050 	if (nvcfg1 & (1 << 27)) {
14051 		tg3_flag_set(tp, PROTECTED_NVRAM);
14052 		protect = 1;
14053 	}
14054 
14055 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14056 	switch (nvcfg1) {
14057 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14058 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14059 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14060 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14061 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14062 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14063 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14064 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14065 		tp->nvram_jedecnum = JEDEC_ATMEL;
14066 		tg3_flag_set(tp, NVRAM_BUFFERED);
14067 		tg3_flag_set(tp, FLASH);
14068 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14069 		tp->nvram_pagesize = 256;
14070 		break;
14071 	case FLASH_5761VENDOR_ST_A_M45PE20:
14072 	case FLASH_5761VENDOR_ST_A_M45PE40:
14073 	case FLASH_5761VENDOR_ST_A_M45PE80:
14074 	case FLASH_5761VENDOR_ST_A_M45PE16:
14075 	case FLASH_5761VENDOR_ST_M_M45PE20:
14076 	case FLASH_5761VENDOR_ST_M_M45PE40:
14077 	case FLASH_5761VENDOR_ST_M_M45PE80:
14078 	case FLASH_5761VENDOR_ST_M_M45PE16:
14079 		tp->nvram_jedecnum = JEDEC_ST;
14080 		tg3_flag_set(tp, NVRAM_BUFFERED);
14081 		tg3_flag_set(tp, FLASH);
14082 		tp->nvram_pagesize = 256;
14083 		break;
14084 	}
14085 
14086 	if (protect) {
14087 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14088 	} else {
14089 		switch (nvcfg1) {
14090 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14091 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14092 		case FLASH_5761VENDOR_ST_A_M45PE16:
14093 		case FLASH_5761VENDOR_ST_M_M45PE16:
14094 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14095 			break;
14096 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14097 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14098 		case FLASH_5761VENDOR_ST_A_M45PE80:
14099 		case FLASH_5761VENDOR_ST_M_M45PE80:
14100 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14101 			break;
14102 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14103 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14104 		case FLASH_5761VENDOR_ST_A_M45PE40:
14105 		case FLASH_5761VENDOR_ST_M_M45PE40:
14106 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14107 			break;
14108 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14109 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14110 		case FLASH_5761VENDOR_ST_A_M45PE20:
14111 		case FLASH_5761VENDOR_ST_M_M45PE20:
14112 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14113 			break;
14114 		}
14115 	}
14116 }
14117 
14118 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14119 {
14120 	tp->nvram_jedecnum = JEDEC_ATMEL;
14121 	tg3_flag_set(tp, NVRAM_BUFFERED);
14122 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14123 }
14124 
14125 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14126 {
14127 	u32 nvcfg1;
14128 
14129 	nvcfg1 = tr32(NVRAM_CFG1);
14130 
14131 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14132 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14133 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14134 		tp->nvram_jedecnum = JEDEC_ATMEL;
14135 		tg3_flag_set(tp, NVRAM_BUFFERED);
14136 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14137 
14138 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14139 		tw32(NVRAM_CFG1, nvcfg1);
14140 		return;
14141 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14142 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14143 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14144 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14145 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14146 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14147 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14148 		tp->nvram_jedecnum = JEDEC_ATMEL;
14149 		tg3_flag_set(tp, NVRAM_BUFFERED);
14150 		tg3_flag_set(tp, FLASH);
14151 
14152 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14153 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14154 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14155 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14156 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14157 			break;
14158 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14159 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14160 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14161 			break;
14162 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14163 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14164 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14165 			break;
14166 		}
14167 		break;
14168 	case FLASH_5752VENDOR_ST_M45PE10:
14169 	case FLASH_5752VENDOR_ST_M45PE20:
14170 	case FLASH_5752VENDOR_ST_M45PE40:
14171 		tp->nvram_jedecnum = JEDEC_ST;
14172 		tg3_flag_set(tp, NVRAM_BUFFERED);
14173 		tg3_flag_set(tp, FLASH);
14174 
14175 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14176 		case FLASH_5752VENDOR_ST_M45PE10:
14177 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14178 			break;
14179 		case FLASH_5752VENDOR_ST_M45PE20:
14180 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14181 			break;
14182 		case FLASH_5752VENDOR_ST_M45PE40:
14183 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14184 			break;
14185 		}
14186 		break;
14187 	default:
14188 		tg3_flag_set(tp, NO_NVRAM);
14189 		return;
14190 	}
14191 
14192 	tg3_nvram_get_pagesize(tp, nvcfg1);
14193 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14194 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14195 }
14196 
14197 
14198 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14199 {
14200 	u32 nvcfg1;
14201 
14202 	nvcfg1 = tr32(NVRAM_CFG1);
14203 
14204 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14205 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14206 	case FLASH_5717VENDOR_MICRO_EEPROM:
14207 		tp->nvram_jedecnum = JEDEC_ATMEL;
14208 		tg3_flag_set(tp, NVRAM_BUFFERED);
14209 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14210 
14211 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14212 		tw32(NVRAM_CFG1, nvcfg1);
14213 		return;
14214 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14215 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14216 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14217 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14218 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14219 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14220 	case FLASH_5717VENDOR_ATMEL_45USPT:
14221 		tp->nvram_jedecnum = JEDEC_ATMEL;
14222 		tg3_flag_set(tp, NVRAM_BUFFERED);
14223 		tg3_flag_set(tp, FLASH);
14224 
14225 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14226 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14227 			/* Detect size with tg3_nvram_get_size() */
14228 			break;
14229 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14230 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14231 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14232 			break;
14233 		default:
14234 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14235 			break;
14236 		}
14237 		break;
14238 	case FLASH_5717VENDOR_ST_M_M25PE10:
14239 	case FLASH_5717VENDOR_ST_A_M25PE10:
14240 	case FLASH_5717VENDOR_ST_M_M45PE10:
14241 	case FLASH_5717VENDOR_ST_A_M45PE10:
14242 	case FLASH_5717VENDOR_ST_M_M25PE20:
14243 	case FLASH_5717VENDOR_ST_A_M25PE20:
14244 	case FLASH_5717VENDOR_ST_M_M45PE20:
14245 	case FLASH_5717VENDOR_ST_A_M45PE20:
14246 	case FLASH_5717VENDOR_ST_25USPT:
14247 	case FLASH_5717VENDOR_ST_45USPT:
14248 		tp->nvram_jedecnum = JEDEC_ST;
14249 		tg3_flag_set(tp, NVRAM_BUFFERED);
14250 		tg3_flag_set(tp, FLASH);
14251 
14252 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14253 		case FLASH_5717VENDOR_ST_M_M25PE20:
14254 		case FLASH_5717VENDOR_ST_M_M45PE20:
14255 			/* Detect size with tg3_nvram_get_size() */
14256 			break;
14257 		case FLASH_5717VENDOR_ST_A_M25PE20:
14258 		case FLASH_5717VENDOR_ST_A_M45PE20:
14259 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14260 			break;
14261 		default:
14262 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14263 			break;
14264 		}
14265 		break;
14266 	default:
14267 		tg3_flag_set(tp, NO_NVRAM);
14268 		return;
14269 	}
14270 
14271 	tg3_nvram_get_pagesize(tp, nvcfg1);
14272 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14273 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14274 }
14275 
14276 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14277 {
14278 	u32 nvcfg1, nvmpinstrp;
14279 
14280 	nvcfg1 = tr32(NVRAM_CFG1);
14281 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14282 
14283 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14284 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14285 			tg3_flag_set(tp, NO_NVRAM);
14286 			return;
14287 		}
14288 
14289 		switch (nvmpinstrp) {
14290 		case FLASH_5762_EEPROM_HD:
14291 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14292 			break;
14293 		case FLASH_5762_EEPROM_LD:
14294 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14295 			break;
14296 		case FLASH_5720VENDOR_M_ST_M45PE20:
14297 			/* This pinstrap supports multiple sizes, so force it
14298 			 * to read the actual size from location 0xf0.
14299 			 */
14300 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14301 			break;
14302 		}
14303 	}
14304 
14305 	switch (nvmpinstrp) {
14306 	case FLASH_5720_EEPROM_HD:
14307 	case FLASH_5720_EEPROM_LD:
14308 		tp->nvram_jedecnum = JEDEC_ATMEL;
14309 		tg3_flag_set(tp, NVRAM_BUFFERED);
14310 
14311 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14312 		tw32(NVRAM_CFG1, nvcfg1);
14313 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14314 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14315 		else
14316 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14317 		return;
14318 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14319 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14320 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14321 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14322 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14323 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14324 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14325 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14326 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14327 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14328 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14329 	case FLASH_5720VENDOR_ATMEL_45USPT:
14330 		tp->nvram_jedecnum = JEDEC_ATMEL;
14331 		tg3_flag_set(tp, NVRAM_BUFFERED);
14332 		tg3_flag_set(tp, FLASH);
14333 
14334 		switch (nvmpinstrp) {
14335 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14336 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14337 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14338 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14339 			break;
14340 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14341 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14342 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14343 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14344 			break;
14345 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14346 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14347 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14348 			break;
14349 		default:
14350 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14351 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14352 			break;
14353 		}
14354 		break;
14355 	case FLASH_5720VENDOR_M_ST_M25PE10:
14356 	case FLASH_5720VENDOR_M_ST_M45PE10:
14357 	case FLASH_5720VENDOR_A_ST_M25PE10:
14358 	case FLASH_5720VENDOR_A_ST_M45PE10:
14359 	case FLASH_5720VENDOR_M_ST_M25PE20:
14360 	case FLASH_5720VENDOR_M_ST_M45PE20:
14361 	case FLASH_5720VENDOR_A_ST_M25PE20:
14362 	case FLASH_5720VENDOR_A_ST_M45PE20:
14363 	case FLASH_5720VENDOR_M_ST_M25PE40:
14364 	case FLASH_5720VENDOR_M_ST_M45PE40:
14365 	case FLASH_5720VENDOR_A_ST_M25PE40:
14366 	case FLASH_5720VENDOR_A_ST_M45PE40:
14367 	case FLASH_5720VENDOR_M_ST_M25PE80:
14368 	case FLASH_5720VENDOR_M_ST_M45PE80:
14369 	case FLASH_5720VENDOR_A_ST_M25PE80:
14370 	case FLASH_5720VENDOR_A_ST_M45PE80:
14371 	case FLASH_5720VENDOR_ST_25USPT:
14372 	case FLASH_5720VENDOR_ST_45USPT:
14373 		tp->nvram_jedecnum = JEDEC_ST;
14374 		tg3_flag_set(tp, NVRAM_BUFFERED);
14375 		tg3_flag_set(tp, FLASH);
14376 
14377 		switch (nvmpinstrp) {
14378 		case FLASH_5720VENDOR_M_ST_M25PE20:
14379 		case FLASH_5720VENDOR_M_ST_M45PE20:
14380 		case FLASH_5720VENDOR_A_ST_M25PE20:
14381 		case FLASH_5720VENDOR_A_ST_M45PE20:
14382 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14383 			break;
14384 		case FLASH_5720VENDOR_M_ST_M25PE40:
14385 		case FLASH_5720VENDOR_M_ST_M45PE40:
14386 		case FLASH_5720VENDOR_A_ST_M25PE40:
14387 		case FLASH_5720VENDOR_A_ST_M45PE40:
14388 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14389 			break;
14390 		case FLASH_5720VENDOR_M_ST_M25PE80:
14391 		case FLASH_5720VENDOR_M_ST_M45PE80:
14392 		case FLASH_5720VENDOR_A_ST_M25PE80:
14393 		case FLASH_5720VENDOR_A_ST_M45PE80:
14394 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14395 			break;
14396 		default:
14397 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14398 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14399 			break;
14400 		}
14401 		break;
14402 	default:
14403 		tg3_flag_set(tp, NO_NVRAM);
14404 		return;
14405 	}
14406 
14407 	tg3_nvram_get_pagesize(tp, nvcfg1);
14408 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14409 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14410 
14411 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14412 		u32 val;
14413 
14414 		if (tg3_nvram_read(tp, 0, &val))
14415 			return;
14416 
14417 		if (val != TG3_EEPROM_MAGIC &&
14418 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14419 			tg3_flag_set(tp, NO_NVRAM);
14420 	}
14421 }
14422 
14423 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14424 static void tg3_nvram_init(struct tg3 *tp)
14425 {
14426 	if (tg3_flag(tp, IS_SSB_CORE)) {
14427 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14428 		tg3_flag_clear(tp, NVRAM);
14429 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14430 		tg3_flag_set(tp, NO_NVRAM);
14431 		return;
14432 	}
14433 
14434 	tw32_f(GRC_EEPROM_ADDR,
14435 	     (EEPROM_ADDR_FSM_RESET |
14436 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14437 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14438 
14439 	msleep(1);
14440 
14441 	/* Enable seeprom accesses. */
14442 	tw32_f(GRC_LOCAL_CTRL,
14443 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14444 	udelay(100);
14445 
14446 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14447 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14448 		tg3_flag_set(tp, NVRAM);
14449 
14450 		if (tg3_nvram_lock(tp)) {
14451 			netdev_warn(tp->dev,
14452 				    "Cannot get nvram lock, %s failed\n",
14453 				    __func__);
14454 			return;
14455 		}
14456 		tg3_enable_nvram_access(tp);
14457 
14458 		tp->nvram_size = 0;
14459 
14460 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14461 			tg3_get_5752_nvram_info(tp);
14462 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14463 			tg3_get_5755_nvram_info(tp);
14464 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14465 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14466 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14467 			tg3_get_5787_nvram_info(tp);
14468 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14469 			tg3_get_5761_nvram_info(tp);
14470 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14471 			tg3_get_5906_nvram_info(tp);
14472 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14473 			 tg3_flag(tp, 57765_CLASS))
14474 			tg3_get_57780_nvram_info(tp);
14475 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14476 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14477 			tg3_get_5717_nvram_info(tp);
14478 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14479 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14480 			tg3_get_5720_nvram_info(tp);
14481 		else
14482 			tg3_get_nvram_info(tp);
14483 
14484 		if (tp->nvram_size == 0)
14485 			tg3_get_nvram_size(tp);
14486 
14487 		tg3_disable_nvram_access(tp);
14488 		tg3_nvram_unlock(tp);
14489 
14490 	} else {
14491 		tg3_flag_clear(tp, NVRAM);
14492 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14493 
14494 		tg3_get_eeprom_size(tp);
14495 	}
14496 }
14497 
14498 struct subsys_tbl_ent {
14499 	u16 subsys_vendor, subsys_devid;
14500 	u32 phy_id;
14501 };
14502 
14503 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14504 	/* Broadcom boards. */
14505 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14506 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14507 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14508 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14509 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14510 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14511 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14512 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14513 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14514 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14515 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14516 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14517 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14518 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14519 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14520 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14521 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14522 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14523 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14524 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14525 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14526 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14527 
14528 	/* 3com boards. */
14529 	{ TG3PCI_SUBVENDOR_ID_3COM,
14530 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14531 	{ TG3PCI_SUBVENDOR_ID_3COM,
14532 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14533 	{ TG3PCI_SUBVENDOR_ID_3COM,
14534 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14535 	{ TG3PCI_SUBVENDOR_ID_3COM,
14536 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14537 	{ TG3PCI_SUBVENDOR_ID_3COM,
14538 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14539 
14540 	/* DELL boards. */
14541 	{ TG3PCI_SUBVENDOR_ID_DELL,
14542 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14543 	{ TG3PCI_SUBVENDOR_ID_DELL,
14544 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14545 	{ TG3PCI_SUBVENDOR_ID_DELL,
14546 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14547 	{ TG3PCI_SUBVENDOR_ID_DELL,
14548 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14549 
14550 	/* Compaq boards. */
14551 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14552 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14553 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14554 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14555 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14556 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14557 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14558 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14559 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14560 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14561 
14562 	/* IBM boards. */
14563 	{ TG3PCI_SUBVENDOR_ID_IBM,
14564 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14565 };
14566 
14567 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14568 {
14569 	int i;
14570 
14571 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14572 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14573 		     tp->pdev->subsystem_vendor) &&
14574 		    (subsys_id_to_phy_id[i].subsys_devid ==
14575 		     tp->pdev->subsystem_device))
14576 			return &subsys_id_to_phy_id[i];
14577 	}
14578 	return NULL;
14579 }
14580 
14581 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14582 {
14583 	u32 val;
14584 
14585 	tp->phy_id = TG3_PHY_ID_INVALID;
14586 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14587 
14588 	/* Assume an onboard device and WOL capable by default.  */
14589 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14590 	tg3_flag_set(tp, WOL_CAP);
14591 
14592 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14593 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14594 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14595 			tg3_flag_set(tp, IS_NIC);
14596 		}
14597 		val = tr32(VCPU_CFGSHDW);
14598 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14599 			tg3_flag_set(tp, ASPM_WORKAROUND);
14600 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14601 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14602 			tg3_flag_set(tp, WOL_ENABLE);
14603 			device_set_wakeup_enable(&tp->pdev->dev, true);
14604 		}
14605 		goto done;
14606 	}
14607 
14608 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14609 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14610 		u32 nic_cfg, led_cfg;
14611 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14612 		int eeprom_phy_serdes = 0;
14613 
14614 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14615 		tp->nic_sram_data_cfg = nic_cfg;
14616 
14617 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14618 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14619 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14620 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14621 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14622 		    (ver > 0) && (ver < 0x100))
14623 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14624 
14625 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14626 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14627 
14628 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14629 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14630 			eeprom_phy_serdes = 1;
14631 
14632 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14633 		if (nic_phy_id != 0) {
14634 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14635 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14636 
14637 			eeprom_phy_id  = (id1 >> 16) << 10;
14638 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14639 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14640 		} else
14641 			eeprom_phy_id = 0;
14642 
14643 		tp->phy_id = eeprom_phy_id;
14644 		if (eeprom_phy_serdes) {
14645 			if (!tg3_flag(tp, 5705_PLUS))
14646 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14647 			else
14648 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14649 		}
14650 
14651 		if (tg3_flag(tp, 5750_PLUS))
14652 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14653 				    SHASTA_EXT_LED_MODE_MASK);
14654 		else
14655 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14656 
14657 		switch (led_cfg) {
14658 		default:
14659 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14660 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14661 			break;
14662 
14663 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14664 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14665 			break;
14666 
14667 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14668 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14669 
14670 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14671 			 * read on some older 5700/5701 bootcode.
14672 			 */
14673 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14674 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14675 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14676 
14677 			break;
14678 
14679 		case SHASTA_EXT_LED_SHARED:
14680 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14681 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14682 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14683 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14684 						 LED_CTRL_MODE_PHY_2);
14685 			break;
14686 
14687 		case SHASTA_EXT_LED_MAC:
14688 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14689 			break;
14690 
14691 		case SHASTA_EXT_LED_COMBO:
14692 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14693 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14694 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14695 						 LED_CTRL_MODE_PHY_2);
14696 			break;
14697 
14698 		}
14699 
14700 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14701 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14702 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14703 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14704 
14705 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14706 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14707 
14708 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14709 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14710 			if ((tp->pdev->subsystem_vendor ==
14711 			     PCI_VENDOR_ID_ARIMA) &&
14712 			    (tp->pdev->subsystem_device == 0x205a ||
14713 			     tp->pdev->subsystem_device == 0x2063))
14714 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14715 		} else {
14716 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14717 			tg3_flag_set(tp, IS_NIC);
14718 		}
14719 
14720 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14721 			tg3_flag_set(tp, ENABLE_ASF);
14722 			if (tg3_flag(tp, 5750_PLUS))
14723 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14724 		}
14725 
14726 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14727 		    tg3_flag(tp, 5750_PLUS))
14728 			tg3_flag_set(tp, ENABLE_APE);
14729 
14730 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14731 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14732 			tg3_flag_clear(tp, WOL_CAP);
14733 
14734 		if (tg3_flag(tp, WOL_CAP) &&
14735 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14736 			tg3_flag_set(tp, WOL_ENABLE);
14737 			device_set_wakeup_enable(&tp->pdev->dev, true);
14738 		}
14739 
14740 		if (cfg2 & (1 << 17))
14741 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14742 
14743 		/* serdes signal pre-emphasis in register 0x590 set by */
14744 		/* bootcode if bit 18 is set */
14745 		if (cfg2 & (1 << 18))
14746 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14747 
14748 		if ((tg3_flag(tp, 57765_PLUS) ||
14749 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14750 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14751 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14752 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14753 
14754 		if (tg3_flag(tp, PCI_EXPRESS)) {
14755 			u32 cfg3;
14756 
14757 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14758 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14759 			    !tg3_flag(tp, 57765_PLUS) &&
14760 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14761 				tg3_flag_set(tp, ASPM_WORKAROUND);
14762 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14763 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14764 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14765 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14766 		}
14767 
14768 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14769 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14770 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14771 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14772 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14773 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14774 	}
14775 done:
14776 	if (tg3_flag(tp, WOL_CAP))
14777 		device_set_wakeup_enable(&tp->pdev->dev,
14778 					 tg3_flag(tp, WOL_ENABLE));
14779 	else
14780 		device_set_wakeup_capable(&tp->pdev->dev, false);
14781 }
14782 
14783 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14784 {
14785 	int i, err;
14786 	u32 val2, off = offset * 8;
14787 
14788 	err = tg3_nvram_lock(tp);
14789 	if (err)
14790 		return err;
14791 
14792 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14793 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14794 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14795 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14796 	udelay(10);
14797 
14798 	for (i = 0; i < 100; i++) {
14799 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14800 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
14801 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14802 			break;
14803 		}
14804 		udelay(10);
14805 	}
14806 
14807 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14808 
14809 	tg3_nvram_unlock(tp);
14810 	if (val2 & APE_OTP_STATUS_CMD_DONE)
14811 		return 0;
14812 
14813 	return -EBUSY;
14814 }
14815 
14816 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14817 {
14818 	int i;
14819 	u32 val;
14820 
14821 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14822 	tw32(OTP_CTRL, cmd);
14823 
14824 	/* Wait for up to 1 ms for command to execute. */
14825 	for (i = 0; i < 100; i++) {
14826 		val = tr32(OTP_STATUS);
14827 		if (val & OTP_STATUS_CMD_DONE)
14828 			break;
14829 		udelay(10);
14830 	}
14831 
14832 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14833 }
14834 
14835 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14836  * configuration is a 32-bit value that straddles the alignment boundary.
14837  * We do two 32-bit reads and then shift and merge the results.
14838  */
14839 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14840 {
14841 	u32 bhalf_otp, thalf_otp;
14842 
14843 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14844 
14845 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14846 		return 0;
14847 
14848 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14849 
14850 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14851 		return 0;
14852 
14853 	thalf_otp = tr32(OTP_READ_DATA);
14854 
14855 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14856 
14857 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14858 		return 0;
14859 
14860 	bhalf_otp = tr32(OTP_READ_DATA);
14861 
14862 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14863 }
14864 
14865 static void tg3_phy_init_link_config(struct tg3 *tp)
14866 {
14867 	u32 adv = ADVERTISED_Autoneg;
14868 
14869 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14870 		adv |= ADVERTISED_1000baseT_Half |
14871 		       ADVERTISED_1000baseT_Full;
14872 
14873 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14874 		adv |= ADVERTISED_100baseT_Half |
14875 		       ADVERTISED_100baseT_Full |
14876 		       ADVERTISED_10baseT_Half |
14877 		       ADVERTISED_10baseT_Full |
14878 		       ADVERTISED_TP;
14879 	else
14880 		adv |= ADVERTISED_FIBRE;
14881 
14882 	tp->link_config.advertising = adv;
14883 	tp->link_config.speed = SPEED_UNKNOWN;
14884 	tp->link_config.duplex = DUPLEX_UNKNOWN;
14885 	tp->link_config.autoneg = AUTONEG_ENABLE;
14886 	tp->link_config.active_speed = SPEED_UNKNOWN;
14887 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14888 
14889 	tp->old_link = -1;
14890 }
14891 
14892 static int tg3_phy_probe(struct tg3 *tp)
14893 {
14894 	u32 hw_phy_id_1, hw_phy_id_2;
14895 	u32 hw_phy_id, hw_phy_id_masked;
14896 	int err;
14897 
14898 	/* flow control autonegotiation is default behavior */
14899 	tg3_flag_set(tp, PAUSE_AUTONEG);
14900 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14901 
14902 	if (tg3_flag(tp, ENABLE_APE)) {
14903 		switch (tp->pci_fn) {
14904 		case 0:
14905 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14906 			break;
14907 		case 1:
14908 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14909 			break;
14910 		case 2:
14911 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14912 			break;
14913 		case 3:
14914 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14915 			break;
14916 		}
14917 	}
14918 
14919 	if (!tg3_flag(tp, ENABLE_ASF) &&
14920 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14921 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14922 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14923 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14924 
14925 	if (tg3_flag(tp, USE_PHYLIB))
14926 		return tg3_phy_init(tp);
14927 
14928 	/* Reading the PHY ID register can conflict with ASF
14929 	 * firmware access to the PHY hardware.
14930 	 */
14931 	err = 0;
14932 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14933 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14934 	} else {
14935 		/* Now read the physical PHY_ID from the chip and verify
14936 		 * that it is sane.  If it doesn't look good, we fall back
14937 		 * to either the hard-coded table based PHY_ID and failing
14938 		 * that the value found in the eeprom area.
14939 		 */
14940 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14941 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14942 
14943 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14944 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14945 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14946 
14947 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14948 	}
14949 
14950 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14951 		tp->phy_id = hw_phy_id;
14952 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14953 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14954 		else
14955 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14956 	} else {
14957 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
14958 			/* Do nothing, phy ID already set up in
14959 			 * tg3_get_eeprom_hw_cfg().
14960 			 */
14961 		} else {
14962 			struct subsys_tbl_ent *p;
14963 
14964 			/* No eeprom signature?  Try the hardcoded
14965 			 * subsys device table.
14966 			 */
14967 			p = tg3_lookup_by_subsys(tp);
14968 			if (p) {
14969 				tp->phy_id = p->phy_id;
14970 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
14971 				/* For now we saw the IDs 0xbc050cd0,
14972 				 * 0xbc050f80 and 0xbc050c30 on devices
14973 				 * connected to an BCM4785 and there are
14974 				 * probably more. Just assume that the phy is
14975 				 * supported when it is connected to a SSB core
14976 				 * for now.
14977 				 */
14978 				return -ENODEV;
14979 			}
14980 
14981 			if (!tp->phy_id ||
14982 			    tp->phy_id == TG3_PHY_ID_BCM8002)
14983 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14984 		}
14985 	}
14986 
14987 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14988 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14989 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
14990 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
14991 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
14992 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14993 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14994 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14995 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14996 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14997 
14998 	tg3_phy_init_link_config(tp);
14999 
15000 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15001 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15002 	    !tg3_flag(tp, ENABLE_APE) &&
15003 	    !tg3_flag(tp, ENABLE_ASF)) {
15004 		u32 bmsr, dummy;
15005 
15006 		tg3_readphy(tp, MII_BMSR, &bmsr);
15007 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15008 		    (bmsr & BMSR_LSTATUS))
15009 			goto skip_phy_reset;
15010 
15011 		err = tg3_phy_reset(tp);
15012 		if (err)
15013 			return err;
15014 
15015 		tg3_phy_set_wirespeed(tp);
15016 
15017 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15018 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15019 					    tp->link_config.flowctrl);
15020 
15021 			tg3_writephy(tp, MII_BMCR,
15022 				     BMCR_ANENABLE | BMCR_ANRESTART);
15023 		}
15024 	}
15025 
15026 skip_phy_reset:
15027 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15028 		err = tg3_init_5401phy_dsp(tp);
15029 		if (err)
15030 			return err;
15031 
15032 		err = tg3_init_5401phy_dsp(tp);
15033 	}
15034 
15035 	return err;
15036 }
15037 
15038 static void tg3_read_vpd(struct tg3 *tp)
15039 {
15040 	u8 *vpd_data;
15041 	unsigned int block_end, rosize, len;
15042 	u32 vpdlen;
15043 	int j, i = 0;
15044 
15045 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15046 	if (!vpd_data)
15047 		goto out_no_vpd;
15048 
15049 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15050 	if (i < 0)
15051 		goto out_not_found;
15052 
15053 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15054 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15055 	i += PCI_VPD_LRDT_TAG_SIZE;
15056 
15057 	if (block_end > vpdlen)
15058 		goto out_not_found;
15059 
15060 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15061 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15062 	if (j > 0) {
15063 		len = pci_vpd_info_field_size(&vpd_data[j]);
15064 
15065 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15066 		if (j + len > block_end || len != 4 ||
15067 		    memcmp(&vpd_data[j], "1028", 4))
15068 			goto partno;
15069 
15070 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15071 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15072 		if (j < 0)
15073 			goto partno;
15074 
15075 		len = pci_vpd_info_field_size(&vpd_data[j]);
15076 
15077 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15078 		if (j + len > block_end)
15079 			goto partno;
15080 
15081 		if (len >= sizeof(tp->fw_ver))
15082 			len = sizeof(tp->fw_ver) - 1;
15083 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15084 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15085 			 &vpd_data[j]);
15086 	}
15087 
15088 partno:
15089 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15090 				      PCI_VPD_RO_KEYWORD_PARTNO);
15091 	if (i < 0)
15092 		goto out_not_found;
15093 
15094 	len = pci_vpd_info_field_size(&vpd_data[i]);
15095 
15096 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15097 	if (len > TG3_BPN_SIZE ||
15098 	    (len + i) > vpdlen)
15099 		goto out_not_found;
15100 
15101 	memcpy(tp->board_part_number, &vpd_data[i], len);
15102 
15103 out_not_found:
15104 	kfree(vpd_data);
15105 	if (tp->board_part_number[0])
15106 		return;
15107 
15108 out_no_vpd:
15109 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15110 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15111 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15112 			strcpy(tp->board_part_number, "BCM5717");
15113 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15114 			strcpy(tp->board_part_number, "BCM5718");
15115 		else
15116 			goto nomatch;
15117 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15118 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15119 			strcpy(tp->board_part_number, "BCM57780");
15120 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15121 			strcpy(tp->board_part_number, "BCM57760");
15122 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15123 			strcpy(tp->board_part_number, "BCM57790");
15124 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15125 			strcpy(tp->board_part_number, "BCM57788");
15126 		else
15127 			goto nomatch;
15128 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15129 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15130 			strcpy(tp->board_part_number, "BCM57761");
15131 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15132 			strcpy(tp->board_part_number, "BCM57765");
15133 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15134 			strcpy(tp->board_part_number, "BCM57781");
15135 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15136 			strcpy(tp->board_part_number, "BCM57785");
15137 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15138 			strcpy(tp->board_part_number, "BCM57791");
15139 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15140 			strcpy(tp->board_part_number, "BCM57795");
15141 		else
15142 			goto nomatch;
15143 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15144 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15145 			strcpy(tp->board_part_number, "BCM57762");
15146 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15147 			strcpy(tp->board_part_number, "BCM57766");
15148 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15149 			strcpy(tp->board_part_number, "BCM57782");
15150 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15151 			strcpy(tp->board_part_number, "BCM57786");
15152 		else
15153 			goto nomatch;
15154 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15155 		strcpy(tp->board_part_number, "BCM95906");
15156 	} else {
15157 nomatch:
15158 		strcpy(tp->board_part_number, "none");
15159 	}
15160 }
15161 
15162 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15163 {
15164 	u32 val;
15165 
15166 	if (tg3_nvram_read(tp, offset, &val) ||
15167 	    (val & 0xfc000000) != 0x0c000000 ||
15168 	    tg3_nvram_read(tp, offset + 4, &val) ||
15169 	    val != 0)
15170 		return 0;
15171 
15172 	return 1;
15173 }
15174 
15175 static void tg3_read_bc_ver(struct tg3 *tp)
15176 {
15177 	u32 val, offset, start, ver_offset;
15178 	int i, dst_off;
15179 	bool newver = false;
15180 
15181 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15182 	    tg3_nvram_read(tp, 0x4, &start))
15183 		return;
15184 
15185 	offset = tg3_nvram_logical_addr(tp, offset);
15186 
15187 	if (tg3_nvram_read(tp, offset, &val))
15188 		return;
15189 
15190 	if ((val & 0xfc000000) == 0x0c000000) {
15191 		if (tg3_nvram_read(tp, offset + 4, &val))
15192 			return;
15193 
15194 		if (val == 0)
15195 			newver = true;
15196 	}
15197 
15198 	dst_off = strlen(tp->fw_ver);
15199 
15200 	if (newver) {
15201 		if (TG3_VER_SIZE - dst_off < 16 ||
15202 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15203 			return;
15204 
15205 		offset = offset + ver_offset - start;
15206 		for (i = 0; i < 16; i += 4) {
15207 			__be32 v;
15208 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15209 				return;
15210 
15211 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15212 		}
15213 	} else {
15214 		u32 major, minor;
15215 
15216 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15217 			return;
15218 
15219 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15220 			TG3_NVM_BCVER_MAJSFT;
15221 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15222 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15223 			 "v%d.%02d", major, minor);
15224 	}
15225 }
15226 
15227 static void tg3_read_hwsb_ver(struct tg3 *tp)
15228 {
15229 	u32 val, major, minor;
15230 
15231 	/* Use native endian representation */
15232 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15233 		return;
15234 
15235 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15236 		TG3_NVM_HWSB_CFG1_MAJSFT;
15237 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15238 		TG3_NVM_HWSB_CFG1_MINSFT;
15239 
15240 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15241 }
15242 
15243 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15244 {
15245 	u32 offset, major, minor, build;
15246 
15247 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15248 
15249 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15250 		return;
15251 
15252 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15253 	case TG3_EEPROM_SB_REVISION_0:
15254 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15255 		break;
15256 	case TG3_EEPROM_SB_REVISION_2:
15257 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15258 		break;
15259 	case TG3_EEPROM_SB_REVISION_3:
15260 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15261 		break;
15262 	case TG3_EEPROM_SB_REVISION_4:
15263 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15264 		break;
15265 	case TG3_EEPROM_SB_REVISION_5:
15266 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15267 		break;
15268 	case TG3_EEPROM_SB_REVISION_6:
15269 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15270 		break;
15271 	default:
15272 		return;
15273 	}
15274 
15275 	if (tg3_nvram_read(tp, offset, &val))
15276 		return;
15277 
15278 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15279 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15280 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15281 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15282 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15283 
15284 	if (minor > 99 || build > 26)
15285 		return;
15286 
15287 	offset = strlen(tp->fw_ver);
15288 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15289 		 " v%d.%02d", major, minor);
15290 
15291 	if (build > 0) {
15292 		offset = strlen(tp->fw_ver);
15293 		if (offset < TG3_VER_SIZE - 1)
15294 			tp->fw_ver[offset] = 'a' + build - 1;
15295 	}
15296 }
15297 
15298 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15299 {
15300 	u32 val, offset, start;
15301 	int i, vlen;
15302 
15303 	for (offset = TG3_NVM_DIR_START;
15304 	     offset < TG3_NVM_DIR_END;
15305 	     offset += TG3_NVM_DIRENT_SIZE) {
15306 		if (tg3_nvram_read(tp, offset, &val))
15307 			return;
15308 
15309 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15310 			break;
15311 	}
15312 
15313 	if (offset == TG3_NVM_DIR_END)
15314 		return;
15315 
15316 	if (!tg3_flag(tp, 5705_PLUS))
15317 		start = 0x08000000;
15318 	else if (tg3_nvram_read(tp, offset - 4, &start))
15319 		return;
15320 
15321 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15322 	    !tg3_fw_img_is_valid(tp, offset) ||
15323 	    tg3_nvram_read(tp, offset + 8, &val))
15324 		return;
15325 
15326 	offset += val - start;
15327 
15328 	vlen = strlen(tp->fw_ver);
15329 
15330 	tp->fw_ver[vlen++] = ',';
15331 	tp->fw_ver[vlen++] = ' ';
15332 
15333 	for (i = 0; i < 4; i++) {
15334 		__be32 v;
15335 		if (tg3_nvram_read_be32(tp, offset, &v))
15336 			return;
15337 
15338 		offset += sizeof(v);
15339 
15340 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15341 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15342 			break;
15343 		}
15344 
15345 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15346 		vlen += sizeof(v);
15347 	}
15348 }
15349 
15350 static void tg3_probe_ncsi(struct tg3 *tp)
15351 {
15352 	u32 apedata;
15353 
15354 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15355 	if (apedata != APE_SEG_SIG_MAGIC)
15356 		return;
15357 
15358 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15359 	if (!(apedata & APE_FW_STATUS_READY))
15360 		return;
15361 
15362 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15363 		tg3_flag_set(tp, APE_HAS_NCSI);
15364 }
15365 
15366 static void tg3_read_dash_ver(struct tg3 *tp)
15367 {
15368 	int vlen;
15369 	u32 apedata;
15370 	char *fwtype;
15371 
15372 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15373 
15374 	if (tg3_flag(tp, APE_HAS_NCSI))
15375 		fwtype = "NCSI";
15376 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15377 		fwtype = "SMASH";
15378 	else
15379 		fwtype = "DASH";
15380 
15381 	vlen = strlen(tp->fw_ver);
15382 
15383 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15384 		 fwtype,
15385 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15386 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15387 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15388 		 (apedata & APE_FW_VERSION_BLDMSK));
15389 }
15390 
15391 static void tg3_read_otp_ver(struct tg3 *tp)
15392 {
15393 	u32 val, val2;
15394 
15395 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15396 		return;
15397 
15398 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15399 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15400 	    TG3_OTP_MAGIC0_VALID(val)) {
15401 		u64 val64 = (u64) val << 32 | val2;
15402 		u32 ver = 0;
15403 		int i, vlen;
15404 
15405 		for (i = 0; i < 7; i++) {
15406 			if ((val64 & 0xff) == 0)
15407 				break;
15408 			ver = val64 & 0xff;
15409 			val64 >>= 8;
15410 		}
15411 		vlen = strlen(tp->fw_ver);
15412 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15413 	}
15414 }
15415 
15416 static void tg3_read_fw_ver(struct tg3 *tp)
15417 {
15418 	u32 val;
15419 	bool vpd_vers = false;
15420 
15421 	if (tp->fw_ver[0] != 0)
15422 		vpd_vers = true;
15423 
15424 	if (tg3_flag(tp, NO_NVRAM)) {
15425 		strcat(tp->fw_ver, "sb");
15426 		tg3_read_otp_ver(tp);
15427 		return;
15428 	}
15429 
15430 	if (tg3_nvram_read(tp, 0, &val))
15431 		return;
15432 
15433 	if (val == TG3_EEPROM_MAGIC)
15434 		tg3_read_bc_ver(tp);
15435 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15436 		tg3_read_sb_ver(tp, val);
15437 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15438 		tg3_read_hwsb_ver(tp);
15439 
15440 	if (tg3_flag(tp, ENABLE_ASF)) {
15441 		if (tg3_flag(tp, ENABLE_APE)) {
15442 			tg3_probe_ncsi(tp);
15443 			if (!vpd_vers)
15444 				tg3_read_dash_ver(tp);
15445 		} else if (!vpd_vers) {
15446 			tg3_read_mgmtfw_ver(tp);
15447 		}
15448 	}
15449 
15450 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15451 }
15452 
15453 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15454 {
15455 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15456 		return TG3_RX_RET_MAX_SIZE_5717;
15457 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15458 		return TG3_RX_RET_MAX_SIZE_5700;
15459 	else
15460 		return TG3_RX_RET_MAX_SIZE_5705;
15461 }
15462 
15463 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15464 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15465 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15466 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15467 	{ },
15468 };
15469 
15470 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15471 {
15472 	struct pci_dev *peer;
15473 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15474 
15475 	for (func = 0; func < 8; func++) {
15476 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15477 		if (peer && peer != tp->pdev)
15478 			break;
15479 		pci_dev_put(peer);
15480 	}
15481 	/* 5704 can be configured in single-port mode, set peer to
15482 	 * tp->pdev in that case.
15483 	 */
15484 	if (!peer) {
15485 		peer = tp->pdev;
15486 		return peer;
15487 	}
15488 
15489 	/*
15490 	 * We don't need to keep the refcount elevated; there's no way
15491 	 * to remove one half of this device without removing the other
15492 	 */
15493 	pci_dev_put(peer);
15494 
15495 	return peer;
15496 }
15497 
15498 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15499 {
15500 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15501 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15502 		u32 reg;
15503 
15504 		/* All devices that use the alternate
15505 		 * ASIC REV location have a CPMU.
15506 		 */
15507 		tg3_flag_set(tp, CPMU_PRESENT);
15508 
15509 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15510 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15511 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15512 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15513 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15514 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15515 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15516 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15517 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15518 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15519 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15520 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15521 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15522 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15523 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15524 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15525 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15526 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15527 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15528 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15529 		else
15530 			reg = TG3PCI_PRODID_ASICREV;
15531 
15532 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15533 	}
15534 
15535 	/* Wrong chip ID in 5752 A0. This code can be removed later
15536 	 * as A0 is not in production.
15537 	 */
15538 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15539 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15540 
15541 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15542 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15543 
15544 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15545 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15546 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15547 		tg3_flag_set(tp, 5717_PLUS);
15548 
15549 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15550 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15551 		tg3_flag_set(tp, 57765_CLASS);
15552 
15553 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15554 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15555 		tg3_flag_set(tp, 57765_PLUS);
15556 
15557 	/* Intentionally exclude ASIC_REV_5906 */
15558 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15559 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15560 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15561 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15562 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15563 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15564 	    tg3_flag(tp, 57765_PLUS))
15565 		tg3_flag_set(tp, 5755_PLUS);
15566 
15567 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15568 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15569 		tg3_flag_set(tp, 5780_CLASS);
15570 
15571 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15572 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15573 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15574 	    tg3_flag(tp, 5755_PLUS) ||
15575 	    tg3_flag(tp, 5780_CLASS))
15576 		tg3_flag_set(tp, 5750_PLUS);
15577 
15578 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15579 	    tg3_flag(tp, 5750_PLUS))
15580 		tg3_flag_set(tp, 5705_PLUS);
15581 }
15582 
15583 static bool tg3_10_100_only_device(struct tg3 *tp,
15584 				   const struct pci_device_id *ent)
15585 {
15586 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15587 
15588 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15589 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15590 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15591 		return true;
15592 
15593 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15594 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15595 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15596 				return true;
15597 		} else {
15598 			return true;
15599 		}
15600 	}
15601 
15602 	return false;
15603 }
15604 
15605 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15606 {
15607 	u32 misc_ctrl_reg;
15608 	u32 pci_state_reg, grc_misc_cfg;
15609 	u32 val;
15610 	u16 pci_cmd;
15611 	int err;
15612 
15613 	/* Force memory write invalidate off.  If we leave it on,
15614 	 * then on 5700_BX chips we have to enable a workaround.
15615 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15616 	 * to match the cacheline size.  The Broadcom driver have this
15617 	 * workaround but turns MWI off all the times so never uses
15618 	 * it.  This seems to suggest that the workaround is insufficient.
15619 	 */
15620 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15621 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15622 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15623 
15624 	/* Important! -- Make sure register accesses are byteswapped
15625 	 * correctly.  Also, for those chips that require it, make
15626 	 * sure that indirect register accesses are enabled before
15627 	 * the first operation.
15628 	 */
15629 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15630 			      &misc_ctrl_reg);
15631 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15632 			       MISC_HOST_CTRL_CHIPREV);
15633 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15634 			       tp->misc_host_ctrl);
15635 
15636 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15637 
15638 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15639 	 * we need to disable memory and use config. cycles
15640 	 * only to access all registers. The 5702/03 chips
15641 	 * can mistakenly decode the special cycles from the
15642 	 * ICH chipsets as memory write cycles, causing corruption
15643 	 * of register and memory space. Only certain ICH bridges
15644 	 * will drive special cycles with non-zero data during the
15645 	 * address phase which can fall within the 5703's address
15646 	 * range. This is not an ICH bug as the PCI spec allows
15647 	 * non-zero address during special cycles. However, only
15648 	 * these ICH bridges are known to drive non-zero addresses
15649 	 * during special cycles.
15650 	 *
15651 	 * Since special cycles do not cross PCI bridges, we only
15652 	 * enable this workaround if the 5703 is on the secondary
15653 	 * bus of these ICH bridges.
15654 	 */
15655 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15656 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15657 		static struct tg3_dev_id {
15658 			u32	vendor;
15659 			u32	device;
15660 			u32	rev;
15661 		} ich_chipsets[] = {
15662 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15663 			  PCI_ANY_ID },
15664 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15665 			  PCI_ANY_ID },
15666 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15667 			  0xa },
15668 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15669 			  PCI_ANY_ID },
15670 			{ },
15671 		};
15672 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15673 		struct pci_dev *bridge = NULL;
15674 
15675 		while (pci_id->vendor != 0) {
15676 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15677 						bridge);
15678 			if (!bridge) {
15679 				pci_id++;
15680 				continue;
15681 			}
15682 			if (pci_id->rev != PCI_ANY_ID) {
15683 				if (bridge->revision > pci_id->rev)
15684 					continue;
15685 			}
15686 			if (bridge->subordinate &&
15687 			    (bridge->subordinate->number ==
15688 			     tp->pdev->bus->number)) {
15689 				tg3_flag_set(tp, ICH_WORKAROUND);
15690 				pci_dev_put(bridge);
15691 				break;
15692 			}
15693 		}
15694 	}
15695 
15696 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15697 		static struct tg3_dev_id {
15698 			u32	vendor;
15699 			u32	device;
15700 		} bridge_chipsets[] = {
15701 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15702 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15703 			{ },
15704 		};
15705 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15706 		struct pci_dev *bridge = NULL;
15707 
15708 		while (pci_id->vendor != 0) {
15709 			bridge = pci_get_device(pci_id->vendor,
15710 						pci_id->device,
15711 						bridge);
15712 			if (!bridge) {
15713 				pci_id++;
15714 				continue;
15715 			}
15716 			if (bridge->subordinate &&
15717 			    (bridge->subordinate->number <=
15718 			     tp->pdev->bus->number) &&
15719 			    (bridge->subordinate->busn_res.end >=
15720 			     tp->pdev->bus->number)) {
15721 				tg3_flag_set(tp, 5701_DMA_BUG);
15722 				pci_dev_put(bridge);
15723 				break;
15724 			}
15725 		}
15726 	}
15727 
15728 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
15729 	 * DMA addresses > 40-bit. This bridge may have other additional
15730 	 * 57xx devices behind it in some 4-port NIC designs for example.
15731 	 * Any tg3 device found behind the bridge will also need the 40-bit
15732 	 * DMA workaround.
15733 	 */
15734 	if (tg3_flag(tp, 5780_CLASS)) {
15735 		tg3_flag_set(tp, 40BIT_DMA_BUG);
15736 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15737 	} else {
15738 		struct pci_dev *bridge = NULL;
15739 
15740 		do {
15741 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15742 						PCI_DEVICE_ID_SERVERWORKS_EPB,
15743 						bridge);
15744 			if (bridge && bridge->subordinate &&
15745 			    (bridge->subordinate->number <=
15746 			     tp->pdev->bus->number) &&
15747 			    (bridge->subordinate->busn_res.end >=
15748 			     tp->pdev->bus->number)) {
15749 				tg3_flag_set(tp, 40BIT_DMA_BUG);
15750 				pci_dev_put(bridge);
15751 				break;
15752 			}
15753 		} while (bridge);
15754 	}
15755 
15756 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15757 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15758 		tp->pdev_peer = tg3_find_peer(tp);
15759 
15760 	/* Determine TSO capabilities */
15761 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15762 		; /* Do nothing. HW bug. */
15763 	else if (tg3_flag(tp, 57765_PLUS))
15764 		tg3_flag_set(tp, HW_TSO_3);
15765 	else if (tg3_flag(tp, 5755_PLUS) ||
15766 		 tg3_asic_rev(tp) == ASIC_REV_5906)
15767 		tg3_flag_set(tp, HW_TSO_2);
15768 	else if (tg3_flag(tp, 5750_PLUS)) {
15769 		tg3_flag_set(tp, HW_TSO_1);
15770 		tg3_flag_set(tp, TSO_BUG);
15771 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15772 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15773 			tg3_flag_clear(tp, TSO_BUG);
15774 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15775 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
15776 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15777 		tg3_flag_set(tp, FW_TSO);
15778 		tg3_flag_set(tp, TSO_BUG);
15779 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
15780 			tp->fw_needed = FIRMWARE_TG3TSO5;
15781 		else
15782 			tp->fw_needed = FIRMWARE_TG3TSO;
15783 	}
15784 
15785 	/* Selectively allow TSO based on operating conditions */
15786 	if (tg3_flag(tp, HW_TSO_1) ||
15787 	    tg3_flag(tp, HW_TSO_2) ||
15788 	    tg3_flag(tp, HW_TSO_3) ||
15789 	    tg3_flag(tp, FW_TSO)) {
15790 		/* For firmware TSO, assume ASF is disabled.
15791 		 * We'll disable TSO later if we discover ASF
15792 		 * is enabled in tg3_get_eeprom_hw_cfg().
15793 		 */
15794 		tg3_flag_set(tp, TSO_CAPABLE);
15795 	} else {
15796 		tg3_flag_clear(tp, TSO_CAPABLE);
15797 		tg3_flag_clear(tp, TSO_BUG);
15798 		tp->fw_needed = NULL;
15799 	}
15800 
15801 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15802 		tp->fw_needed = FIRMWARE_TG3;
15803 
15804 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
15805 		tp->fw_needed = FIRMWARE_TG357766;
15806 
15807 	tp->irq_max = 1;
15808 
15809 	if (tg3_flag(tp, 5750_PLUS)) {
15810 		tg3_flag_set(tp, SUPPORT_MSI);
15811 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15812 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15813 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15814 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15815 		     tp->pdev_peer == tp->pdev))
15816 			tg3_flag_clear(tp, SUPPORT_MSI);
15817 
15818 		if (tg3_flag(tp, 5755_PLUS) ||
15819 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
15820 			tg3_flag_set(tp, 1SHOT_MSI);
15821 		}
15822 
15823 		if (tg3_flag(tp, 57765_PLUS)) {
15824 			tg3_flag_set(tp, SUPPORT_MSIX);
15825 			tp->irq_max = TG3_IRQ_MAX_VECS;
15826 		}
15827 	}
15828 
15829 	tp->txq_max = 1;
15830 	tp->rxq_max = 1;
15831 	if (tp->irq_max > 1) {
15832 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15833 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15834 
15835 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15836 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15837 			tp->txq_max = tp->irq_max - 1;
15838 	}
15839 
15840 	if (tg3_flag(tp, 5755_PLUS) ||
15841 	    tg3_asic_rev(tp) == ASIC_REV_5906)
15842 		tg3_flag_set(tp, SHORT_DMA_BUG);
15843 
15844 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
15845 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15846 
15847 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15848 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15849 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
15850 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15851 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
15852 
15853 	if (tg3_flag(tp, 57765_PLUS) &&
15854 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15855 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15856 
15857 	if (!tg3_flag(tp, 5705_PLUS) ||
15858 	    tg3_flag(tp, 5780_CLASS) ||
15859 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
15860 		tg3_flag_set(tp, JUMBO_CAPABLE);
15861 
15862 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15863 			      &pci_state_reg);
15864 
15865 	if (pci_is_pcie(tp->pdev)) {
15866 		u16 lnkctl;
15867 
15868 		tg3_flag_set(tp, PCI_EXPRESS);
15869 
15870 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15871 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15872 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15873 				tg3_flag_clear(tp, HW_TSO_2);
15874 				tg3_flag_clear(tp, TSO_CAPABLE);
15875 			}
15876 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15877 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15878 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15879 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15880 				tg3_flag_set(tp, CLKREQ_BUG);
15881 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15882 			tg3_flag_set(tp, L1PLLPD_EN);
15883 		}
15884 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15885 		/* BCM5785 devices are effectively PCIe devices, and should
15886 		 * follow PCIe codepaths, but do not have a PCIe capabilities
15887 		 * section.
15888 		 */
15889 		tg3_flag_set(tp, PCI_EXPRESS);
15890 	} else if (!tg3_flag(tp, 5705_PLUS) ||
15891 		   tg3_flag(tp, 5780_CLASS)) {
15892 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15893 		if (!tp->pcix_cap) {
15894 			dev_err(&tp->pdev->dev,
15895 				"Cannot find PCI-X capability, aborting\n");
15896 			return -EIO;
15897 		}
15898 
15899 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15900 			tg3_flag_set(tp, PCIX_MODE);
15901 	}
15902 
15903 	/* If we have an AMD 762 or VIA K8T800 chipset, write
15904 	 * reordering to the mailbox registers done by the host
15905 	 * controller can cause major troubles.  We read back from
15906 	 * every mailbox register write to force the writes to be
15907 	 * posted to the chip in order.
15908 	 */
15909 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
15910 	    !tg3_flag(tp, PCI_EXPRESS))
15911 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
15912 
15913 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15914 			     &tp->pci_cacheline_sz);
15915 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15916 			     &tp->pci_lat_timer);
15917 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15918 	    tp->pci_lat_timer < 64) {
15919 		tp->pci_lat_timer = 64;
15920 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15921 				      tp->pci_lat_timer);
15922 	}
15923 
15924 	/* Important! -- It is critical that the PCI-X hw workaround
15925 	 * situation is decided before the first MMIO register access.
15926 	 */
15927 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15928 		/* 5700 BX chips need to have their TX producer index
15929 		 * mailboxes written twice to workaround a bug.
15930 		 */
15931 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
15932 
15933 		/* If we are in PCI-X mode, enable register write workaround.
15934 		 *
15935 		 * The workaround is to use indirect register accesses
15936 		 * for all chip writes not to mailbox registers.
15937 		 */
15938 		if (tg3_flag(tp, PCIX_MODE)) {
15939 			u32 pm_reg;
15940 
15941 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15942 
15943 			/* The chip can have it's power management PCI config
15944 			 * space registers clobbered due to this bug.
15945 			 * So explicitly force the chip into D0 here.
15946 			 */
15947 			pci_read_config_dword(tp->pdev,
15948 					      tp->pm_cap + PCI_PM_CTRL,
15949 					      &pm_reg);
15950 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15951 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15952 			pci_write_config_dword(tp->pdev,
15953 					       tp->pm_cap + PCI_PM_CTRL,
15954 					       pm_reg);
15955 
15956 			/* Also, force SERR#/PERR# in PCI command. */
15957 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15958 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15959 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15960 		}
15961 	}
15962 
15963 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15964 		tg3_flag_set(tp, PCI_HIGH_SPEED);
15965 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15966 		tg3_flag_set(tp, PCI_32BIT);
15967 
15968 	/* Chip-specific fixup from Broadcom driver */
15969 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15970 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15971 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15972 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15973 	}
15974 
15975 	/* Default fast path register access methods */
15976 	tp->read32 = tg3_read32;
15977 	tp->write32 = tg3_write32;
15978 	tp->read32_mbox = tg3_read32;
15979 	tp->write32_mbox = tg3_write32;
15980 	tp->write32_tx_mbox = tg3_write32;
15981 	tp->write32_rx_mbox = tg3_write32;
15982 
15983 	/* Various workaround register access methods */
15984 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15985 		tp->write32 = tg3_write_indirect_reg32;
15986 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15987 		 (tg3_flag(tp, PCI_EXPRESS) &&
15988 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15989 		/*
15990 		 * Back to back register writes can cause problems on these
15991 		 * chips, the workaround is to read back all reg writes
15992 		 * except those to mailbox regs.
15993 		 *
15994 		 * See tg3_write_indirect_reg32().
15995 		 */
15996 		tp->write32 = tg3_write_flush_reg32;
15997 	}
15998 
15999 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16000 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16001 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16002 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16003 	}
16004 
16005 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16006 		tp->read32 = tg3_read_indirect_reg32;
16007 		tp->write32 = tg3_write_indirect_reg32;
16008 		tp->read32_mbox = tg3_read_indirect_mbox;
16009 		tp->write32_mbox = tg3_write_indirect_mbox;
16010 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16011 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16012 
16013 		iounmap(tp->regs);
16014 		tp->regs = NULL;
16015 
16016 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16017 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16018 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16019 	}
16020 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16021 		tp->read32_mbox = tg3_read32_mbox_5906;
16022 		tp->write32_mbox = tg3_write32_mbox_5906;
16023 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16024 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16025 	}
16026 
16027 	if (tp->write32 == tg3_write_indirect_reg32 ||
16028 	    (tg3_flag(tp, PCIX_MODE) &&
16029 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16030 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16031 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16032 
16033 	/* The memory arbiter has to be enabled in order for SRAM accesses
16034 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16035 	 * sure it is enabled, but other entities such as system netboot
16036 	 * code might disable it.
16037 	 */
16038 	val = tr32(MEMARB_MODE);
16039 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16040 
16041 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16042 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16043 	    tg3_flag(tp, 5780_CLASS)) {
16044 		if (tg3_flag(tp, PCIX_MODE)) {
16045 			pci_read_config_dword(tp->pdev,
16046 					      tp->pcix_cap + PCI_X_STATUS,
16047 					      &val);
16048 			tp->pci_fn = val & 0x7;
16049 		}
16050 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16051 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16052 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16053 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16054 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16055 			val = tr32(TG3_CPMU_STATUS);
16056 
16057 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16058 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16059 		else
16060 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16061 				     TG3_CPMU_STATUS_FSHFT_5719;
16062 	}
16063 
16064 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16065 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16066 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16067 	}
16068 
16069 	/* Get eeprom hw config before calling tg3_set_power_state().
16070 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16071 	 * determined before calling tg3_set_power_state() so that
16072 	 * we know whether or not to switch out of Vaux power.
16073 	 * When the flag is set, it means that GPIO1 is used for eeprom
16074 	 * write protect and also implies that it is a LOM where GPIOs
16075 	 * are not used to switch power.
16076 	 */
16077 	tg3_get_eeprom_hw_cfg(tp);
16078 
16079 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16080 		tg3_flag_clear(tp, TSO_CAPABLE);
16081 		tg3_flag_clear(tp, TSO_BUG);
16082 		tp->fw_needed = NULL;
16083 	}
16084 
16085 	if (tg3_flag(tp, ENABLE_APE)) {
16086 		/* Allow reads and writes to the
16087 		 * APE register and memory space.
16088 		 */
16089 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16090 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16091 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16092 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16093 				       pci_state_reg);
16094 
16095 		tg3_ape_lock_init(tp);
16096 	}
16097 
16098 	/* Set up tp->grc_local_ctrl before calling
16099 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16100 	 * will bring 5700's external PHY out of reset.
16101 	 * It is also used as eeprom write protect on LOMs.
16102 	 */
16103 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16104 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16105 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16106 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16107 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16108 	/* Unused GPIO3 must be driven as output on 5752 because there
16109 	 * are no pull-up resistors on unused GPIO pins.
16110 	 */
16111 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16112 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16113 
16114 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16115 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16116 	    tg3_flag(tp, 57765_CLASS))
16117 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16118 
16119 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16120 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16121 		/* Turn off the debug UART. */
16122 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16123 		if (tg3_flag(tp, IS_NIC))
16124 			/* Keep VMain power. */
16125 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16126 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16127 	}
16128 
16129 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16130 		tp->grc_local_ctrl |=
16131 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16132 
16133 	/* Switch out of Vaux if it is a NIC */
16134 	tg3_pwrsrc_switch_to_vmain(tp);
16135 
16136 	/* Derive initial jumbo mode from MTU assigned in
16137 	 * ether_setup() via the alloc_etherdev() call
16138 	 */
16139 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16140 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16141 
16142 	/* Determine WakeOnLan speed to use. */
16143 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16144 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16145 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16146 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16147 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16148 	} else {
16149 		tg3_flag_set(tp, WOL_SPEED_100MB);
16150 	}
16151 
16152 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16153 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16154 
16155 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16156 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16157 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16158 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16159 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16160 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16161 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16162 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16163 
16164 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16165 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16166 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16167 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16168 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16169 
16170 	if (tg3_flag(tp, 5705_PLUS) &&
16171 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16172 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16173 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16174 	    !tg3_flag(tp, 57765_PLUS)) {
16175 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16176 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16177 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16178 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16179 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16180 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16181 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16182 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16183 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16184 		} else
16185 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16186 	}
16187 
16188 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16189 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16190 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16191 		if (tp->phy_otp == 0)
16192 			tp->phy_otp = TG3_OTP_DEFAULT;
16193 	}
16194 
16195 	if (tg3_flag(tp, CPMU_PRESENT))
16196 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16197 	else
16198 		tp->mi_mode = MAC_MI_MODE_BASE;
16199 
16200 	tp->coalesce_mode = 0;
16201 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16202 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16203 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16204 
16205 	/* Set these bits to enable statistics workaround. */
16206 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16207 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16208 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16209 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16210 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16211 	}
16212 
16213 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16214 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16215 		tg3_flag_set(tp, USE_PHYLIB);
16216 
16217 	err = tg3_mdio_init(tp);
16218 	if (err)
16219 		return err;
16220 
16221 	/* Initialize data/descriptor byte/word swapping. */
16222 	val = tr32(GRC_MODE);
16223 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16224 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16225 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16226 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16227 			GRC_MODE_B2HRX_ENABLE |
16228 			GRC_MODE_HTX2B_ENABLE |
16229 			GRC_MODE_HOST_STACKUP);
16230 	else
16231 		val &= GRC_MODE_HOST_STACKUP;
16232 
16233 	tw32(GRC_MODE, val | tp->grc_mode);
16234 
16235 	tg3_switch_clocks(tp);
16236 
16237 	/* Clear this out for sanity. */
16238 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16239 
16240 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16241 			      &pci_state_reg);
16242 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16243 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16244 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16245 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16246 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16247 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16248 			void __iomem *sram_base;
16249 
16250 			/* Write some dummy words into the SRAM status block
16251 			 * area, see if it reads back correctly.  If the return
16252 			 * value is bad, force enable the PCIX workaround.
16253 			 */
16254 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16255 
16256 			writel(0x00000000, sram_base);
16257 			writel(0x00000000, sram_base + 4);
16258 			writel(0xffffffff, sram_base + 4);
16259 			if (readl(sram_base) != 0x00000000)
16260 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16261 		}
16262 	}
16263 
16264 	udelay(50);
16265 	tg3_nvram_init(tp);
16266 
16267 	/* If the device has an NVRAM, no need to load patch firmware */
16268 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16269 	    !tg3_flag(tp, NO_NVRAM))
16270 		tp->fw_needed = NULL;
16271 
16272 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16273 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16274 
16275 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16276 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16277 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16278 		tg3_flag_set(tp, IS_5788);
16279 
16280 	if (!tg3_flag(tp, IS_5788) &&
16281 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16282 		tg3_flag_set(tp, TAGGED_STATUS);
16283 	if (tg3_flag(tp, TAGGED_STATUS)) {
16284 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16285 				      HOSTCC_MODE_CLRTICK_TXBD);
16286 
16287 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16288 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16289 				       tp->misc_host_ctrl);
16290 	}
16291 
16292 	/* Preserve the APE MAC_MODE bits */
16293 	if (tg3_flag(tp, ENABLE_APE))
16294 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16295 	else
16296 		tp->mac_mode = 0;
16297 
16298 	if (tg3_10_100_only_device(tp, ent))
16299 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16300 
16301 	err = tg3_phy_probe(tp);
16302 	if (err) {
16303 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16304 		/* ... but do not return immediately ... */
16305 		tg3_mdio_fini(tp);
16306 	}
16307 
16308 	tg3_read_vpd(tp);
16309 	tg3_read_fw_ver(tp);
16310 
16311 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16312 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16313 	} else {
16314 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16315 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16316 		else
16317 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16318 	}
16319 
16320 	/* 5700 {AX,BX} chips have a broken status block link
16321 	 * change bit implementation, so we must use the
16322 	 * status register in those cases.
16323 	 */
16324 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16325 		tg3_flag_set(tp, USE_LINKCHG_REG);
16326 	else
16327 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16328 
16329 	/* The led_ctrl is set during tg3_phy_probe, here we might
16330 	 * have to force the link status polling mechanism based
16331 	 * upon subsystem IDs.
16332 	 */
16333 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16334 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16335 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16336 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16337 		tg3_flag_set(tp, USE_LINKCHG_REG);
16338 	}
16339 
16340 	/* For all SERDES we poll the MAC status register. */
16341 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16342 		tg3_flag_set(tp, POLL_SERDES);
16343 	else
16344 		tg3_flag_clear(tp, POLL_SERDES);
16345 
16346 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16347 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16348 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16349 	    tg3_flag(tp, PCIX_MODE)) {
16350 		tp->rx_offset = NET_SKB_PAD;
16351 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16352 		tp->rx_copy_thresh = ~(u16)0;
16353 #endif
16354 	}
16355 
16356 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16357 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16358 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16359 
16360 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16361 
16362 	/* Increment the rx prod index on the rx std ring by at most
16363 	 * 8 for these chips to workaround hw errata.
16364 	 */
16365 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16366 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16367 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16368 		tp->rx_std_max_post = 8;
16369 
16370 	if (tg3_flag(tp, ASPM_WORKAROUND))
16371 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16372 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16373 
16374 	return err;
16375 }
16376 
16377 #ifdef CONFIG_SPARC
16378 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16379 {
16380 	struct net_device *dev = tp->dev;
16381 	struct pci_dev *pdev = tp->pdev;
16382 	struct device_node *dp = pci_device_to_OF_node(pdev);
16383 	const unsigned char *addr;
16384 	int len;
16385 
16386 	addr = of_get_property(dp, "local-mac-address", &len);
16387 	if (addr && len == 6) {
16388 		memcpy(dev->dev_addr, addr, 6);
16389 		return 0;
16390 	}
16391 	return -ENODEV;
16392 }
16393 
16394 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16395 {
16396 	struct net_device *dev = tp->dev;
16397 
16398 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16399 	return 0;
16400 }
16401 #endif
16402 
16403 static int tg3_get_device_address(struct tg3 *tp)
16404 {
16405 	struct net_device *dev = tp->dev;
16406 	u32 hi, lo, mac_offset;
16407 	int addr_ok = 0;
16408 	int err;
16409 
16410 #ifdef CONFIG_SPARC
16411 	if (!tg3_get_macaddr_sparc(tp))
16412 		return 0;
16413 #endif
16414 
16415 	if (tg3_flag(tp, IS_SSB_CORE)) {
16416 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16417 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16418 			return 0;
16419 	}
16420 
16421 	mac_offset = 0x7c;
16422 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16423 	    tg3_flag(tp, 5780_CLASS)) {
16424 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16425 			mac_offset = 0xcc;
16426 		if (tg3_nvram_lock(tp))
16427 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16428 		else
16429 			tg3_nvram_unlock(tp);
16430 	} else if (tg3_flag(tp, 5717_PLUS)) {
16431 		if (tp->pci_fn & 1)
16432 			mac_offset = 0xcc;
16433 		if (tp->pci_fn > 1)
16434 			mac_offset += 0x18c;
16435 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16436 		mac_offset = 0x10;
16437 
16438 	/* First try to get it from MAC address mailbox. */
16439 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16440 	if ((hi >> 16) == 0x484b) {
16441 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16442 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16443 
16444 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16445 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16446 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16447 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16448 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16449 
16450 		/* Some old bootcode may report a 0 MAC address in SRAM */
16451 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16452 	}
16453 	if (!addr_ok) {
16454 		/* Next, try NVRAM. */
16455 		if (!tg3_flag(tp, NO_NVRAM) &&
16456 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16457 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16458 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16459 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16460 		}
16461 		/* Finally just fetch it out of the MAC control regs. */
16462 		else {
16463 			hi = tr32(MAC_ADDR_0_HIGH);
16464 			lo = tr32(MAC_ADDR_0_LOW);
16465 
16466 			dev->dev_addr[5] = lo & 0xff;
16467 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16468 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16469 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16470 			dev->dev_addr[1] = hi & 0xff;
16471 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16472 		}
16473 	}
16474 
16475 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16476 #ifdef CONFIG_SPARC
16477 		if (!tg3_get_default_macaddr_sparc(tp))
16478 			return 0;
16479 #endif
16480 		return -EINVAL;
16481 	}
16482 	return 0;
16483 }
16484 
16485 #define BOUNDARY_SINGLE_CACHELINE	1
16486 #define BOUNDARY_MULTI_CACHELINE	2
16487 
16488 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16489 {
16490 	int cacheline_size;
16491 	u8 byte;
16492 	int goal;
16493 
16494 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16495 	if (byte == 0)
16496 		cacheline_size = 1024;
16497 	else
16498 		cacheline_size = (int) byte * 4;
16499 
16500 	/* On 5703 and later chips, the boundary bits have no
16501 	 * effect.
16502 	 */
16503 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16504 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16505 	    !tg3_flag(tp, PCI_EXPRESS))
16506 		goto out;
16507 
16508 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16509 	goal = BOUNDARY_MULTI_CACHELINE;
16510 #else
16511 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16512 	goal = BOUNDARY_SINGLE_CACHELINE;
16513 #else
16514 	goal = 0;
16515 #endif
16516 #endif
16517 
16518 	if (tg3_flag(tp, 57765_PLUS)) {
16519 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16520 		goto out;
16521 	}
16522 
16523 	if (!goal)
16524 		goto out;
16525 
16526 	/* PCI controllers on most RISC systems tend to disconnect
16527 	 * when a device tries to burst across a cache-line boundary.
16528 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16529 	 *
16530 	 * Unfortunately, for PCI-E there are only limited
16531 	 * write-side controls for this, and thus for reads
16532 	 * we will still get the disconnects.  We'll also waste
16533 	 * these PCI cycles for both read and write for chips
16534 	 * other than 5700 and 5701 which do not implement the
16535 	 * boundary bits.
16536 	 */
16537 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16538 		switch (cacheline_size) {
16539 		case 16:
16540 		case 32:
16541 		case 64:
16542 		case 128:
16543 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16544 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16545 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16546 			} else {
16547 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16548 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16549 			}
16550 			break;
16551 
16552 		case 256:
16553 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16554 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16555 			break;
16556 
16557 		default:
16558 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16559 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16560 			break;
16561 		}
16562 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16563 		switch (cacheline_size) {
16564 		case 16:
16565 		case 32:
16566 		case 64:
16567 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16568 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16569 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16570 				break;
16571 			}
16572 			/* fallthrough */
16573 		case 128:
16574 		default:
16575 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16576 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16577 			break;
16578 		}
16579 	} else {
16580 		switch (cacheline_size) {
16581 		case 16:
16582 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16583 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16584 					DMA_RWCTRL_WRITE_BNDRY_16);
16585 				break;
16586 			}
16587 			/* fallthrough */
16588 		case 32:
16589 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16590 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16591 					DMA_RWCTRL_WRITE_BNDRY_32);
16592 				break;
16593 			}
16594 			/* fallthrough */
16595 		case 64:
16596 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16597 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16598 					DMA_RWCTRL_WRITE_BNDRY_64);
16599 				break;
16600 			}
16601 			/* fallthrough */
16602 		case 128:
16603 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16604 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16605 					DMA_RWCTRL_WRITE_BNDRY_128);
16606 				break;
16607 			}
16608 			/* fallthrough */
16609 		case 256:
16610 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16611 				DMA_RWCTRL_WRITE_BNDRY_256);
16612 			break;
16613 		case 512:
16614 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16615 				DMA_RWCTRL_WRITE_BNDRY_512);
16616 			break;
16617 		case 1024:
16618 		default:
16619 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16620 				DMA_RWCTRL_WRITE_BNDRY_1024);
16621 			break;
16622 		}
16623 	}
16624 
16625 out:
16626 	return val;
16627 }
16628 
16629 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16630 			   int size, bool to_device)
16631 {
16632 	struct tg3_internal_buffer_desc test_desc;
16633 	u32 sram_dma_descs;
16634 	int i, ret;
16635 
16636 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16637 
16638 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16639 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16640 	tw32(RDMAC_STATUS, 0);
16641 	tw32(WDMAC_STATUS, 0);
16642 
16643 	tw32(BUFMGR_MODE, 0);
16644 	tw32(FTQ_RESET, 0);
16645 
16646 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16647 	test_desc.addr_lo = buf_dma & 0xffffffff;
16648 	test_desc.nic_mbuf = 0x00002100;
16649 	test_desc.len = size;
16650 
16651 	/*
16652 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16653 	 * the *second* time the tg3 driver was getting loaded after an
16654 	 * initial scan.
16655 	 *
16656 	 * Broadcom tells me:
16657 	 *   ...the DMA engine is connected to the GRC block and a DMA
16658 	 *   reset may affect the GRC block in some unpredictable way...
16659 	 *   The behavior of resets to individual blocks has not been tested.
16660 	 *
16661 	 * Broadcom noted the GRC reset will also reset all sub-components.
16662 	 */
16663 	if (to_device) {
16664 		test_desc.cqid_sqid = (13 << 8) | 2;
16665 
16666 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16667 		udelay(40);
16668 	} else {
16669 		test_desc.cqid_sqid = (16 << 8) | 7;
16670 
16671 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16672 		udelay(40);
16673 	}
16674 	test_desc.flags = 0x00000005;
16675 
16676 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16677 		u32 val;
16678 
16679 		val = *(((u32 *)&test_desc) + i);
16680 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16681 				       sram_dma_descs + (i * sizeof(u32)));
16682 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16683 	}
16684 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16685 
16686 	if (to_device)
16687 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16688 	else
16689 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16690 
16691 	ret = -ENODEV;
16692 	for (i = 0; i < 40; i++) {
16693 		u32 val;
16694 
16695 		if (to_device)
16696 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16697 		else
16698 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16699 		if ((val & 0xffff) == sram_dma_descs) {
16700 			ret = 0;
16701 			break;
16702 		}
16703 
16704 		udelay(100);
16705 	}
16706 
16707 	return ret;
16708 }
16709 
16710 #define TEST_BUFFER_SIZE	0x2000
16711 
16712 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16713 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16714 	{ },
16715 };
16716 
16717 static int tg3_test_dma(struct tg3 *tp)
16718 {
16719 	dma_addr_t buf_dma;
16720 	u32 *buf, saved_dma_rwctrl;
16721 	int ret = 0;
16722 
16723 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16724 				 &buf_dma, GFP_KERNEL);
16725 	if (!buf) {
16726 		ret = -ENOMEM;
16727 		goto out_nofree;
16728 	}
16729 
16730 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16731 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16732 
16733 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16734 
16735 	if (tg3_flag(tp, 57765_PLUS))
16736 		goto out;
16737 
16738 	if (tg3_flag(tp, PCI_EXPRESS)) {
16739 		/* DMA read watermark not used on PCIE */
16740 		tp->dma_rwctrl |= 0x00180000;
16741 	} else if (!tg3_flag(tp, PCIX_MODE)) {
16742 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16743 		    tg3_asic_rev(tp) == ASIC_REV_5750)
16744 			tp->dma_rwctrl |= 0x003f0000;
16745 		else
16746 			tp->dma_rwctrl |= 0x003f000f;
16747 	} else {
16748 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16749 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
16750 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16751 			u32 read_water = 0x7;
16752 
16753 			/* If the 5704 is behind the EPB bridge, we can
16754 			 * do the less restrictive ONE_DMA workaround for
16755 			 * better performance.
16756 			 */
16757 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16758 			    tg3_asic_rev(tp) == ASIC_REV_5704)
16759 				tp->dma_rwctrl |= 0x8000;
16760 			else if (ccval == 0x6 || ccval == 0x7)
16761 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16762 
16763 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
16764 				read_water = 4;
16765 			/* Set bit 23 to enable PCIX hw bug fix */
16766 			tp->dma_rwctrl |=
16767 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16768 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16769 				(1 << 23);
16770 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16771 			/* 5780 always in PCIX mode */
16772 			tp->dma_rwctrl |= 0x00144000;
16773 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16774 			/* 5714 always in PCIX mode */
16775 			tp->dma_rwctrl |= 0x00148000;
16776 		} else {
16777 			tp->dma_rwctrl |= 0x001b000f;
16778 		}
16779 	}
16780 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16781 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16782 
16783 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16784 	    tg3_asic_rev(tp) == ASIC_REV_5704)
16785 		tp->dma_rwctrl &= 0xfffffff0;
16786 
16787 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16788 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
16789 		/* Remove this if it causes problems for some boards. */
16790 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16791 
16792 		/* On 5700/5701 chips, we need to set this bit.
16793 		 * Otherwise the chip will issue cacheline transactions
16794 		 * to streamable DMA memory with not all the byte
16795 		 * enables turned on.  This is an error on several
16796 		 * RISC PCI controllers, in particular sparc64.
16797 		 *
16798 		 * On 5703/5704 chips, this bit has been reassigned
16799 		 * a different meaning.  In particular, it is used
16800 		 * on those chips to enable a PCI-X workaround.
16801 		 */
16802 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16803 	}
16804 
16805 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16806 
16807 #if 0
16808 	/* Unneeded, already done by tg3_get_invariants.  */
16809 	tg3_switch_clocks(tp);
16810 #endif
16811 
16812 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16813 	    tg3_asic_rev(tp) != ASIC_REV_5701)
16814 		goto out;
16815 
16816 	/* It is best to perform DMA test with maximum write burst size
16817 	 * to expose the 5700/5701 write DMA bug.
16818 	 */
16819 	saved_dma_rwctrl = tp->dma_rwctrl;
16820 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16821 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16822 
16823 	while (1) {
16824 		u32 *p = buf, i;
16825 
16826 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16827 			p[i] = i;
16828 
16829 		/* Send the buffer to the chip. */
16830 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16831 		if (ret) {
16832 			dev_err(&tp->pdev->dev,
16833 				"%s: Buffer write failed. err = %d\n",
16834 				__func__, ret);
16835 			break;
16836 		}
16837 
16838 #if 0
16839 		/* validate data reached card RAM correctly. */
16840 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16841 			u32 val;
16842 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
16843 			if (le32_to_cpu(val) != p[i]) {
16844 				dev_err(&tp->pdev->dev,
16845 					"%s: Buffer corrupted on device! "
16846 					"(%d != %d)\n", __func__, val, i);
16847 				/* ret = -ENODEV here? */
16848 			}
16849 			p[i] = 0;
16850 		}
16851 #endif
16852 		/* Now read it back. */
16853 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16854 		if (ret) {
16855 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16856 				"err = %d\n", __func__, ret);
16857 			break;
16858 		}
16859 
16860 		/* Verify it. */
16861 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16862 			if (p[i] == i)
16863 				continue;
16864 
16865 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16866 			    DMA_RWCTRL_WRITE_BNDRY_16) {
16867 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16868 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16869 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16870 				break;
16871 			} else {
16872 				dev_err(&tp->pdev->dev,
16873 					"%s: Buffer corrupted on read back! "
16874 					"(%d != %d)\n", __func__, p[i], i);
16875 				ret = -ENODEV;
16876 				goto out;
16877 			}
16878 		}
16879 
16880 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16881 			/* Success. */
16882 			ret = 0;
16883 			break;
16884 		}
16885 	}
16886 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16887 	    DMA_RWCTRL_WRITE_BNDRY_16) {
16888 		/* DMA test passed without adjusting DMA boundary,
16889 		 * now look for chipsets that are known to expose the
16890 		 * DMA bug without failing the test.
16891 		 */
16892 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16893 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16894 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16895 		} else {
16896 			/* Safe to use the calculated DMA boundary. */
16897 			tp->dma_rwctrl = saved_dma_rwctrl;
16898 		}
16899 
16900 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16901 	}
16902 
16903 out:
16904 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16905 out_nofree:
16906 	return ret;
16907 }
16908 
16909 static void tg3_init_bufmgr_config(struct tg3 *tp)
16910 {
16911 	if (tg3_flag(tp, 57765_PLUS)) {
16912 		tp->bufmgr_config.mbuf_read_dma_low_water =
16913 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16914 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16915 			DEFAULT_MB_MACRX_LOW_WATER_57765;
16916 		tp->bufmgr_config.mbuf_high_water =
16917 			DEFAULT_MB_HIGH_WATER_57765;
16918 
16919 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16920 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16921 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16922 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16923 		tp->bufmgr_config.mbuf_high_water_jumbo =
16924 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16925 	} else if (tg3_flag(tp, 5705_PLUS)) {
16926 		tp->bufmgr_config.mbuf_read_dma_low_water =
16927 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16928 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16929 			DEFAULT_MB_MACRX_LOW_WATER_5705;
16930 		tp->bufmgr_config.mbuf_high_water =
16931 			DEFAULT_MB_HIGH_WATER_5705;
16932 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16933 			tp->bufmgr_config.mbuf_mac_rx_low_water =
16934 				DEFAULT_MB_MACRX_LOW_WATER_5906;
16935 			tp->bufmgr_config.mbuf_high_water =
16936 				DEFAULT_MB_HIGH_WATER_5906;
16937 		}
16938 
16939 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16940 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16941 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16942 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16943 		tp->bufmgr_config.mbuf_high_water_jumbo =
16944 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16945 	} else {
16946 		tp->bufmgr_config.mbuf_read_dma_low_water =
16947 			DEFAULT_MB_RDMA_LOW_WATER;
16948 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16949 			DEFAULT_MB_MACRX_LOW_WATER;
16950 		tp->bufmgr_config.mbuf_high_water =
16951 			DEFAULT_MB_HIGH_WATER;
16952 
16953 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16954 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16955 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16956 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16957 		tp->bufmgr_config.mbuf_high_water_jumbo =
16958 			DEFAULT_MB_HIGH_WATER_JUMBO;
16959 	}
16960 
16961 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16962 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16963 }
16964 
16965 static char *tg3_phy_string(struct tg3 *tp)
16966 {
16967 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
16968 	case TG3_PHY_ID_BCM5400:	return "5400";
16969 	case TG3_PHY_ID_BCM5401:	return "5401";
16970 	case TG3_PHY_ID_BCM5411:	return "5411";
16971 	case TG3_PHY_ID_BCM5701:	return "5701";
16972 	case TG3_PHY_ID_BCM5703:	return "5703";
16973 	case TG3_PHY_ID_BCM5704:	return "5704";
16974 	case TG3_PHY_ID_BCM5705:	return "5705";
16975 	case TG3_PHY_ID_BCM5750:	return "5750";
16976 	case TG3_PHY_ID_BCM5752:	return "5752";
16977 	case TG3_PHY_ID_BCM5714:	return "5714";
16978 	case TG3_PHY_ID_BCM5780:	return "5780";
16979 	case TG3_PHY_ID_BCM5755:	return "5755";
16980 	case TG3_PHY_ID_BCM5787:	return "5787";
16981 	case TG3_PHY_ID_BCM5784:	return "5784";
16982 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
16983 	case TG3_PHY_ID_BCM5906:	return "5906";
16984 	case TG3_PHY_ID_BCM5761:	return "5761";
16985 	case TG3_PHY_ID_BCM5718C:	return "5718C";
16986 	case TG3_PHY_ID_BCM5718S:	return "5718S";
16987 	case TG3_PHY_ID_BCM57765:	return "57765";
16988 	case TG3_PHY_ID_BCM5719C:	return "5719C";
16989 	case TG3_PHY_ID_BCM5720C:	return "5720C";
16990 	case TG3_PHY_ID_BCM5762:	return "5762C";
16991 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
16992 	case 0:			return "serdes";
16993 	default:		return "unknown";
16994 	}
16995 }
16996 
16997 static char *tg3_bus_string(struct tg3 *tp, char *str)
16998 {
16999 	if (tg3_flag(tp, PCI_EXPRESS)) {
17000 		strcpy(str, "PCI Express");
17001 		return str;
17002 	} else if (tg3_flag(tp, PCIX_MODE)) {
17003 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17004 
17005 		strcpy(str, "PCIX:");
17006 
17007 		if ((clock_ctrl == 7) ||
17008 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17009 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17010 			strcat(str, "133MHz");
17011 		else if (clock_ctrl == 0)
17012 			strcat(str, "33MHz");
17013 		else if (clock_ctrl == 2)
17014 			strcat(str, "50MHz");
17015 		else if (clock_ctrl == 4)
17016 			strcat(str, "66MHz");
17017 		else if (clock_ctrl == 6)
17018 			strcat(str, "100MHz");
17019 	} else {
17020 		strcpy(str, "PCI:");
17021 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17022 			strcat(str, "66MHz");
17023 		else
17024 			strcat(str, "33MHz");
17025 	}
17026 	if (tg3_flag(tp, PCI_32BIT))
17027 		strcat(str, ":32-bit");
17028 	else
17029 		strcat(str, ":64-bit");
17030 	return str;
17031 }
17032 
17033 static void tg3_init_coal(struct tg3 *tp)
17034 {
17035 	struct ethtool_coalesce *ec = &tp->coal;
17036 
17037 	memset(ec, 0, sizeof(*ec));
17038 	ec->cmd = ETHTOOL_GCOALESCE;
17039 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17040 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17041 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17042 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17043 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17044 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17045 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17046 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17047 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17048 
17049 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17050 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17051 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17052 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17053 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17054 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17055 	}
17056 
17057 	if (tg3_flag(tp, 5705_PLUS)) {
17058 		ec->rx_coalesce_usecs_irq = 0;
17059 		ec->tx_coalesce_usecs_irq = 0;
17060 		ec->stats_block_coalesce_usecs = 0;
17061 	}
17062 }
17063 
17064 static int tg3_init_one(struct pci_dev *pdev,
17065 				  const struct pci_device_id *ent)
17066 {
17067 	struct net_device *dev;
17068 	struct tg3 *tp;
17069 	int i, err, pm_cap;
17070 	u32 sndmbx, rcvmbx, intmbx;
17071 	char str[40];
17072 	u64 dma_mask, persist_dma_mask;
17073 	netdev_features_t features = 0;
17074 
17075 	printk_once(KERN_INFO "%s\n", version);
17076 
17077 	err = pci_enable_device(pdev);
17078 	if (err) {
17079 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17080 		return err;
17081 	}
17082 
17083 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17084 	if (err) {
17085 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17086 		goto err_out_disable_pdev;
17087 	}
17088 
17089 	pci_set_master(pdev);
17090 
17091 	/* Find power-management capability. */
17092 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17093 	if (pm_cap == 0) {
17094 		dev_err(&pdev->dev,
17095 			"Cannot find Power Management capability, aborting\n");
17096 		err = -EIO;
17097 		goto err_out_free_res;
17098 	}
17099 
17100 	err = pci_set_power_state(pdev, PCI_D0);
17101 	if (err) {
17102 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17103 		goto err_out_free_res;
17104 	}
17105 
17106 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17107 	if (!dev) {
17108 		err = -ENOMEM;
17109 		goto err_out_power_down;
17110 	}
17111 
17112 	SET_NETDEV_DEV(dev, &pdev->dev);
17113 
17114 	tp = netdev_priv(dev);
17115 	tp->pdev = pdev;
17116 	tp->dev = dev;
17117 	tp->pm_cap = pm_cap;
17118 	tp->rx_mode = TG3_DEF_RX_MODE;
17119 	tp->tx_mode = TG3_DEF_TX_MODE;
17120 	tp->irq_sync = 1;
17121 
17122 	if (tg3_debug > 0)
17123 		tp->msg_enable = tg3_debug;
17124 	else
17125 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17126 
17127 	if (pdev_is_ssb_gige_core(pdev)) {
17128 		tg3_flag_set(tp, IS_SSB_CORE);
17129 		if (ssb_gige_must_flush_posted_writes(pdev))
17130 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17131 		if (ssb_gige_one_dma_at_once(pdev))
17132 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17133 		if (ssb_gige_have_roboswitch(pdev))
17134 			tg3_flag_set(tp, ROBOSWITCH);
17135 		if (ssb_gige_is_rgmii(pdev))
17136 			tg3_flag_set(tp, RGMII_MODE);
17137 	}
17138 
17139 	/* The word/byte swap controls here control register access byte
17140 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17141 	 * setting below.
17142 	 */
17143 	tp->misc_host_ctrl =
17144 		MISC_HOST_CTRL_MASK_PCI_INT |
17145 		MISC_HOST_CTRL_WORD_SWAP |
17146 		MISC_HOST_CTRL_INDIR_ACCESS |
17147 		MISC_HOST_CTRL_PCISTATE_RW;
17148 
17149 	/* The NONFRM (non-frame) byte/word swap controls take effect
17150 	 * on descriptor entries, anything which isn't packet data.
17151 	 *
17152 	 * The StrongARM chips on the board (one for tx, one for rx)
17153 	 * are running in big-endian mode.
17154 	 */
17155 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17156 			GRC_MODE_WSWAP_NONFRM_DATA);
17157 #ifdef __BIG_ENDIAN
17158 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17159 #endif
17160 	spin_lock_init(&tp->lock);
17161 	spin_lock_init(&tp->indirect_lock);
17162 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17163 
17164 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17165 	if (!tp->regs) {
17166 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17167 		err = -ENOMEM;
17168 		goto err_out_free_dev;
17169 	}
17170 
17171 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17172 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17173 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17174 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17175 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17176 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17177 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17178 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17179 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17180 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17181 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17182 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17183 		tg3_flag_set(tp, ENABLE_APE);
17184 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17185 		if (!tp->aperegs) {
17186 			dev_err(&pdev->dev,
17187 				"Cannot map APE registers, aborting\n");
17188 			err = -ENOMEM;
17189 			goto err_out_iounmap;
17190 		}
17191 	}
17192 
17193 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17194 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17195 
17196 	dev->ethtool_ops = &tg3_ethtool_ops;
17197 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17198 	dev->netdev_ops = &tg3_netdev_ops;
17199 	dev->irq = pdev->irq;
17200 
17201 	err = tg3_get_invariants(tp, ent);
17202 	if (err) {
17203 		dev_err(&pdev->dev,
17204 			"Problem fetching invariants of chip, aborting\n");
17205 		goto err_out_apeunmap;
17206 	}
17207 
17208 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17209 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17210 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17211 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17212 	 * do DMA address check in tg3_start_xmit().
17213 	 */
17214 	if (tg3_flag(tp, IS_5788))
17215 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17216 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17217 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17218 #ifdef CONFIG_HIGHMEM
17219 		dma_mask = DMA_BIT_MASK(64);
17220 #endif
17221 	} else
17222 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17223 
17224 	/* Configure DMA attributes. */
17225 	if (dma_mask > DMA_BIT_MASK(32)) {
17226 		err = pci_set_dma_mask(pdev, dma_mask);
17227 		if (!err) {
17228 			features |= NETIF_F_HIGHDMA;
17229 			err = pci_set_consistent_dma_mask(pdev,
17230 							  persist_dma_mask);
17231 			if (err < 0) {
17232 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17233 					"DMA for consistent allocations\n");
17234 				goto err_out_apeunmap;
17235 			}
17236 		}
17237 	}
17238 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17239 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17240 		if (err) {
17241 			dev_err(&pdev->dev,
17242 				"No usable DMA configuration, aborting\n");
17243 			goto err_out_apeunmap;
17244 		}
17245 	}
17246 
17247 	tg3_init_bufmgr_config(tp);
17248 
17249 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17250 
17251 	/* 5700 B0 chips do not support checksumming correctly due
17252 	 * to hardware bugs.
17253 	 */
17254 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17255 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17256 
17257 		if (tg3_flag(tp, 5755_PLUS))
17258 			features |= NETIF_F_IPV6_CSUM;
17259 	}
17260 
17261 	/* TSO is on by default on chips that support hardware TSO.
17262 	 * Firmware TSO on older chips gives lower performance, so it
17263 	 * is off by default, but can be enabled using ethtool.
17264 	 */
17265 	if ((tg3_flag(tp, HW_TSO_1) ||
17266 	     tg3_flag(tp, HW_TSO_2) ||
17267 	     tg3_flag(tp, HW_TSO_3)) &&
17268 	    (features & NETIF_F_IP_CSUM))
17269 		features |= NETIF_F_TSO;
17270 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17271 		if (features & NETIF_F_IPV6_CSUM)
17272 			features |= NETIF_F_TSO6;
17273 		if (tg3_flag(tp, HW_TSO_3) ||
17274 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17275 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17276 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17277 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17278 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17279 			features |= NETIF_F_TSO_ECN;
17280 	}
17281 
17282 	dev->features |= features;
17283 	dev->vlan_features |= features;
17284 
17285 	/*
17286 	 * Add loopback capability only for a subset of devices that support
17287 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17288 	 * loopback for the remaining devices.
17289 	 */
17290 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17291 	    !tg3_flag(tp, CPMU_PRESENT))
17292 		/* Add the loopback capability */
17293 		features |= NETIF_F_LOOPBACK;
17294 
17295 	dev->hw_features |= features;
17296 
17297 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17298 	    !tg3_flag(tp, TSO_CAPABLE) &&
17299 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17300 		tg3_flag_set(tp, MAX_RXPEND_64);
17301 		tp->rx_pending = 63;
17302 	}
17303 
17304 	err = tg3_get_device_address(tp);
17305 	if (err) {
17306 		dev_err(&pdev->dev,
17307 			"Could not obtain valid ethernet address, aborting\n");
17308 		goto err_out_apeunmap;
17309 	}
17310 
17311 	/*
17312 	 * Reset chip in case UNDI or EFI driver did not shutdown
17313 	 * DMA self test will enable WDMAC and we'll see (spurious)
17314 	 * pending DMA on the PCI bus at that point.
17315 	 */
17316 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17317 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17318 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17319 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17320 	}
17321 
17322 	err = tg3_test_dma(tp);
17323 	if (err) {
17324 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17325 		goto err_out_apeunmap;
17326 	}
17327 
17328 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17329 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17330 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17331 	for (i = 0; i < tp->irq_max; i++) {
17332 		struct tg3_napi *tnapi = &tp->napi[i];
17333 
17334 		tnapi->tp = tp;
17335 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17336 
17337 		tnapi->int_mbox = intmbx;
17338 		if (i <= 4)
17339 			intmbx += 0x8;
17340 		else
17341 			intmbx += 0x4;
17342 
17343 		tnapi->consmbox = rcvmbx;
17344 		tnapi->prodmbox = sndmbx;
17345 
17346 		if (i)
17347 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17348 		else
17349 			tnapi->coal_now = HOSTCC_MODE_NOW;
17350 
17351 		if (!tg3_flag(tp, SUPPORT_MSIX))
17352 			break;
17353 
17354 		/*
17355 		 * If we support MSIX, we'll be using RSS.  If we're using
17356 		 * RSS, the first vector only handles link interrupts and the
17357 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17358 		 * mailbox values for the next iteration.  The values we setup
17359 		 * above are still useful for the single vectored mode.
17360 		 */
17361 		if (!i)
17362 			continue;
17363 
17364 		rcvmbx += 0x8;
17365 
17366 		if (sndmbx & 0x4)
17367 			sndmbx -= 0x4;
17368 		else
17369 			sndmbx += 0xc;
17370 	}
17371 
17372 	tg3_init_coal(tp);
17373 
17374 	pci_set_drvdata(pdev, dev);
17375 
17376 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17377 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17378 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17379 		tg3_flag_set(tp, PTP_CAPABLE);
17380 
17381 	if (tg3_flag(tp, 5717_PLUS)) {
17382 		/* Resume a low-power mode */
17383 		tg3_frob_aux_power(tp, false);
17384 	}
17385 
17386 	tg3_timer_init(tp);
17387 
17388 	tg3_carrier_off(tp);
17389 
17390 	err = register_netdev(dev);
17391 	if (err) {
17392 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17393 		goto err_out_apeunmap;
17394 	}
17395 
17396 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17397 		    tp->board_part_number,
17398 		    tg3_chip_rev_id(tp),
17399 		    tg3_bus_string(tp, str),
17400 		    dev->dev_addr);
17401 
17402 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17403 		struct phy_device *phydev;
17404 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17405 		netdev_info(dev,
17406 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17407 			    phydev->drv->name, dev_name(&phydev->dev));
17408 	} else {
17409 		char *ethtype;
17410 
17411 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17412 			ethtype = "10/100Base-TX";
17413 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17414 			ethtype = "1000Base-SX";
17415 		else
17416 			ethtype = "10/100/1000Base-T";
17417 
17418 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17419 			    "(WireSpeed[%d], EEE[%d])\n",
17420 			    tg3_phy_string(tp), ethtype,
17421 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17422 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17423 	}
17424 
17425 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17426 		    (dev->features & NETIF_F_RXCSUM) != 0,
17427 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17428 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17429 		    tg3_flag(tp, ENABLE_ASF) != 0,
17430 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17431 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17432 		    tp->dma_rwctrl,
17433 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17434 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17435 
17436 	pci_save_state(pdev);
17437 
17438 	return 0;
17439 
17440 err_out_apeunmap:
17441 	if (tp->aperegs) {
17442 		iounmap(tp->aperegs);
17443 		tp->aperegs = NULL;
17444 	}
17445 
17446 err_out_iounmap:
17447 	if (tp->regs) {
17448 		iounmap(tp->regs);
17449 		tp->regs = NULL;
17450 	}
17451 
17452 err_out_free_dev:
17453 	free_netdev(dev);
17454 
17455 err_out_power_down:
17456 	pci_set_power_state(pdev, PCI_D3hot);
17457 
17458 err_out_free_res:
17459 	pci_release_regions(pdev);
17460 
17461 err_out_disable_pdev:
17462 	pci_disable_device(pdev);
17463 	pci_set_drvdata(pdev, NULL);
17464 	return err;
17465 }
17466 
17467 static void tg3_remove_one(struct pci_dev *pdev)
17468 {
17469 	struct net_device *dev = pci_get_drvdata(pdev);
17470 
17471 	if (dev) {
17472 		struct tg3 *tp = netdev_priv(dev);
17473 
17474 		release_firmware(tp->fw);
17475 
17476 		tg3_reset_task_cancel(tp);
17477 
17478 		if (tg3_flag(tp, USE_PHYLIB)) {
17479 			tg3_phy_fini(tp);
17480 			tg3_mdio_fini(tp);
17481 		}
17482 
17483 		unregister_netdev(dev);
17484 		if (tp->aperegs) {
17485 			iounmap(tp->aperegs);
17486 			tp->aperegs = NULL;
17487 		}
17488 		if (tp->regs) {
17489 			iounmap(tp->regs);
17490 			tp->regs = NULL;
17491 		}
17492 		free_netdev(dev);
17493 		pci_release_regions(pdev);
17494 		pci_disable_device(pdev);
17495 		pci_set_drvdata(pdev, NULL);
17496 	}
17497 }
17498 
17499 #ifdef CONFIG_PM_SLEEP
17500 static int tg3_suspend(struct device *device)
17501 {
17502 	struct pci_dev *pdev = to_pci_dev(device);
17503 	struct net_device *dev = pci_get_drvdata(pdev);
17504 	struct tg3 *tp = netdev_priv(dev);
17505 	int err;
17506 
17507 	if (!netif_running(dev))
17508 		return 0;
17509 
17510 	tg3_reset_task_cancel(tp);
17511 	tg3_phy_stop(tp);
17512 	tg3_netif_stop(tp);
17513 
17514 	tg3_timer_stop(tp);
17515 
17516 	tg3_full_lock(tp, 1);
17517 	tg3_disable_ints(tp);
17518 	tg3_full_unlock(tp);
17519 
17520 	netif_device_detach(dev);
17521 
17522 	tg3_full_lock(tp, 0);
17523 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17524 	tg3_flag_clear(tp, INIT_COMPLETE);
17525 	tg3_full_unlock(tp);
17526 
17527 	err = tg3_power_down_prepare(tp);
17528 	if (err) {
17529 		int err2;
17530 
17531 		tg3_full_lock(tp, 0);
17532 
17533 		tg3_flag_set(tp, INIT_COMPLETE);
17534 		err2 = tg3_restart_hw(tp, true);
17535 		if (err2)
17536 			goto out;
17537 
17538 		tg3_timer_start(tp);
17539 
17540 		netif_device_attach(dev);
17541 		tg3_netif_start(tp);
17542 
17543 out:
17544 		tg3_full_unlock(tp);
17545 
17546 		if (!err2)
17547 			tg3_phy_start(tp);
17548 	}
17549 
17550 	return err;
17551 }
17552 
17553 static int tg3_resume(struct device *device)
17554 {
17555 	struct pci_dev *pdev = to_pci_dev(device);
17556 	struct net_device *dev = pci_get_drvdata(pdev);
17557 	struct tg3 *tp = netdev_priv(dev);
17558 	int err;
17559 
17560 	if (!netif_running(dev))
17561 		return 0;
17562 
17563 	netif_device_attach(dev);
17564 
17565 	tg3_full_lock(tp, 0);
17566 
17567 	tg3_flag_set(tp, INIT_COMPLETE);
17568 	err = tg3_restart_hw(tp,
17569 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17570 	if (err)
17571 		goto out;
17572 
17573 	tg3_timer_start(tp);
17574 
17575 	tg3_netif_start(tp);
17576 
17577 out:
17578 	tg3_full_unlock(tp);
17579 
17580 	if (!err)
17581 		tg3_phy_start(tp);
17582 
17583 	return err;
17584 }
17585 #endif /* CONFIG_PM_SLEEP */
17586 
17587 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17588 
17589 /**
17590  * tg3_io_error_detected - called when PCI error is detected
17591  * @pdev: Pointer to PCI device
17592  * @state: The current pci connection state
17593  *
17594  * This function is called after a PCI bus error affecting
17595  * this device has been detected.
17596  */
17597 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17598 					      pci_channel_state_t state)
17599 {
17600 	struct net_device *netdev = pci_get_drvdata(pdev);
17601 	struct tg3 *tp = netdev_priv(netdev);
17602 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17603 
17604 	netdev_info(netdev, "PCI I/O error detected\n");
17605 
17606 	rtnl_lock();
17607 
17608 	if (!netif_running(netdev))
17609 		goto done;
17610 
17611 	tg3_phy_stop(tp);
17612 
17613 	tg3_netif_stop(tp);
17614 
17615 	tg3_timer_stop(tp);
17616 
17617 	/* Want to make sure that the reset task doesn't run */
17618 	tg3_reset_task_cancel(tp);
17619 
17620 	netif_device_detach(netdev);
17621 
17622 	/* Clean up software state, even if MMIO is blocked */
17623 	tg3_full_lock(tp, 0);
17624 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17625 	tg3_full_unlock(tp);
17626 
17627 done:
17628 	if (state == pci_channel_io_perm_failure)
17629 		err = PCI_ERS_RESULT_DISCONNECT;
17630 	else
17631 		pci_disable_device(pdev);
17632 
17633 	rtnl_unlock();
17634 
17635 	return err;
17636 }
17637 
17638 /**
17639  * tg3_io_slot_reset - called after the pci bus has been reset.
17640  * @pdev: Pointer to PCI device
17641  *
17642  * Restart the card from scratch, as if from a cold-boot.
17643  * At this point, the card has exprienced a hard reset,
17644  * followed by fixups by BIOS, and has its config space
17645  * set up identically to what it was at cold boot.
17646  */
17647 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17648 {
17649 	struct net_device *netdev = pci_get_drvdata(pdev);
17650 	struct tg3 *tp = netdev_priv(netdev);
17651 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17652 	int err;
17653 
17654 	rtnl_lock();
17655 
17656 	if (pci_enable_device(pdev)) {
17657 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17658 		goto done;
17659 	}
17660 
17661 	pci_set_master(pdev);
17662 	pci_restore_state(pdev);
17663 	pci_save_state(pdev);
17664 
17665 	if (!netif_running(netdev)) {
17666 		rc = PCI_ERS_RESULT_RECOVERED;
17667 		goto done;
17668 	}
17669 
17670 	err = tg3_power_up(tp);
17671 	if (err)
17672 		goto done;
17673 
17674 	rc = PCI_ERS_RESULT_RECOVERED;
17675 
17676 done:
17677 	rtnl_unlock();
17678 
17679 	return rc;
17680 }
17681 
17682 /**
17683  * tg3_io_resume - called when traffic can start flowing again.
17684  * @pdev: Pointer to PCI device
17685  *
17686  * This callback is called when the error recovery driver tells
17687  * us that its OK to resume normal operation.
17688  */
17689 static void tg3_io_resume(struct pci_dev *pdev)
17690 {
17691 	struct net_device *netdev = pci_get_drvdata(pdev);
17692 	struct tg3 *tp = netdev_priv(netdev);
17693 	int err;
17694 
17695 	rtnl_lock();
17696 
17697 	if (!netif_running(netdev))
17698 		goto done;
17699 
17700 	tg3_full_lock(tp, 0);
17701 	tg3_flag_set(tp, INIT_COMPLETE);
17702 	err = tg3_restart_hw(tp, true);
17703 	if (err) {
17704 		tg3_full_unlock(tp);
17705 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17706 		goto done;
17707 	}
17708 
17709 	netif_device_attach(netdev);
17710 
17711 	tg3_timer_start(tp);
17712 
17713 	tg3_netif_start(tp);
17714 
17715 	tg3_full_unlock(tp);
17716 
17717 	tg3_phy_start(tp);
17718 
17719 done:
17720 	rtnl_unlock();
17721 }
17722 
17723 static const struct pci_error_handlers tg3_err_handler = {
17724 	.error_detected	= tg3_io_error_detected,
17725 	.slot_reset	= tg3_io_slot_reset,
17726 	.resume		= tg3_io_resume
17727 };
17728 
17729 static struct pci_driver tg3_driver = {
17730 	.name		= DRV_MODULE_NAME,
17731 	.id_table	= tg3_pci_tbl,
17732 	.probe		= tg3_init_one,
17733 	.remove		= tg3_remove_one,
17734 	.err_handler	= &tg3_err_handler,
17735 	.driver.pm	= &tg3_pm_ops,
17736 };
17737 
17738 static int __init tg3_init(void)
17739 {
17740 	return pci_register_driver(&tg3_driver);
17741 }
17742 
17743 static void __exit tg3_cleanup(void)
17744 {
17745 	pci_unregister_driver(&tg3_driver);
17746 }
17747 
17748 module_init(tg3_init);
17749 module_exit(tg3_cleanup);
17750