1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 
51 #include <net/checksum.h>
52 #include <net/ip.h>
53 
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57 
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60 
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65 
66 #define BAR_0	0
67 #define BAR_2	2
68 
69 #include "tg3.h"
70 
71 /* Functions & macros to verify TG3_FLAGS types */
72 
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	return test_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	set_bit(flag, bits);
81 }
82 
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	clear_bit(flag, bits);
86 }
87 
88 #define tg3_flag(tp, flag)				\
89 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)				\
91 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)			\
93 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94 
95 #define DRV_MODULE_NAME		"tg3"
96 #define TG3_MAJ_NUM			3
97 #define TG3_MIN_NUM			134
98 #define DRV_MODULE_VERSION	\
99 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE	"Sep 16, 2013"
101 
102 #define RESET_KIND_SHUTDOWN	0
103 #define RESET_KIND_INIT		1
104 #define RESET_KIND_SUSPEND	2
105 
106 #define TG3_DEF_RX_MODE		0
107 #define TG3_DEF_TX_MODE		0
108 #define TG3_DEF_MSG_ENABLE	  \
109 	(NETIF_MSG_DRV		| \
110 	 NETIF_MSG_PROBE	| \
111 	 NETIF_MSG_LINK		| \
112 	 NETIF_MSG_TIMER	| \
113 	 NETIF_MSG_IFDOWN	| \
114 	 NETIF_MSG_IFUP		| \
115 	 NETIF_MSG_RX_ERR	| \
116 	 NETIF_MSG_TX_ERR)
117 
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119 
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123 
124 #define TG3_TX_TIMEOUT			(5 * HZ)
125 
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU			60
128 #define TG3_MAX_MTU(tp)	\
129 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING		200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
143 
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150 
151 #define TG3_TX_RING_SIZE		512
152 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153 
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161 				 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
163 
164 #define TG3_DMA_BYTE_ENAB		64
165 
166 #define TG3_RX_STD_DMA_SZ		1536
167 #define TG3_RX_JMB_DMA_SZ		9046
168 
169 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170 
171 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD		256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194 #else
195 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196 #endif
197 
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202 #endif
203 
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K		2048
207 #define TG3_TX_BD_DMA_MAX_4K		4096
208 
209 #define TG3_RAW_IP_ALIGN 2
210 
211 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
212 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 
214 #define FIRMWARE_TG3		"tigon/tg3.bin"
215 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
218 
219 static char version[] =
220 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221 
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 
230 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
236 
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 			TG3_DRV_DATA_FLAG_5705_10_100},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 			PCI_VENDOR_ID_LENOVO,
288 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
352 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
353 	{}
354 };
355 
356 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
357 
358 static const struct {
359 	const char string[ETH_GSTRING_LEN];
360 } ethtool_stats_keys[] = {
361 	{ "rx_octets" },
362 	{ "rx_fragments" },
363 	{ "rx_ucast_packets" },
364 	{ "rx_mcast_packets" },
365 	{ "rx_bcast_packets" },
366 	{ "rx_fcs_errors" },
367 	{ "rx_align_errors" },
368 	{ "rx_xon_pause_rcvd" },
369 	{ "rx_xoff_pause_rcvd" },
370 	{ "rx_mac_ctrl_rcvd" },
371 	{ "rx_xoff_entered" },
372 	{ "rx_frame_too_long_errors" },
373 	{ "rx_jabbers" },
374 	{ "rx_undersize_packets" },
375 	{ "rx_in_length_errors" },
376 	{ "rx_out_length_errors" },
377 	{ "rx_64_or_less_octet_packets" },
378 	{ "rx_65_to_127_octet_packets" },
379 	{ "rx_128_to_255_octet_packets" },
380 	{ "rx_256_to_511_octet_packets" },
381 	{ "rx_512_to_1023_octet_packets" },
382 	{ "rx_1024_to_1522_octet_packets" },
383 	{ "rx_1523_to_2047_octet_packets" },
384 	{ "rx_2048_to_4095_octet_packets" },
385 	{ "rx_4096_to_8191_octet_packets" },
386 	{ "rx_8192_to_9022_octet_packets" },
387 
388 	{ "tx_octets" },
389 	{ "tx_collisions" },
390 
391 	{ "tx_xon_sent" },
392 	{ "tx_xoff_sent" },
393 	{ "tx_flow_control" },
394 	{ "tx_mac_errors" },
395 	{ "tx_single_collisions" },
396 	{ "tx_mult_collisions" },
397 	{ "tx_deferred" },
398 	{ "tx_excessive_collisions" },
399 	{ "tx_late_collisions" },
400 	{ "tx_collide_2times" },
401 	{ "tx_collide_3times" },
402 	{ "tx_collide_4times" },
403 	{ "tx_collide_5times" },
404 	{ "tx_collide_6times" },
405 	{ "tx_collide_7times" },
406 	{ "tx_collide_8times" },
407 	{ "tx_collide_9times" },
408 	{ "tx_collide_10times" },
409 	{ "tx_collide_11times" },
410 	{ "tx_collide_12times" },
411 	{ "tx_collide_13times" },
412 	{ "tx_collide_14times" },
413 	{ "tx_collide_15times" },
414 	{ "tx_ucast_packets" },
415 	{ "tx_mcast_packets" },
416 	{ "tx_bcast_packets" },
417 	{ "tx_carrier_sense_errors" },
418 	{ "tx_discards" },
419 	{ "tx_errors" },
420 
421 	{ "dma_writeq_full" },
422 	{ "dma_write_prioq_full" },
423 	{ "rxbds_empty" },
424 	{ "rx_discards" },
425 	{ "rx_errors" },
426 	{ "rx_threshold_hit" },
427 
428 	{ "dma_readq_full" },
429 	{ "dma_read_prioq_full" },
430 	{ "tx_comp_queue_full" },
431 
432 	{ "ring_set_send_prod_index" },
433 	{ "ring_status_update" },
434 	{ "nic_irqs" },
435 	{ "nic_avoided_irqs" },
436 	{ "nic_tx_threshold_hit" },
437 
438 	{ "mbuf_lwm_thresh_hit" },
439 };
440 
441 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
442 #define TG3_NVRAM_TEST		0
443 #define TG3_LINK_TEST		1
444 #define TG3_REGISTER_TEST	2
445 #define TG3_MEMORY_TEST		3
446 #define TG3_MAC_LOOPB_TEST	4
447 #define TG3_PHY_LOOPB_TEST	5
448 #define TG3_EXT_LOOPB_TEST	6
449 #define TG3_INTERRUPT_TEST	7
450 
451 
452 static const struct {
453 	const char string[ETH_GSTRING_LEN];
454 } ethtool_test_keys[] = {
455 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
456 	[TG3_LINK_TEST]		= { "link test         (online) " },
457 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
458 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
459 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
460 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
461 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
462 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
463 };
464 
465 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
466 
467 
468 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
469 {
470 	writel(val, tp->regs + off);
471 }
472 
473 static u32 tg3_read32(struct tg3 *tp, u32 off)
474 {
475 	return readl(tp->regs + off);
476 }
477 
478 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
479 {
480 	writel(val, tp->aperegs + off);
481 }
482 
483 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
484 {
485 	return readl(tp->aperegs + off);
486 }
487 
488 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
489 {
490 	unsigned long flags;
491 
492 	spin_lock_irqsave(&tp->indirect_lock, flags);
493 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
494 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
495 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
496 }
497 
498 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
499 {
500 	writel(val, tp->regs + off);
501 	readl(tp->regs + off);
502 }
503 
504 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
505 {
506 	unsigned long flags;
507 	u32 val;
508 
509 	spin_lock_irqsave(&tp->indirect_lock, flags);
510 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
511 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
512 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
513 	return val;
514 }
515 
516 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
517 {
518 	unsigned long flags;
519 
520 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
521 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
522 				       TG3_64BIT_REG_LOW, val);
523 		return;
524 	}
525 	if (off == TG3_RX_STD_PROD_IDX_REG) {
526 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
527 				       TG3_64BIT_REG_LOW, val);
528 		return;
529 	}
530 
531 	spin_lock_irqsave(&tp->indirect_lock, flags);
532 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
533 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
534 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
535 
536 	/* In indirect mode when disabling interrupts, we also need
537 	 * to clear the interrupt bit in the GRC local ctrl register.
538 	 */
539 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
540 	    (val == 0x1)) {
541 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
542 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
543 	}
544 }
545 
546 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
547 {
548 	unsigned long flags;
549 	u32 val;
550 
551 	spin_lock_irqsave(&tp->indirect_lock, flags);
552 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
553 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
554 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
555 	return val;
556 }
557 
558 /* usec_wait specifies the wait time in usec when writing to certain registers
559  * where it is unsafe to read back the register without some delay.
560  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
561  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
562  */
563 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
564 {
565 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
566 		/* Non-posted methods */
567 		tp->write32(tp, off, val);
568 	else {
569 		/* Posted method */
570 		tg3_write32(tp, off, val);
571 		if (usec_wait)
572 			udelay(usec_wait);
573 		tp->read32(tp, off);
574 	}
575 	/* Wait again after the read for the posted method to guarantee that
576 	 * the wait time is met.
577 	 */
578 	if (usec_wait)
579 		udelay(usec_wait);
580 }
581 
582 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
583 {
584 	tp->write32_mbox(tp, off, val);
585 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
586 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
587 	     !tg3_flag(tp, ICH_WORKAROUND)))
588 		tp->read32_mbox(tp, off);
589 }
590 
591 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
592 {
593 	void __iomem *mbox = tp->regs + off;
594 	writel(val, mbox);
595 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
596 		writel(val, mbox);
597 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
598 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
599 		readl(mbox);
600 }
601 
602 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
603 {
604 	return readl(tp->regs + off + GRCMBOX_BASE);
605 }
606 
607 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
608 {
609 	writel(val, tp->regs + off + GRCMBOX_BASE);
610 }
611 
612 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
613 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
614 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
615 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
616 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
617 
618 #define tw32(reg, val)			tp->write32(tp, reg, val)
619 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
620 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
621 #define tr32(reg)			tp->read32(tp, reg)
622 
623 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
624 {
625 	unsigned long flags;
626 
627 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
628 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
629 		return;
630 
631 	spin_lock_irqsave(&tp->indirect_lock, flags);
632 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
633 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
634 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
635 
636 		/* Always leave this as zero. */
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
638 	} else {
639 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
640 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
641 
642 		/* Always leave this as zero. */
643 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
644 	}
645 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
646 }
647 
648 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
649 {
650 	unsigned long flags;
651 
652 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
653 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
654 		*val = 0;
655 		return;
656 	}
657 
658 	spin_lock_irqsave(&tp->indirect_lock, flags);
659 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
660 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
661 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
662 
663 		/* Always leave this as zero. */
664 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
665 	} else {
666 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
667 		*val = tr32(TG3PCI_MEM_WIN_DATA);
668 
669 		/* Always leave this as zero. */
670 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
671 	}
672 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
673 }
674 
675 static void tg3_ape_lock_init(struct tg3 *tp)
676 {
677 	int i;
678 	u32 regbase, bit;
679 
680 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
681 		regbase = TG3_APE_LOCK_GRANT;
682 	else
683 		regbase = TG3_APE_PER_LOCK_GRANT;
684 
685 	/* Make sure the driver hasn't any stale locks. */
686 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
687 		switch (i) {
688 		case TG3_APE_LOCK_PHY0:
689 		case TG3_APE_LOCK_PHY1:
690 		case TG3_APE_LOCK_PHY2:
691 		case TG3_APE_LOCK_PHY3:
692 			bit = APE_LOCK_GRANT_DRIVER;
693 			break;
694 		default:
695 			if (!tp->pci_fn)
696 				bit = APE_LOCK_GRANT_DRIVER;
697 			else
698 				bit = 1 << tp->pci_fn;
699 		}
700 		tg3_ape_write32(tp, regbase + 4 * i, bit);
701 	}
702 
703 }
704 
705 static int tg3_ape_lock(struct tg3 *tp, int locknum)
706 {
707 	int i, off;
708 	int ret = 0;
709 	u32 status, req, gnt, bit;
710 
711 	if (!tg3_flag(tp, ENABLE_APE))
712 		return 0;
713 
714 	switch (locknum) {
715 	case TG3_APE_LOCK_GPIO:
716 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
717 			return 0;
718 	case TG3_APE_LOCK_GRC:
719 	case TG3_APE_LOCK_MEM:
720 		if (!tp->pci_fn)
721 			bit = APE_LOCK_REQ_DRIVER;
722 		else
723 			bit = 1 << tp->pci_fn;
724 		break;
725 	case TG3_APE_LOCK_PHY0:
726 	case TG3_APE_LOCK_PHY1:
727 	case TG3_APE_LOCK_PHY2:
728 	case TG3_APE_LOCK_PHY3:
729 		bit = APE_LOCK_REQ_DRIVER;
730 		break;
731 	default:
732 		return -EINVAL;
733 	}
734 
735 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
736 		req = TG3_APE_LOCK_REQ;
737 		gnt = TG3_APE_LOCK_GRANT;
738 	} else {
739 		req = TG3_APE_PER_LOCK_REQ;
740 		gnt = TG3_APE_PER_LOCK_GRANT;
741 	}
742 
743 	off = 4 * locknum;
744 
745 	tg3_ape_write32(tp, req + off, bit);
746 
747 	/* Wait for up to 1 millisecond to acquire lock. */
748 	for (i = 0; i < 100; i++) {
749 		status = tg3_ape_read32(tp, gnt + off);
750 		if (status == bit)
751 			break;
752 		if (pci_channel_offline(tp->pdev))
753 			break;
754 
755 		udelay(10);
756 	}
757 
758 	if (status != bit) {
759 		/* Revoke the lock request. */
760 		tg3_ape_write32(tp, gnt + off, bit);
761 		ret = -EBUSY;
762 	}
763 
764 	return ret;
765 }
766 
767 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
768 {
769 	u32 gnt, bit;
770 
771 	if (!tg3_flag(tp, ENABLE_APE))
772 		return;
773 
774 	switch (locknum) {
775 	case TG3_APE_LOCK_GPIO:
776 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
777 			return;
778 	case TG3_APE_LOCK_GRC:
779 	case TG3_APE_LOCK_MEM:
780 		if (!tp->pci_fn)
781 			bit = APE_LOCK_GRANT_DRIVER;
782 		else
783 			bit = 1 << tp->pci_fn;
784 		break;
785 	case TG3_APE_LOCK_PHY0:
786 	case TG3_APE_LOCK_PHY1:
787 	case TG3_APE_LOCK_PHY2:
788 	case TG3_APE_LOCK_PHY3:
789 		bit = APE_LOCK_GRANT_DRIVER;
790 		break;
791 	default:
792 		return;
793 	}
794 
795 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
796 		gnt = TG3_APE_LOCK_GRANT;
797 	else
798 		gnt = TG3_APE_PER_LOCK_GRANT;
799 
800 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
801 }
802 
803 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
804 {
805 	u32 apedata;
806 
807 	while (timeout_us) {
808 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
809 			return -EBUSY;
810 
811 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
812 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
813 			break;
814 
815 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
816 
817 		udelay(10);
818 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
819 	}
820 
821 	return timeout_us ? 0 : -EBUSY;
822 }
823 
824 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
825 {
826 	u32 i, apedata;
827 
828 	for (i = 0; i < timeout_us / 10; i++) {
829 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
830 
831 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
832 			break;
833 
834 		udelay(10);
835 	}
836 
837 	return i == timeout_us / 10;
838 }
839 
840 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
841 				   u32 len)
842 {
843 	int err;
844 	u32 i, bufoff, msgoff, maxlen, apedata;
845 
846 	if (!tg3_flag(tp, APE_HAS_NCSI))
847 		return 0;
848 
849 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
850 	if (apedata != APE_SEG_SIG_MAGIC)
851 		return -ENODEV;
852 
853 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
854 	if (!(apedata & APE_FW_STATUS_READY))
855 		return -EAGAIN;
856 
857 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
858 		 TG3_APE_SHMEM_BASE;
859 	msgoff = bufoff + 2 * sizeof(u32);
860 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
861 
862 	while (len) {
863 		u32 length;
864 
865 		/* Cap xfer sizes to scratchpad limits. */
866 		length = (len > maxlen) ? maxlen : len;
867 		len -= length;
868 
869 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
870 		if (!(apedata & APE_FW_STATUS_READY))
871 			return -EAGAIN;
872 
873 		/* Wait for up to 1 msec for APE to service previous event. */
874 		err = tg3_ape_event_lock(tp, 1000);
875 		if (err)
876 			return err;
877 
878 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
879 			  APE_EVENT_STATUS_SCRTCHPD_READ |
880 			  APE_EVENT_STATUS_EVENT_PENDING;
881 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
882 
883 		tg3_ape_write32(tp, bufoff, base_off);
884 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
885 
886 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
887 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
888 
889 		base_off += length;
890 
891 		if (tg3_ape_wait_for_event(tp, 30000))
892 			return -EAGAIN;
893 
894 		for (i = 0; length; i += 4, length -= 4) {
895 			u32 val = tg3_ape_read32(tp, msgoff + i);
896 			memcpy(data, &val, sizeof(u32));
897 			data++;
898 		}
899 	}
900 
901 	return 0;
902 }
903 
904 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
905 {
906 	int err;
907 	u32 apedata;
908 
909 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
910 	if (apedata != APE_SEG_SIG_MAGIC)
911 		return -EAGAIN;
912 
913 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
914 	if (!(apedata & APE_FW_STATUS_READY))
915 		return -EAGAIN;
916 
917 	/* Wait for up to 1 millisecond for APE to service previous event. */
918 	err = tg3_ape_event_lock(tp, 1000);
919 	if (err)
920 		return err;
921 
922 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
923 			event | APE_EVENT_STATUS_EVENT_PENDING);
924 
925 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
926 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
927 
928 	return 0;
929 }
930 
931 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
932 {
933 	u32 event;
934 	u32 apedata;
935 
936 	if (!tg3_flag(tp, ENABLE_APE))
937 		return;
938 
939 	switch (kind) {
940 	case RESET_KIND_INIT:
941 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
942 				APE_HOST_SEG_SIG_MAGIC);
943 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
944 				APE_HOST_SEG_LEN_MAGIC);
945 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
946 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
947 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
948 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
949 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
950 				APE_HOST_BEHAV_NO_PHYLOCK);
951 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
952 				    TG3_APE_HOST_DRVR_STATE_START);
953 
954 		event = APE_EVENT_STATUS_STATE_START;
955 		break;
956 	case RESET_KIND_SHUTDOWN:
957 		/* With the interface we are currently using,
958 		 * APE does not track driver state.  Wiping
959 		 * out the HOST SEGMENT SIGNATURE forces
960 		 * the APE to assume OS absent status.
961 		 */
962 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
963 
964 		if (device_may_wakeup(&tp->pdev->dev) &&
965 		    tg3_flag(tp, WOL_ENABLE)) {
966 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 					    TG3_APE_HOST_WOL_SPEED_AUTO);
968 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 		} else
970 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971 
972 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973 
974 		event = APE_EVENT_STATUS_STATE_UNLOAD;
975 		break;
976 	default:
977 		return;
978 	}
979 
980 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981 
982 	tg3_ape_send_event(tp, event);
983 }
984 
985 static void tg3_disable_ints(struct tg3 *tp)
986 {
987 	int i;
988 
989 	tw32(TG3PCI_MISC_HOST_CTRL,
990 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
991 	for (i = 0; i < tp->irq_max; i++)
992 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
993 }
994 
995 static void tg3_enable_ints(struct tg3 *tp)
996 {
997 	int i;
998 
999 	tp->irq_sync = 0;
1000 	wmb();
1001 
1002 	tw32(TG3PCI_MISC_HOST_CTRL,
1003 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1004 
1005 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1006 	for (i = 0; i < tp->irq_cnt; i++) {
1007 		struct tg3_napi *tnapi = &tp->napi[i];
1008 
1009 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1010 		if (tg3_flag(tp, 1SHOT_MSI))
1011 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1012 
1013 		tp->coal_now |= tnapi->coal_now;
1014 	}
1015 
1016 	/* Force an initial interrupt */
1017 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1018 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1019 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1020 	else
1021 		tw32(HOSTCC_MODE, tp->coal_now);
1022 
1023 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1024 }
1025 
1026 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1027 {
1028 	struct tg3 *tp = tnapi->tp;
1029 	struct tg3_hw_status *sblk = tnapi->hw_status;
1030 	unsigned int work_exists = 0;
1031 
1032 	/* check for phy events */
1033 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1034 		if (sblk->status & SD_STATUS_LINK_CHG)
1035 			work_exists = 1;
1036 	}
1037 
1038 	/* check for TX work to do */
1039 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1040 		work_exists = 1;
1041 
1042 	/* check for RX work to do */
1043 	if (tnapi->rx_rcb_prod_idx &&
1044 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1045 		work_exists = 1;
1046 
1047 	return work_exists;
1048 }
1049 
1050 /* tg3_int_reenable
1051  *  similar to tg3_enable_ints, but it accurately determines whether there
1052  *  is new work pending and can return without flushing the PIO write
1053  *  which reenables interrupts
1054  */
1055 static void tg3_int_reenable(struct tg3_napi *tnapi)
1056 {
1057 	struct tg3 *tp = tnapi->tp;
1058 
1059 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1060 	mmiowb();
1061 
1062 	/* When doing tagged status, this work check is unnecessary.
1063 	 * The last_tag we write above tells the chip which piece of
1064 	 * work we've completed.
1065 	 */
1066 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1067 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1068 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1069 }
1070 
1071 static void tg3_switch_clocks(struct tg3 *tp)
1072 {
1073 	u32 clock_ctrl;
1074 	u32 orig_clock_ctrl;
1075 
1076 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1077 		return;
1078 
1079 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1080 
1081 	orig_clock_ctrl = clock_ctrl;
1082 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1083 		       CLOCK_CTRL_CLKRUN_OENABLE |
1084 		       0x1f);
1085 	tp->pci_clock_ctrl = clock_ctrl;
1086 
1087 	if (tg3_flag(tp, 5705_PLUS)) {
1088 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1089 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1091 		}
1092 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1093 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1094 			    clock_ctrl |
1095 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1096 			    40);
1097 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1098 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1099 			    40);
1100 	}
1101 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1102 }
1103 
1104 #define PHY_BUSY_LOOPS	5000
1105 
1106 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1107 			 u32 *val)
1108 {
1109 	u32 frame_val;
1110 	unsigned int loops;
1111 	int ret;
1112 
1113 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1114 		tw32_f(MAC_MI_MODE,
1115 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1116 		udelay(80);
1117 	}
1118 
1119 	tg3_ape_lock(tp, tp->phy_ape_lock);
1120 
1121 	*val = 0x0;
1122 
1123 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1124 		      MI_COM_PHY_ADDR_MASK);
1125 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1126 		      MI_COM_REG_ADDR_MASK);
1127 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1128 
1129 	tw32_f(MAC_MI_COM, frame_val);
1130 
1131 	loops = PHY_BUSY_LOOPS;
1132 	while (loops != 0) {
1133 		udelay(10);
1134 		frame_val = tr32(MAC_MI_COM);
1135 
1136 		if ((frame_val & MI_COM_BUSY) == 0) {
1137 			udelay(5);
1138 			frame_val = tr32(MAC_MI_COM);
1139 			break;
1140 		}
1141 		loops -= 1;
1142 	}
1143 
1144 	ret = -EBUSY;
1145 	if (loops != 0) {
1146 		*val = frame_val & MI_COM_DATA_MASK;
1147 		ret = 0;
1148 	}
1149 
1150 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1151 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1152 		udelay(80);
1153 	}
1154 
1155 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1156 
1157 	return ret;
1158 }
1159 
1160 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1161 {
1162 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1163 }
1164 
1165 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1166 			  u32 val)
1167 {
1168 	u32 frame_val;
1169 	unsigned int loops;
1170 	int ret;
1171 
1172 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1173 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1174 		return 0;
1175 
1176 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1177 		tw32_f(MAC_MI_MODE,
1178 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1179 		udelay(80);
1180 	}
1181 
1182 	tg3_ape_lock(tp, tp->phy_ape_lock);
1183 
1184 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1185 		      MI_COM_PHY_ADDR_MASK);
1186 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1187 		      MI_COM_REG_ADDR_MASK);
1188 	frame_val |= (val & MI_COM_DATA_MASK);
1189 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1190 
1191 	tw32_f(MAC_MI_COM, frame_val);
1192 
1193 	loops = PHY_BUSY_LOOPS;
1194 	while (loops != 0) {
1195 		udelay(10);
1196 		frame_val = tr32(MAC_MI_COM);
1197 		if ((frame_val & MI_COM_BUSY) == 0) {
1198 			udelay(5);
1199 			frame_val = tr32(MAC_MI_COM);
1200 			break;
1201 		}
1202 		loops -= 1;
1203 	}
1204 
1205 	ret = -EBUSY;
1206 	if (loops != 0)
1207 		ret = 0;
1208 
1209 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1210 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1211 		udelay(80);
1212 	}
1213 
1214 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1215 
1216 	return ret;
1217 }
1218 
1219 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1220 {
1221 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1222 }
1223 
1224 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1225 {
1226 	int err;
1227 
1228 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1229 	if (err)
1230 		goto done;
1231 
1232 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1233 	if (err)
1234 		goto done;
1235 
1236 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1237 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1238 	if (err)
1239 		goto done;
1240 
1241 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 
1243 done:
1244 	return err;
1245 }
1246 
1247 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1248 {
1249 	int err;
1250 
1251 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1252 	if (err)
1253 		goto done;
1254 
1255 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1256 	if (err)
1257 		goto done;
1258 
1259 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1260 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1261 	if (err)
1262 		goto done;
1263 
1264 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 
1266 done:
1267 	return err;
1268 }
1269 
1270 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1271 {
1272 	int err;
1273 
1274 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1275 	if (!err)
1276 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1277 
1278 	return err;
1279 }
1280 
1281 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1282 {
1283 	int err;
1284 
1285 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 	if (!err)
1287 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1288 
1289 	return err;
1290 }
1291 
1292 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1293 {
1294 	int err;
1295 
1296 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1297 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1298 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1299 	if (!err)
1300 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1301 
1302 	return err;
1303 }
1304 
1305 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1306 {
1307 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1308 		set |= MII_TG3_AUXCTL_MISC_WREN;
1309 
1310 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1311 }
1312 
1313 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1314 {
1315 	u32 val;
1316 	int err;
1317 
1318 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 
1320 	if (err)
1321 		return err;
1322 
1323 	if (enable)
1324 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1325 	else
1326 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1327 
1328 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1329 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1330 
1331 	return err;
1332 }
1333 
1334 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1335 {
1336 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1337 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1338 }
1339 
1340 static int tg3_bmcr_reset(struct tg3 *tp)
1341 {
1342 	u32 phy_control;
1343 	int limit, err;
1344 
1345 	/* OK, reset it, and poll the BMCR_RESET bit until it
1346 	 * clears or we time out.
1347 	 */
1348 	phy_control = BMCR_RESET;
1349 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1350 	if (err != 0)
1351 		return -EBUSY;
1352 
1353 	limit = 5000;
1354 	while (limit--) {
1355 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1356 		if (err != 0)
1357 			return -EBUSY;
1358 
1359 		if ((phy_control & BMCR_RESET) == 0) {
1360 			udelay(40);
1361 			break;
1362 		}
1363 		udelay(10);
1364 	}
1365 	if (limit < 0)
1366 		return -EBUSY;
1367 
1368 	return 0;
1369 }
1370 
1371 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1372 {
1373 	struct tg3 *tp = bp->priv;
1374 	u32 val;
1375 
1376 	spin_lock_bh(&tp->lock);
1377 
1378 	if (__tg3_readphy(tp, mii_id, reg, &val))
1379 		val = -EIO;
1380 
1381 	spin_unlock_bh(&tp->lock);
1382 
1383 	return val;
1384 }
1385 
1386 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1387 {
1388 	struct tg3 *tp = bp->priv;
1389 	u32 ret = 0;
1390 
1391 	spin_lock_bh(&tp->lock);
1392 
1393 	if (__tg3_writephy(tp, mii_id, reg, val))
1394 		ret = -EIO;
1395 
1396 	spin_unlock_bh(&tp->lock);
1397 
1398 	return ret;
1399 }
1400 
1401 static int tg3_mdio_reset(struct mii_bus *bp)
1402 {
1403 	return 0;
1404 }
1405 
1406 static void tg3_mdio_config_5785(struct tg3 *tp)
1407 {
1408 	u32 val;
1409 	struct phy_device *phydev;
1410 
1411 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1412 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1413 	case PHY_ID_BCM50610:
1414 	case PHY_ID_BCM50610M:
1415 		val = MAC_PHYCFG2_50610_LED_MODES;
1416 		break;
1417 	case PHY_ID_BCMAC131:
1418 		val = MAC_PHYCFG2_AC131_LED_MODES;
1419 		break;
1420 	case PHY_ID_RTL8211C:
1421 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1422 		break;
1423 	case PHY_ID_RTL8201E:
1424 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1425 		break;
1426 	default:
1427 		return;
1428 	}
1429 
1430 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1431 		tw32(MAC_PHYCFG2, val);
1432 
1433 		val = tr32(MAC_PHYCFG1);
1434 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1435 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1436 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1437 		tw32(MAC_PHYCFG1, val);
1438 
1439 		return;
1440 	}
1441 
1442 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1443 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1444 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1445 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1446 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1447 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1448 		       MAC_PHYCFG2_INBAND_ENABLE;
1449 
1450 	tw32(MAC_PHYCFG2, val);
1451 
1452 	val = tr32(MAC_PHYCFG1);
1453 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1454 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1455 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1456 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1457 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1458 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1459 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1460 	}
1461 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1462 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1463 	tw32(MAC_PHYCFG1, val);
1464 
1465 	val = tr32(MAC_EXT_RGMII_MODE);
1466 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1467 		 MAC_RGMII_MODE_RX_QUALITY |
1468 		 MAC_RGMII_MODE_RX_ACTIVITY |
1469 		 MAC_RGMII_MODE_RX_ENG_DET |
1470 		 MAC_RGMII_MODE_TX_ENABLE |
1471 		 MAC_RGMII_MODE_TX_LOWPWR |
1472 		 MAC_RGMII_MODE_TX_RESET);
1473 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1474 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1475 			val |= MAC_RGMII_MODE_RX_INT_B |
1476 			       MAC_RGMII_MODE_RX_QUALITY |
1477 			       MAC_RGMII_MODE_RX_ACTIVITY |
1478 			       MAC_RGMII_MODE_RX_ENG_DET;
1479 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1480 			val |= MAC_RGMII_MODE_TX_ENABLE |
1481 			       MAC_RGMII_MODE_TX_LOWPWR |
1482 			       MAC_RGMII_MODE_TX_RESET;
1483 	}
1484 	tw32(MAC_EXT_RGMII_MODE, val);
1485 }
1486 
1487 static void tg3_mdio_start(struct tg3 *tp)
1488 {
1489 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1490 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1491 	udelay(80);
1492 
1493 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1494 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1495 		tg3_mdio_config_5785(tp);
1496 }
1497 
1498 static int tg3_mdio_init(struct tg3 *tp)
1499 {
1500 	int i;
1501 	u32 reg;
1502 	struct phy_device *phydev;
1503 
1504 	if (tg3_flag(tp, 5717_PLUS)) {
1505 		u32 is_serdes;
1506 
1507 		tp->phy_addr = tp->pci_fn + 1;
1508 
1509 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1510 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1511 		else
1512 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1513 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1514 		if (is_serdes)
1515 			tp->phy_addr += 7;
1516 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1517 		int addr;
1518 
1519 		addr = ssb_gige_get_phyaddr(tp->pdev);
1520 		if (addr < 0)
1521 			return addr;
1522 		tp->phy_addr = addr;
1523 	} else
1524 		tp->phy_addr = TG3_PHY_MII_ADDR;
1525 
1526 	tg3_mdio_start(tp);
1527 
1528 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1529 		return 0;
1530 
1531 	tp->mdio_bus = mdiobus_alloc();
1532 	if (tp->mdio_bus == NULL)
1533 		return -ENOMEM;
1534 
1535 	tp->mdio_bus->name     = "tg3 mdio bus";
1536 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1537 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1538 	tp->mdio_bus->priv     = tp;
1539 	tp->mdio_bus->parent   = &tp->pdev->dev;
1540 	tp->mdio_bus->read     = &tg3_mdio_read;
1541 	tp->mdio_bus->write    = &tg3_mdio_write;
1542 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1543 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1544 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1545 
1546 	for (i = 0; i < PHY_MAX_ADDR; i++)
1547 		tp->mdio_bus->irq[i] = PHY_POLL;
1548 
1549 	/* The bus registration will look for all the PHYs on the mdio bus.
1550 	 * Unfortunately, it does not ensure the PHY is powered up before
1551 	 * accessing the PHY ID registers.  A chip reset is the
1552 	 * quickest way to bring the device back to an operational state..
1553 	 */
1554 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 		tg3_bmcr_reset(tp);
1556 
1557 	i = mdiobus_register(tp->mdio_bus);
1558 	if (i) {
1559 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 		mdiobus_free(tp->mdio_bus);
1561 		return i;
1562 	}
1563 
1564 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1565 
1566 	if (!phydev || !phydev->drv) {
1567 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 		mdiobus_unregister(tp->mdio_bus);
1569 		mdiobus_free(tp->mdio_bus);
1570 		return -ENODEV;
1571 	}
1572 
1573 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 	case PHY_ID_BCM57780:
1575 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 		break;
1578 	case PHY_ID_BCM50610:
1579 	case PHY_ID_BCM50610M:
1580 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 				     PHY_BRCM_RX_REFCLK_UNUSED |
1582 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1585 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1586 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1587 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1588 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1589 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1590 		/* fallthru */
1591 	case PHY_ID_RTL8211C:
1592 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1593 		break;
1594 	case PHY_ID_RTL8201E:
1595 	case PHY_ID_BCMAC131:
1596 		phydev->interface = PHY_INTERFACE_MODE_MII;
1597 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1598 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1599 		break;
1600 	}
1601 
1602 	tg3_flag_set(tp, MDIOBUS_INITED);
1603 
1604 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1605 		tg3_mdio_config_5785(tp);
1606 
1607 	return 0;
1608 }
1609 
1610 static void tg3_mdio_fini(struct tg3 *tp)
1611 {
1612 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1613 		tg3_flag_clear(tp, MDIOBUS_INITED);
1614 		mdiobus_unregister(tp->mdio_bus);
1615 		mdiobus_free(tp->mdio_bus);
1616 	}
1617 }
1618 
1619 /* tp->lock is held. */
1620 static inline void tg3_generate_fw_event(struct tg3 *tp)
1621 {
1622 	u32 val;
1623 
1624 	val = tr32(GRC_RX_CPU_EVENT);
1625 	val |= GRC_RX_CPU_DRIVER_EVENT;
1626 	tw32_f(GRC_RX_CPU_EVENT, val);
1627 
1628 	tp->last_event_jiffies = jiffies;
1629 }
1630 
1631 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1632 
1633 /* tp->lock is held. */
1634 static void tg3_wait_for_event_ack(struct tg3 *tp)
1635 {
1636 	int i;
1637 	unsigned int delay_cnt;
1638 	long time_remain;
1639 
1640 	/* If enough time has passed, no wait is necessary. */
1641 	time_remain = (long)(tp->last_event_jiffies + 1 +
1642 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1643 		      (long)jiffies;
1644 	if (time_remain < 0)
1645 		return;
1646 
1647 	/* Check if we can shorten the wait time. */
1648 	delay_cnt = jiffies_to_usecs(time_remain);
1649 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1650 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1651 	delay_cnt = (delay_cnt >> 3) + 1;
1652 
1653 	for (i = 0; i < delay_cnt; i++) {
1654 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1655 			break;
1656 		if (pci_channel_offline(tp->pdev))
1657 			break;
1658 
1659 		udelay(8);
1660 	}
1661 }
1662 
1663 /* tp->lock is held. */
1664 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1665 {
1666 	u32 reg, val;
1667 
1668 	val = 0;
1669 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1670 		val = reg << 16;
1671 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1672 		val |= (reg & 0xffff);
1673 	*data++ = val;
1674 
1675 	val = 0;
1676 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1677 		val = reg << 16;
1678 	if (!tg3_readphy(tp, MII_LPA, &reg))
1679 		val |= (reg & 0xffff);
1680 	*data++ = val;
1681 
1682 	val = 0;
1683 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1684 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1685 			val = reg << 16;
1686 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1687 			val |= (reg & 0xffff);
1688 	}
1689 	*data++ = val;
1690 
1691 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1692 		val = reg << 16;
1693 	else
1694 		val = 0;
1695 	*data++ = val;
1696 }
1697 
1698 /* tp->lock is held. */
1699 static void tg3_ump_link_report(struct tg3 *tp)
1700 {
1701 	u32 data[4];
1702 
1703 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1704 		return;
1705 
1706 	tg3_phy_gather_ump_data(tp, data);
1707 
1708 	tg3_wait_for_event_ack(tp);
1709 
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1711 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1712 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1713 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1714 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1715 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1716 
1717 	tg3_generate_fw_event(tp);
1718 }
1719 
1720 /* tp->lock is held. */
1721 static void tg3_stop_fw(struct tg3 *tp)
1722 {
1723 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1724 		/* Wait for RX cpu to ACK the previous event. */
1725 		tg3_wait_for_event_ack(tp);
1726 
1727 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1728 
1729 		tg3_generate_fw_event(tp);
1730 
1731 		/* Wait for RX cpu to ACK this event. */
1732 		tg3_wait_for_event_ack(tp);
1733 	}
1734 }
1735 
1736 /* tp->lock is held. */
1737 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1738 {
1739 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1740 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1741 
1742 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1743 		switch (kind) {
1744 		case RESET_KIND_INIT:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_START);
1747 			break;
1748 
1749 		case RESET_KIND_SHUTDOWN:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_UNLOAD);
1752 			break;
1753 
1754 		case RESET_KIND_SUSPEND:
1755 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756 				      DRV_STATE_SUSPEND);
1757 			break;
1758 
1759 		default:
1760 			break;
1761 		}
1762 	}
1763 }
1764 
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1767 {
1768 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1769 		switch (kind) {
1770 		case RESET_KIND_INIT:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_START_DONE);
1773 			break;
1774 
1775 		case RESET_KIND_SHUTDOWN:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_UNLOAD_DONE);
1778 			break;
1779 
1780 		default:
1781 			break;
1782 		}
1783 	}
1784 }
1785 
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1788 {
1789 	if (tg3_flag(tp, ENABLE_ASF)) {
1790 		switch (kind) {
1791 		case RESET_KIND_INIT:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_START);
1794 			break;
1795 
1796 		case RESET_KIND_SHUTDOWN:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_UNLOAD);
1799 			break;
1800 
1801 		case RESET_KIND_SUSPEND:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_SUSPEND);
1804 			break;
1805 
1806 		default:
1807 			break;
1808 		}
1809 	}
1810 }
1811 
1812 static int tg3_poll_fw(struct tg3 *tp)
1813 {
1814 	int i;
1815 	u32 val;
1816 
1817 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1818 		return 0;
1819 
1820 	if (tg3_flag(tp, IS_SSB_CORE)) {
1821 		/* We don't use firmware. */
1822 		return 0;
1823 	}
1824 
1825 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1826 		/* Wait up to 20ms for init done. */
1827 		for (i = 0; i < 200; i++) {
1828 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1829 				return 0;
1830 			if (pci_channel_offline(tp->pdev))
1831 				return -ENODEV;
1832 
1833 			udelay(100);
1834 		}
1835 		return -ENODEV;
1836 	}
1837 
1838 	/* Wait for firmware initialization to complete. */
1839 	for (i = 0; i < 100000; i++) {
1840 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1841 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1842 			break;
1843 		if (pci_channel_offline(tp->pdev)) {
1844 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1845 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1846 				netdev_info(tp->dev, "No firmware running\n");
1847 			}
1848 
1849 			break;
1850 		}
1851 
1852 		udelay(10);
1853 	}
1854 
1855 	/* Chip might not be fitted with firmware.  Some Sun onboard
1856 	 * parts are configured like that.  So don't signal the timeout
1857 	 * of the above loop as an error, but do report the lack of
1858 	 * running firmware once.
1859 	 */
1860 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1861 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1862 
1863 		netdev_info(tp->dev, "No firmware running\n");
1864 	}
1865 
1866 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1867 		/* The 57765 A0 needs a little more
1868 		 * time to do some important work.
1869 		 */
1870 		mdelay(10);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_link_report(struct tg3 *tp)
1877 {
1878 	if (!netif_carrier_ok(tp->dev)) {
1879 		netif_info(tp, link, tp->dev, "Link is down\n");
1880 		tg3_ump_link_report(tp);
1881 	} else if (netif_msg_link(tp)) {
1882 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1883 			    (tp->link_config.active_speed == SPEED_1000 ?
1884 			     1000 :
1885 			     (tp->link_config.active_speed == SPEED_100 ?
1886 			      100 : 10)),
1887 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1888 			     "full" : "half"));
1889 
1890 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1891 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1892 			    "on" : "off",
1893 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1894 			    "on" : "off");
1895 
1896 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1897 			netdev_info(tp->dev, "EEE is %s\n",
1898 				    tp->setlpicnt ? "enabled" : "disabled");
1899 
1900 		tg3_ump_link_report(tp);
1901 	}
1902 
1903 	tp->link_up = netif_carrier_ok(tp->dev);
1904 }
1905 
1906 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1907 {
1908 	u32 flowctrl = 0;
1909 
1910 	if (adv & ADVERTISE_PAUSE_CAP) {
1911 		flowctrl |= FLOW_CTRL_RX;
1912 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1913 			flowctrl |= FLOW_CTRL_TX;
1914 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1915 		flowctrl |= FLOW_CTRL_TX;
1916 
1917 	return flowctrl;
1918 }
1919 
1920 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1921 {
1922 	u16 miireg;
1923 
1924 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1925 		miireg = ADVERTISE_1000XPAUSE;
1926 	else if (flow_ctrl & FLOW_CTRL_TX)
1927 		miireg = ADVERTISE_1000XPSE_ASYM;
1928 	else if (flow_ctrl & FLOW_CTRL_RX)
1929 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1930 	else
1931 		miireg = 0;
1932 
1933 	return miireg;
1934 }
1935 
1936 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1937 {
1938 	u32 flowctrl = 0;
1939 
1940 	if (adv & ADVERTISE_1000XPAUSE) {
1941 		flowctrl |= FLOW_CTRL_RX;
1942 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1943 			flowctrl |= FLOW_CTRL_TX;
1944 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1945 		flowctrl |= FLOW_CTRL_TX;
1946 
1947 	return flowctrl;
1948 }
1949 
1950 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1951 {
1952 	u8 cap = 0;
1953 
1954 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1955 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1956 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1957 		if (lcladv & ADVERTISE_1000XPAUSE)
1958 			cap = FLOW_CTRL_RX;
1959 		if (rmtadv & ADVERTISE_1000XPAUSE)
1960 			cap = FLOW_CTRL_TX;
1961 	}
1962 
1963 	return cap;
1964 }
1965 
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1967 {
1968 	u8 autoneg;
1969 	u8 flowctrl = 0;
1970 	u32 old_rx_mode = tp->rx_mode;
1971 	u32 old_tx_mode = tp->tx_mode;
1972 
1973 	if (tg3_flag(tp, USE_PHYLIB))
1974 		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1975 	else
1976 		autoneg = tp->link_config.autoneg;
1977 
1978 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981 		else
1982 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983 	} else
1984 		flowctrl = tp->link_config.flowctrl;
1985 
1986 	tp->link_config.active_flowctrl = flowctrl;
1987 
1988 	if (flowctrl & FLOW_CTRL_RX)
1989 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990 	else
1991 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1992 
1993 	if (old_rx_mode != tp->rx_mode)
1994 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1995 
1996 	if (flowctrl & FLOW_CTRL_TX)
1997 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998 	else
1999 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2000 
2001 	if (old_tx_mode != tp->tx_mode)
2002 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2003 }
2004 
2005 static void tg3_adjust_link(struct net_device *dev)
2006 {
2007 	u8 oldflowctrl, linkmesg = 0;
2008 	u32 mac_mode, lcl_adv, rmt_adv;
2009 	struct tg3 *tp = netdev_priv(dev);
2010 	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2011 
2012 	spin_lock_bh(&tp->lock);
2013 
2014 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015 				    MAC_MODE_HALF_DUPLEX);
2016 
2017 	oldflowctrl = tp->link_config.active_flowctrl;
2018 
2019 	if (phydev->link) {
2020 		lcl_adv = 0;
2021 		rmt_adv = 0;
2022 
2023 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 		else if (phydev->speed == SPEED_1000 ||
2026 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2027 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 		else
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 
2031 		if (phydev->duplex == DUPLEX_HALF)
2032 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2033 		else {
2034 			lcl_adv = mii_advertise_flowctrl(
2035 				  tp->link_config.flowctrl);
2036 
2037 			if (phydev->pause)
2038 				rmt_adv = LPA_PAUSE_CAP;
2039 			if (phydev->asym_pause)
2040 				rmt_adv |= LPA_PAUSE_ASYM;
2041 		}
2042 
2043 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044 	} else
2045 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2046 
2047 	if (mac_mode != tp->mac_mode) {
2048 		tp->mac_mode = mac_mode;
2049 		tw32_f(MAC_MODE, tp->mac_mode);
2050 		udelay(40);
2051 	}
2052 
2053 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054 		if (phydev->speed == SPEED_10)
2055 			tw32(MAC_MI_STAT,
2056 			     MAC_MI_STAT_10MBPS_MODE |
2057 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058 		else
2059 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060 	}
2061 
2062 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 	else
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 
2073 	if (phydev->link != tp->old_link ||
2074 	    phydev->speed != tp->link_config.active_speed ||
2075 	    phydev->duplex != tp->link_config.active_duplex ||
2076 	    oldflowctrl != tp->link_config.active_flowctrl)
2077 		linkmesg = 1;
2078 
2079 	tp->old_link = phydev->link;
2080 	tp->link_config.active_speed = phydev->speed;
2081 	tp->link_config.active_duplex = phydev->duplex;
2082 
2083 	spin_unlock_bh(&tp->lock);
2084 
2085 	if (linkmesg)
2086 		tg3_link_report(tp);
2087 }
2088 
2089 static int tg3_phy_init(struct tg3 *tp)
2090 {
2091 	struct phy_device *phydev;
2092 
2093 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094 		return 0;
2095 
2096 	/* Bring the PHY back to a known state. */
2097 	tg3_bmcr_reset(tp);
2098 
2099 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2100 
2101 	/* Attach the MAC to the PHY. */
2102 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2103 			     tg3_adjust_link, phydev->interface);
2104 	if (IS_ERR(phydev)) {
2105 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106 		return PTR_ERR(phydev);
2107 	}
2108 
2109 	/* Mask with MAC supported features. */
2110 	switch (phydev->interface) {
2111 	case PHY_INTERFACE_MODE_GMII:
2112 	case PHY_INTERFACE_MODE_RGMII:
2113 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114 			phydev->supported &= (PHY_GBIT_FEATURES |
2115 					      SUPPORTED_Pause |
2116 					      SUPPORTED_Asym_Pause);
2117 			break;
2118 		}
2119 		/* fallthru */
2120 	case PHY_INTERFACE_MODE_MII:
2121 		phydev->supported &= (PHY_BASIC_FEATURES |
2122 				      SUPPORTED_Pause |
2123 				      SUPPORTED_Asym_Pause);
2124 		break;
2125 	default:
2126 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2127 		return -EINVAL;
2128 	}
2129 
2130 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2131 
2132 	phydev->advertising = phydev->supported;
2133 
2134 	return 0;
2135 }
2136 
2137 static void tg3_phy_start(struct tg3 *tp)
2138 {
2139 	struct phy_device *phydev;
2140 
2141 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142 		return;
2143 
2144 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2145 
2146 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148 		phydev->speed = tp->link_config.speed;
2149 		phydev->duplex = tp->link_config.duplex;
2150 		phydev->autoneg = tp->link_config.autoneg;
2151 		phydev->advertising = tp->link_config.advertising;
2152 	}
2153 
2154 	phy_start(phydev);
2155 
2156 	phy_start_aneg(phydev);
2157 }
2158 
2159 static void tg3_phy_stop(struct tg3 *tp)
2160 {
2161 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162 		return;
2163 
2164 	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2165 }
2166 
2167 static void tg3_phy_fini(struct tg3 *tp)
2168 {
2169 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2171 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2172 	}
2173 }
2174 
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2176 {
2177 	int err;
2178 	u32 val;
2179 
2180 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181 		return 0;
2182 
2183 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184 		/* Cannot do read-modify-write on 5401 */
2185 		err = tg3_phy_auxctl_write(tp,
2186 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188 					   0x4c20);
2189 		goto done;
2190 	}
2191 
2192 	err = tg3_phy_auxctl_read(tp,
2193 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194 	if (err)
2195 		return err;
2196 
2197 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198 	err = tg3_phy_auxctl_write(tp,
2199 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2200 
2201 done:
2202 	return err;
2203 }
2204 
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207 	u32 phytest;
2208 
2209 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210 		u32 phy;
2211 
2212 		tg3_writephy(tp, MII_TG3_FET_TEST,
2213 			     phytest | MII_TG3_FET_SHADOW_EN);
2214 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215 			if (enable)
2216 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 			else
2218 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2220 		}
2221 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2222 	}
2223 }
2224 
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2226 {
2227 	u32 reg;
2228 
2229 	if (!tg3_flag(tp, 5705_PLUS) ||
2230 	    (tg3_flag(tp, 5717_PLUS) &&
2231 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232 		return;
2233 
2234 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 		tg3_phy_fet_toggle_apd(tp, enable);
2236 		return;
2237 	}
2238 
2239 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2242 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2243 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2245 
2246 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2247 
2248 
2249 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250 	if (enable)
2251 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2252 
2253 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2254 }
2255 
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2257 {
2258 	u32 phy;
2259 
2260 	if (!tg3_flag(tp, 5705_PLUS) ||
2261 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262 		return;
2263 
2264 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265 		u32 ephy;
2266 
2267 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2269 
2270 			tg3_writephy(tp, MII_TG3_FET_TEST,
2271 				     ephy | MII_TG3_FET_SHADOW_EN);
2272 			if (!tg3_readphy(tp, reg, &phy)) {
2273 				if (enable)
2274 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 				else
2276 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 				tg3_writephy(tp, reg, phy);
2278 			}
2279 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280 		}
2281 	} else {
2282 		int ret;
2283 
2284 		ret = tg3_phy_auxctl_read(tp,
2285 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286 		if (!ret) {
2287 			if (enable)
2288 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 			else
2290 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 			tg3_phy_auxctl_write(tp,
2292 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293 		}
2294 	}
2295 }
2296 
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 {
2299 	int ret;
2300 	u32 val;
2301 
2302 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 		return;
2304 
2305 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 	if (!ret)
2307 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2309 }
2310 
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 {
2313 	u32 otp, phy;
2314 
2315 	if (!tp->phy_otp)
2316 		return;
2317 
2318 	otp = tp->phy_otp;
2319 
2320 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321 		return;
2322 
2323 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2326 
2327 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2330 
2331 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2334 
2335 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2337 
2338 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2340 
2341 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2344 
2345 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2346 }
2347 
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2349 {
2350 	u32 val;
2351 	struct ethtool_eee *dest = &tp->eee;
2352 
2353 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 		return;
2355 
2356 	if (eee)
2357 		dest = eee;
2358 
2359 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 		return;
2361 
2362 	/* Pull eee_active */
2363 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365 		dest->eee_active = 1;
2366 	} else
2367 		dest->eee_active = 0;
2368 
2369 	/* Pull lp advertised settings */
2370 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 		return;
2372 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 
2374 	/* Pull advertised and eee_enabled settings */
2375 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 		return;
2377 	dest->eee_enabled = !!val;
2378 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379 
2380 	/* Pull tx_lpi_enabled */
2381 	val = tr32(TG3_CPMU_EEE_MODE);
2382 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2383 
2384 	/* Pull lpi timer value */
2385 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2386 }
2387 
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2389 {
2390 	u32 val;
2391 
2392 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 		return;
2394 
2395 	tp->setlpicnt = 0;
2396 
2397 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 	    current_link_up &&
2399 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2400 	    (tp->link_config.active_speed == SPEED_100 ||
2401 	     tp->link_config.active_speed == SPEED_1000)) {
2402 		u32 eeectl;
2403 
2404 		if (tp->link_config.active_speed == SPEED_1000)
2405 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 		else
2407 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2408 
2409 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2410 
2411 		tg3_eee_pull_config(tp, NULL);
2412 		if (tp->eee.eee_active)
2413 			tp->setlpicnt = 2;
2414 	}
2415 
2416 	if (!tp->setlpicnt) {
2417 		if (current_link_up &&
2418 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2421 		}
2422 
2423 		val = tr32(TG3_CPMU_EEE_MODE);
2424 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2425 	}
2426 }
2427 
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2429 {
2430 	u32 val;
2431 
2432 	if (tp->link_config.active_speed == SPEED_1000 &&
2433 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 	     tg3_flag(tp, 57765_CLASS)) &&
2436 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 		val = MII_TG3_DSP_TAP26_ALNOKO |
2438 		      MII_TG3_DSP_TAP26_RMRXSTO;
2439 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2441 	}
2442 
2443 	val = tr32(TG3_CPMU_EEE_MODE);
2444 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2445 }
2446 
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2448 {
2449 	int limit = 100;
2450 
2451 	while (limit--) {
2452 		u32 tmp32;
2453 
2454 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 			if ((tmp32 & 0x1000) == 0)
2456 				break;
2457 		}
2458 	}
2459 	if (limit < 0)
2460 		return -EBUSY;
2461 
2462 	return 0;
2463 }
2464 
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2466 {
2467 	static const u32 test_pat[4][6] = {
2468 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472 	};
2473 	int chan;
2474 
2475 	for (chan = 0; chan < 4; chan++) {
2476 		int i;
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 			     (chan * 0x2000) | 0x0200);
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2481 
2482 		for (i = 0; i < 6; i++)
2483 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484 				     test_pat[chan][i]);
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 			     (chan * 0x2000) | 0x0200);
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 		if (tg3_wait_macro_done(tp)) {
2502 			*resetp = 1;
2503 			return -EBUSY;
2504 		}
2505 
2506 		for (i = 0; i < 6; i += 2) {
2507 			u32 low, high;
2508 
2509 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 			    tg3_wait_macro_done(tp)) {
2512 				*resetp = 1;
2513 				return -EBUSY;
2514 			}
2515 			low &= 0x7fff;
2516 			high &= 0x000f;
2517 			if (low != test_pat[chan][i] ||
2518 			    high != test_pat[chan][i+1]) {
2519 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2522 
2523 				return -EBUSY;
2524 			}
2525 		}
2526 	}
2527 
2528 	return 0;
2529 }
2530 
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2532 {
2533 	int chan;
2534 
2535 	for (chan = 0; chan < 4; chan++) {
2536 		int i;
2537 
2538 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 			     (chan * 0x2000) | 0x0200);
2540 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 		for (i = 0; i < 6; i++)
2542 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 		if (tg3_wait_macro_done(tp))
2545 			return -EBUSY;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2552 {
2553 	u32 reg32, phy9_orig;
2554 	int retries, do_phy_reset, err;
2555 
2556 	retries = 10;
2557 	do_phy_reset = 1;
2558 	do {
2559 		if (do_phy_reset) {
2560 			err = tg3_bmcr_reset(tp);
2561 			if (err)
2562 				return err;
2563 			do_phy_reset = 0;
2564 		}
2565 
2566 		/* Disable transmitter and interrupt.  */
2567 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 			continue;
2569 
2570 		reg32 |= 0x3000;
2571 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2572 
2573 		/* Set full-duplex, 1000 mbps.  */
2574 		tg3_writephy(tp, MII_BMCR,
2575 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2576 
2577 		/* Set to master mode.  */
2578 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579 			continue;
2580 
2581 		tg3_writephy(tp, MII_CTRL1000,
2582 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2583 
2584 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585 		if (err)
2586 			return err;
2587 
2588 		/* Block the PHY control access.  */
2589 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2590 
2591 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592 		if (!err)
2593 			break;
2594 	} while (--retries);
2595 
2596 	err = tg3_phy_reset_chanpat(tp);
2597 	if (err)
2598 		return err;
2599 
2600 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2601 
2602 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2604 
2605 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2606 
2607 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2608 
2609 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610 		reg32 &= ~0x3000;
2611 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 	} else if (!err)
2613 		err = -EBUSY;
2614 
2615 	return err;
2616 }
2617 
2618 static void tg3_carrier_off(struct tg3 *tp)
2619 {
2620 	netif_carrier_off(tp->dev);
2621 	tp->link_up = false;
2622 }
2623 
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2625 {
2626 	if (tg3_flag(tp, ENABLE_ASF))
2627 		netdev_warn(tp->dev,
2628 			    "Management side-band traffic will be interrupted during phy settings change\n");
2629 }
2630 
2631 /* This will reset the tigon3 PHY if there is no valid
2632  * link unless the FORCE argument is non-zero.
2633  */
2634 static int tg3_phy_reset(struct tg3 *tp)
2635 {
2636 	u32 val, cpmuctrl;
2637 	int err;
2638 
2639 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 		val = tr32(GRC_MISC_CFG);
2641 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642 		udelay(40);
2643 	}
2644 	err  = tg3_readphy(tp, MII_BMSR, &val);
2645 	err |= tg3_readphy(tp, MII_BMSR, &val);
2646 	if (err != 0)
2647 		return -EBUSY;
2648 
2649 	if (netif_running(tp->dev) && tp->link_up) {
2650 		netif_carrier_off(tp->dev);
2651 		tg3_link_report(tp);
2652 	}
2653 
2654 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 		err = tg3_phy_reset_5703_4_5(tp);
2658 		if (err)
2659 			return err;
2660 		goto out;
2661 	}
2662 
2663 	cpmuctrl = 0;
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668 			tw32(TG3_CPMU_CTRL,
2669 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2670 	}
2671 
2672 	err = tg3_bmcr_reset(tp);
2673 	if (err)
2674 		return err;
2675 
2676 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2679 
2680 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2681 	}
2682 
2683 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689 			udelay(40);
2690 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2691 		}
2692 	}
2693 
2694 	if (tg3_flag(tp, 5717_PLUS) &&
2695 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696 		return 0;
2697 
2698 	tg3_phy_apply_otp(tp);
2699 
2700 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 		tg3_phy_toggle_apd(tp, true);
2702 	else
2703 		tg3_phy_toggle_apd(tp, false);
2704 
2705 out:
2706 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716 	}
2717 
2718 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2724 		}
2725 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 				tg3_writephy(tp, MII_TG3_TEST1,
2731 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2732 			} else
2733 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2734 
2735 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2736 		}
2737 	}
2738 
2739 	/* Set Extended packet length bit (bit 14) on all chips that */
2740 	/* support jumbo frames */
2741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 		/* Cannot do read-modify-write on 5401 */
2743 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 		/* Set bit 14 with read-modify-write to preserve other bits */
2746 		err = tg3_phy_auxctl_read(tp,
2747 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748 		if (!err)
2749 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2751 	}
2752 
2753 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 	 * jumbo frames transmission.
2755 	 */
2756 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2760 	}
2761 
2762 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 		/* adjust output voltage */
2764 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2765 	}
2766 
2767 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2769 
2770 	tg3_phy_toggle_automdix(tp, true);
2771 	tg3_phy_set_wirespeed(tp);
2772 	return 0;
2773 }
2774 
2775 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2777 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2778 					  TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2784 
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2790 
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2792 {
2793 	u32 status, shift;
2794 
2795 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2797 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798 	else
2799 		status = tr32(TG3_CPMU_DRV_STATUS);
2800 
2801 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 	status |= (newstat << shift);
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2807 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808 	else
2809 		tw32(TG3_CPMU_DRV_STATUS, status);
2810 
2811 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2812 }
2813 
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2815 {
2816 	if (!tg3_flag(tp, IS_NIC))
2817 		return 0;
2818 
2819 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823 			return -EIO;
2824 
2825 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2826 
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 
2830 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 	} else {
2832 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2840 {
2841 	u32 grc_local_ctrl;
2842 
2843 	if (!tg3_flag(tp, IS_NIC) ||
2844 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2846 		return;
2847 
2848 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2849 
2850 	tw32_wait_f(GRC_LOCAL_CTRL,
2851 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 
2854 	tw32_wait_f(GRC_LOCAL_CTRL,
2855 		    grc_local_ctrl,
2856 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 
2858 	tw32_wait_f(GRC_LOCAL_CTRL,
2859 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 }
2862 
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2864 {
2865 	if (!tg3_flag(tp, IS_NIC))
2866 		return;
2867 
2868 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 			    (GRC_LCLCTRL_GPIO_OE0 |
2872 			     GRC_LCLCTRL_GPIO_OE1 |
2873 			     GRC_LCLCTRL_GPIO_OE2 |
2874 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2876 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 				     GRC_LCLCTRL_GPIO_OE1 |
2882 				     GRC_LCLCTRL_GPIO_OE2 |
2883 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 				     tp->grc_local_ctrl;
2886 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 
2889 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 
2893 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 	} else {
2897 		u32 no_gpio2;
2898 		u32 grc_local_ctrl = 0;
2899 
2900 		/* Workaround to prevent overdrawing Amps. */
2901 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904 				    grc_local_ctrl,
2905 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 		}
2907 
2908 		/* On 5753 and variants, GPIO2 cannot be used. */
2909 		no_gpio2 = tp->nic_sram_data_cfg &
2910 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2911 
2912 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 				  GRC_LCLCTRL_GPIO_OE1 |
2914 				  GRC_LCLCTRL_GPIO_OE2 |
2915 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2917 		if (no_gpio2) {
2918 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2920 		}
2921 		tw32_wait_f(GRC_LOCAL_CTRL,
2922 			    tp->grc_local_ctrl | grc_local_ctrl,
2923 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2924 
2925 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2926 
2927 		tw32_wait_f(GRC_LOCAL_CTRL,
2928 			    tp->grc_local_ctrl | grc_local_ctrl,
2929 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 
2931 		if (!no_gpio2) {
2932 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 			tw32_wait_f(GRC_LOCAL_CTRL,
2934 				    tp->grc_local_ctrl | grc_local_ctrl,
2935 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 		}
2937 	}
2938 }
2939 
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2941 {
2942 	u32 msg = 0;
2943 
2944 	/* Serialize power state transitions */
2945 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946 		return;
2947 
2948 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 		msg = TG3_GPIO_MSG_NEED_VAUX;
2950 
2951 	msg = tg3_set_function_status(tp, msg);
2952 
2953 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 		goto done;
2955 
2956 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 		tg3_pwrsrc_switch_to_vaux(tp);
2958 	else
2959 		tg3_pwrsrc_die_with_vmain(tp);
2960 
2961 done:
2962 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2963 }
2964 
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2966 {
2967 	bool need_vaux = false;
2968 
2969 	/* The GPIOs do something completely different on 57765. */
2970 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971 		return;
2972 
2973 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 		tg3_frob_aux_power_5717(tp, include_wol ?
2977 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978 		return;
2979 	}
2980 
2981 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 		struct net_device *dev_peer;
2983 
2984 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2985 
2986 		/* remove_one() may have been run on the peer. */
2987 		if (dev_peer) {
2988 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2989 
2990 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2991 				return;
2992 
2993 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 			    tg3_flag(tp_peer, ENABLE_ASF))
2995 				need_vaux = true;
2996 		}
2997 	}
2998 
2999 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 	    tg3_flag(tp, ENABLE_ASF))
3001 		need_vaux = true;
3002 
3003 	if (need_vaux)
3004 		tg3_pwrsrc_switch_to_vaux(tp);
3005 	else
3006 		tg3_pwrsrc_die_with_vmain(tp);
3007 }
3008 
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3010 {
3011 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 		return 1;
3013 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 		if (speed != SPEED_10)
3015 			return 1;
3016 	} else if (speed == SPEED_10)
3017 		return 1;
3018 
3019 	return 0;
3020 }
3021 
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3023 {
3024 	switch (tg3_asic_rev(tp)) {
3025 	case ASIC_REV_5700:
3026 	case ASIC_REV_5704:
3027 		return true;
3028 	case ASIC_REV_5780:
3029 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030 			return true;
3031 		return false;
3032 	case ASIC_REV_5717:
3033 		if (!tp->pci_fn)
3034 			return true;
3035 		return false;
3036 	case ASIC_REV_5719:
3037 	case ASIC_REV_5720:
3038 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039 		    !tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3048 {
3049 	switch (tg3_asic_rev(tp)) {
3050 	case ASIC_REV_5719:
3051 	case ASIC_REV_5720:
3052 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053 		    !tp->pci_fn)
3054 			return true;
3055 		return false;
3056 	}
3057 
3058 	return false;
3059 }
3060 
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3062 {
3063 	u32 val;
3064 
3065 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066 		return;
3067 
3068 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3072 
3073 			sg_dig_ctrl |=
3074 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077 		}
3078 		return;
3079 	}
3080 
3081 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082 		tg3_bmcr_reset(tp);
3083 		val = tr32(GRC_MISC_CFG);
3084 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085 		udelay(40);
3086 		return;
3087 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088 		u32 phytest;
3089 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090 			u32 phy;
3091 
3092 			tg3_writephy(tp, MII_ADVERTISE, 0);
3093 			tg3_writephy(tp, MII_BMCR,
3094 				     BMCR_ANENABLE | BMCR_ANRESTART);
3095 
3096 			tg3_writephy(tp, MII_TG3_FET_TEST,
3097 				     phytest | MII_TG3_FET_SHADOW_EN);
3098 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100 				tg3_writephy(tp,
3101 					     MII_TG3_FET_SHDW_AUXMODE4,
3102 					     phy);
3103 			}
3104 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3105 		}
3106 		return;
3107 	} else if (do_low_power) {
3108 		if (!tg3_phy_led_bug(tp))
3109 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3111 
3112 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3116 	}
3117 
3118 	/* The PHY should not be powered down on some chips because
3119 	 * of bugs.
3120 	 */
3121 	if (tg3_phy_power_bug(tp))
3122 		return;
3123 
3124 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3130 	}
3131 
3132 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3133 }
3134 
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3137 {
3138 	if (tg3_flag(tp, NVRAM)) {
3139 		int i;
3140 
3141 		if (tp->nvram_lock_cnt == 0) {
3142 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 			for (i = 0; i < 8000; i++) {
3144 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 					break;
3146 				udelay(20);
3147 			}
3148 			if (i == 8000) {
3149 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 				return -ENODEV;
3151 			}
3152 		}
3153 		tp->nvram_lock_cnt++;
3154 	}
3155 	return 0;
3156 }
3157 
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3160 {
3161 	if (tg3_flag(tp, NVRAM)) {
3162 		if (tp->nvram_lock_cnt > 0)
3163 			tp->nvram_lock_cnt--;
3164 		if (tp->nvram_lock_cnt == 0)
3165 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3166 	}
3167 }
3168 
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3171 {
3172 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 		u32 nvaccess = tr32(NVRAM_ACCESS);
3174 
3175 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3176 	}
3177 }
3178 
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3181 {
3182 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 		u32 nvaccess = tr32(NVRAM_ACCESS);
3184 
3185 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3186 	}
3187 }
3188 
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 					u32 offset, u32 *val)
3191 {
3192 	u32 tmp;
3193 	int i;
3194 
3195 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196 		return -EINVAL;
3197 
3198 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 					EEPROM_ADDR_DEVID_MASK |
3200 					EEPROM_ADDR_READ);
3201 	tw32(GRC_EEPROM_ADDR,
3202 	     tmp |
3203 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 	      EEPROM_ADDR_ADDR_MASK) |
3206 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3207 
3208 	for (i = 0; i < 1000; i++) {
3209 		tmp = tr32(GRC_EEPROM_ADDR);
3210 
3211 		if (tmp & EEPROM_ADDR_COMPLETE)
3212 			break;
3213 		msleep(1);
3214 	}
3215 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 		return -EBUSY;
3217 
3218 	tmp = tr32(GRC_EEPROM_DATA);
3219 
3220 	/*
3221 	 * The data will always be opposite the native endian
3222 	 * format.  Perform a blind byteswap to compensate.
3223 	 */
3224 	*val = swab32(tmp);
3225 
3226 	return 0;
3227 }
3228 
3229 #define NVRAM_CMD_TIMEOUT 10000
3230 
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 {
3233 	int i;
3234 
3235 	tw32(NVRAM_CMD, nvram_cmd);
3236 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237 		udelay(10);
3238 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 			udelay(10);
3240 			break;
3241 		}
3242 	}
3243 
3244 	if (i == NVRAM_CMD_TIMEOUT)
3245 		return -EBUSY;
3246 
3247 	return 0;
3248 }
3249 
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3251 {
3252 	if (tg3_flag(tp, NVRAM) &&
3253 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3254 	    tg3_flag(tp, FLASH) &&
3255 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3257 
3258 		addr = ((addr / tp->nvram_pagesize) <<
3259 			ATMEL_AT45DB0X1B_PAGE_POS) +
3260 		       (addr % tp->nvram_pagesize);
3261 
3262 	return addr;
3263 }
3264 
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3266 {
3267 	if (tg3_flag(tp, NVRAM) &&
3268 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3269 	    tg3_flag(tp, FLASH) &&
3270 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3272 
3273 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 			tp->nvram_pagesize) +
3275 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3276 
3277 	return addr;
3278 }
3279 
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281  * the byteswapping settings for all other register accesses.
3282  * tg3 devices are BE devices, so on a BE machine, the data
3283  * returned will be exactly as it is seen in NVRAM.  On a LE
3284  * machine, the 32-bit value will be byteswapped.
3285  */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3287 {
3288 	int ret;
3289 
3290 	if (!tg3_flag(tp, NVRAM))
3291 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3292 
3293 	offset = tg3_nvram_phys_addr(tp, offset);
3294 
3295 	if (offset > NVRAM_ADDR_MSK)
3296 		return -EINVAL;
3297 
3298 	ret = tg3_nvram_lock(tp);
3299 	if (ret)
3300 		return ret;
3301 
3302 	tg3_enable_nvram_access(tp);
3303 
3304 	tw32(NVRAM_ADDR, offset);
3305 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3307 
3308 	if (ret == 0)
3309 		*val = tr32(NVRAM_RDDATA);
3310 
3311 	tg3_disable_nvram_access(tp);
3312 
3313 	tg3_nvram_unlock(tp);
3314 
3315 	return ret;
3316 }
3317 
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3320 {
3321 	u32 v;
3322 	int res = tg3_nvram_read(tp, offset, &v);
3323 	if (!res)
3324 		*val = cpu_to_be32(v);
3325 	return res;
3326 }
3327 
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 				    u32 offset, u32 len, u8 *buf)
3330 {
3331 	int i, j, rc = 0;
3332 	u32 val;
3333 
3334 	for (i = 0; i < len; i += 4) {
3335 		u32 addr;
3336 		__be32 data;
3337 
3338 		addr = offset + i;
3339 
3340 		memcpy(&data, buf + i, 4);
3341 
3342 		/*
3343 		 * The SEEPROM interface expects the data to always be opposite
3344 		 * the native endian format.  We accomplish this by reversing
3345 		 * all the operations that would have been performed on the
3346 		 * data from a call to tg3_nvram_read_be32().
3347 		 */
3348 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3349 
3350 		val = tr32(GRC_EEPROM_ADDR);
3351 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3352 
3353 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354 			EEPROM_ADDR_READ);
3355 		tw32(GRC_EEPROM_ADDR, val |
3356 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 			(addr & EEPROM_ADDR_ADDR_MASK) |
3358 			EEPROM_ADDR_START |
3359 			EEPROM_ADDR_WRITE);
3360 
3361 		for (j = 0; j < 1000; j++) {
3362 			val = tr32(GRC_EEPROM_ADDR);
3363 
3364 			if (val & EEPROM_ADDR_COMPLETE)
3365 				break;
3366 			msleep(1);
3367 		}
3368 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3369 			rc = -EBUSY;
3370 			break;
3371 		}
3372 	}
3373 
3374 	return rc;
3375 }
3376 
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379 		u8 *buf)
3380 {
3381 	int ret = 0;
3382 	u32 pagesize = tp->nvram_pagesize;
3383 	u32 pagemask = pagesize - 1;
3384 	u32 nvram_cmd;
3385 	u8 *tmp;
3386 
3387 	tmp = kmalloc(pagesize, GFP_KERNEL);
3388 	if (tmp == NULL)
3389 		return -ENOMEM;
3390 
3391 	while (len) {
3392 		int j;
3393 		u32 phy_addr, page_off, size;
3394 
3395 		phy_addr = offset & ~pagemask;
3396 
3397 		for (j = 0; j < pagesize; j += 4) {
3398 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 						  (__be32 *) (tmp + j));
3400 			if (ret)
3401 				break;
3402 		}
3403 		if (ret)
3404 			break;
3405 
3406 		page_off = offset & pagemask;
3407 		size = pagesize;
3408 		if (len < size)
3409 			size = len;
3410 
3411 		len -= size;
3412 
3413 		memcpy(tmp + page_off, buf, size);
3414 
3415 		offset = offset + (pagesize - page_off);
3416 
3417 		tg3_enable_nvram_access(tp);
3418 
3419 		/*
3420 		 * Before we can erase the flash page, we need
3421 		 * to issue a special "write enable" command.
3422 		 */
3423 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424 
3425 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 			break;
3427 
3428 		/* Erase the target page */
3429 		tw32(NVRAM_ADDR, phy_addr);
3430 
3431 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		/* Issue another write enable to start the write. */
3438 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3439 
3440 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 			break;
3442 
3443 		for (j = 0; j < pagesize; j += 4) {
3444 			__be32 data;
3445 
3446 			data = *((__be32 *) (tmp + j));
3447 
3448 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3449 
3450 			tw32(NVRAM_ADDR, phy_addr + j);
3451 
3452 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453 				NVRAM_CMD_WR;
3454 
3455 			if (j == 0)
3456 				nvram_cmd |= NVRAM_CMD_FIRST;
3457 			else if (j == (pagesize - 4))
3458 				nvram_cmd |= NVRAM_CMD_LAST;
3459 
3460 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 			if (ret)
3462 				break;
3463 		}
3464 		if (ret)
3465 			break;
3466 	}
3467 
3468 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 
3471 	kfree(tmp);
3472 
3473 	return ret;
3474 }
3475 
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 		u8 *buf)
3479 {
3480 	int i, ret = 0;
3481 
3482 	for (i = 0; i < len; i += 4, offset += 4) {
3483 		u32 page_off, phy_addr, nvram_cmd;
3484 		__be32 data;
3485 
3486 		memcpy(&data, buf + i, 4);
3487 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3488 
3489 		page_off = offset % tp->nvram_pagesize;
3490 
3491 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3492 
3493 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3494 
3495 		if (page_off == 0 || i == 0)
3496 			nvram_cmd |= NVRAM_CMD_FIRST;
3497 		if (page_off == (tp->nvram_pagesize - 4))
3498 			nvram_cmd |= NVRAM_CMD_LAST;
3499 
3500 		if (i == (len - 4))
3501 			nvram_cmd |= NVRAM_CMD_LAST;
3502 
3503 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 		    !tg3_flag(tp, FLASH) ||
3505 		    !tg3_flag(tp, 57765_PLUS))
3506 			tw32(NVRAM_ADDR, phy_addr);
3507 
3508 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 		    !tg3_flag(tp, 5755_PLUS) &&
3510 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3511 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3512 			u32 cmd;
3513 
3514 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 			ret = tg3_nvram_exec_cmd(tp, cmd);
3516 			if (ret)
3517 				break;
3518 		}
3519 		if (!tg3_flag(tp, FLASH)) {
3520 			/* We always do complete word writes to eeprom. */
3521 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3522 		}
3523 
3524 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 		if (ret)
3526 			break;
3527 	}
3528 	return ret;
3529 }
3530 
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3533 {
3534 	int ret;
3535 
3536 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539 		udelay(40);
3540 	}
3541 
3542 	if (!tg3_flag(tp, NVRAM)) {
3543 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544 	} else {
3545 		u32 grc_mode;
3546 
3547 		ret = tg3_nvram_lock(tp);
3548 		if (ret)
3549 			return ret;
3550 
3551 		tg3_enable_nvram_access(tp);
3552 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 			tw32(NVRAM_WRITE1, 0x406);
3554 
3555 		grc_mode = tr32(GRC_MODE);
3556 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3557 
3558 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560 				buf);
3561 		} else {
3562 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563 				buf);
3564 		}
3565 
3566 		grc_mode = tr32(GRC_MODE);
3567 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3568 
3569 		tg3_disable_nvram_access(tp);
3570 		tg3_nvram_unlock(tp);
3571 	}
3572 
3573 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 		udelay(40);
3576 	}
3577 
3578 	return ret;
3579 }
3580 
3581 #define RX_CPU_SCRATCH_BASE	0x30000
3582 #define RX_CPU_SCRATCH_SIZE	0x04000
3583 #define TX_CPU_SCRATCH_BASE	0x34000
3584 #define TX_CPU_SCRATCH_SIZE	0x04000
3585 
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3588 {
3589 	int i;
3590 	const int iters = 10000;
3591 
3592 	for (i = 0; i < iters; i++) {
3593 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3595 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596 			break;
3597 		if (pci_channel_offline(tp->pdev))
3598 			return -EBUSY;
3599 	}
3600 
3601 	return (i == iters) ? -EBUSY : 0;
3602 }
3603 
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3606 {
3607 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3608 
3609 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3611 	udelay(10);
3612 
3613 	return rc;
3614 }
3615 
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3618 {
3619 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3627 }
3628 
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3631 {
3632 	tg3_resume_cpu(tp, RX_CPU_BASE);
3633 }
3634 
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3637 {
3638 	int rc;
3639 
3640 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3641 
3642 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3644 
3645 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646 		return 0;
3647 	}
3648 	if (cpu_base == RX_CPU_BASE) {
3649 		rc = tg3_rxcpu_pause(tp);
3650 	} else {
3651 		/*
3652 		 * There is only an Rx CPU for the 5750 derivative in the
3653 		 * BCM4785.
3654 		 */
3655 		if (tg3_flag(tp, IS_SSB_CORE))
3656 			return 0;
3657 
3658 		rc = tg3_txcpu_pause(tp);
3659 	}
3660 
3661 	if (rc) {
3662 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664 		return -ENODEV;
3665 	}
3666 
3667 	/* Clear firmware's nvram arbitration. */
3668 	if (tg3_flag(tp, NVRAM))
3669 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670 	return 0;
3671 }
3672 
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 			   const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676 	int fw_len;
3677 
3678 	/* Non fragmented firmware have one firmware header followed by a
3679 	 * contiguous chunk of data to be written. The length field in that
3680 	 * header is not the length of data to be written but the complete
3681 	 * length of the bss. The data length is determined based on
3682 	 * tp->fw->size minus headers.
3683 	 *
3684 	 * Fragmented firmware have a main header followed by multiple
3685 	 * fragments. Each fragment is identical to non fragmented firmware
3686 	 * with a firmware header followed by a contiguous chunk of data. In
3687 	 * the main header, the length field is unused and set to 0xffffffff.
3688 	 * In each fragment header the length is the entire size of that
3689 	 * fragment i.e. fragment data + header length. Data length is
3690 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3691 	 */
3692 	if (tp->fw_len == 0xffffffff)
3693 		fw_len = be32_to_cpu(fw_hdr->len);
3694 	else
3695 		fw_len = tp->fw->size;
3696 
3697 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3698 }
3699 
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 				 u32 cpu_scratch_base, int cpu_scratch_size,
3703 				 const struct tg3_firmware_hdr *fw_hdr)
3704 {
3705 	int err, i;
3706 	void (*write_op)(struct tg3 *, u32, u32);
3707 	int total_len = tp->fw->size;
3708 
3709 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710 		netdev_err(tp->dev,
3711 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3712 			   __func__);
3713 		return -EINVAL;
3714 	}
3715 
3716 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 		write_op = tg3_write_mem;
3718 	else
3719 		write_op = tg3_write_indirect_reg32;
3720 
3721 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 		/* It is possible that bootcode is still loading at this point.
3723 		 * Get the nvram lock first before halting the cpu.
3724 		 */
3725 		int lock_err = tg3_nvram_lock(tp);
3726 		err = tg3_halt_cpu(tp, cpu_base);
3727 		if (!lock_err)
3728 			tg3_nvram_unlock(tp);
3729 		if (err)
3730 			goto out;
3731 
3732 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 			write_op(tp, cpu_scratch_base + i, 0);
3734 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 		tw32(cpu_base + CPU_MODE,
3736 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737 	} else {
3738 		/* Subtract additional main header for fragmented firmware and
3739 		 * advance to the first fragment
3740 		 */
3741 		total_len -= TG3_FW_HDR_LEN;
3742 		fw_hdr++;
3743 	}
3744 
3745 	do {
3746 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 			write_op(tp, cpu_scratch_base +
3749 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750 				     (i * sizeof(u32)),
3751 				 be32_to_cpu(fw_data[i]));
3752 
3753 		total_len -= be32_to_cpu(fw_hdr->len);
3754 
3755 		/* Advance to next fragment */
3756 		fw_hdr = (struct tg3_firmware_hdr *)
3757 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 	} while (total_len > 0);
3759 
3760 	err = 0;
3761 
3762 out:
3763 	return err;
3764 }
3765 
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3768 {
3769 	int i;
3770 	const int iters = 5;
3771 
3772 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 	tw32_f(cpu_base + CPU_PC, pc);
3774 
3775 	for (i = 0; i < iters; i++) {
3776 		if (tr32(cpu_base + CPU_PC) == pc)
3777 			break;
3778 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3780 		tw32_f(cpu_base + CPU_PC, pc);
3781 		udelay(1000);
3782 	}
3783 
3784 	return (i == iters) ? -EBUSY : 0;
3785 }
3786 
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3789 {
3790 	const struct tg3_firmware_hdr *fw_hdr;
3791 	int err;
3792 
3793 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3794 
3795 	/* Firmware blob starts with version numbers, followed by
3796 	   start address and length. We are setting complete length.
3797 	   length = end_address_of_bss - start_address_of_text.
3798 	   Remainder is the blob to be loaded contiguously
3799 	   from start address. */
3800 
3801 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 				    fw_hdr);
3804 	if (err)
3805 		return err;
3806 
3807 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 				    fw_hdr);
3810 	if (err)
3811 		return err;
3812 
3813 	/* Now startup only the RX cpu. */
3814 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 				       be32_to_cpu(fw_hdr->base_addr));
3816 	if (err) {
3817 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3818 			   "should be %08x\n", __func__,
3819 			   tr32(RX_CPU_BASE + CPU_PC),
3820 				be32_to_cpu(fw_hdr->base_addr));
3821 		return -ENODEV;
3822 	}
3823 
3824 	tg3_rxcpu_resume(tp);
3825 
3826 	return 0;
3827 }
3828 
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3830 {
3831 	const int iters = 1000;
3832 	int i;
3833 	u32 val;
3834 
3835 	/* Wait for boot code to complete initialization and enter service
3836 	 * loop. It is then safe to download service patches
3837 	 */
3838 	for (i = 0; i < iters; i++) {
3839 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 			break;
3841 
3842 		udelay(10);
3843 	}
3844 
3845 	if (i == iters) {
3846 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847 		return -EBUSY;
3848 	}
3849 
3850 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851 	if (val & 0xff) {
3852 		netdev_warn(tp->dev,
3853 			    "Other patches exist. Not downloading EEE patch\n");
3854 		return -EEXIST;
3855 	}
3856 
3857 	return 0;
3858 }
3859 
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3862 {
3863 	struct tg3_firmware_hdr *fw_hdr;
3864 
3865 	if (!tg3_flag(tp, NO_NVRAM))
3866 		return;
3867 
3868 	if (tg3_validate_rxcpu_state(tp))
3869 		return;
3870 
3871 	if (!tp->fw)
3872 		return;
3873 
3874 	/* This firmware blob has a different format than older firmware
3875 	 * releases as given below. The main difference is we have fragmented
3876 	 * data to be written to non-contiguous locations.
3877 	 *
3878 	 * In the beginning we have a firmware header identical to other
3879 	 * firmware which consists of version, base addr and length. The length
3880 	 * here is unused and set to 0xffffffff.
3881 	 *
3882 	 * This is followed by a series of firmware fragments which are
3883 	 * individually identical to previous firmware. i.e. they have the
3884 	 * firmware header and followed by data for that fragment. The version
3885 	 * field of the individual fragment header is unused.
3886 	 */
3887 
3888 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890 		return;
3891 
3892 	if (tg3_rxcpu_pause(tp))
3893 		return;
3894 
3895 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3897 
3898 	tg3_rxcpu_resume(tp);
3899 }
3900 
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3903 {
3904 	const struct tg3_firmware_hdr *fw_hdr;
3905 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906 	int err;
3907 
3908 	if (!tg3_flag(tp, FW_TSO))
3909 		return 0;
3910 
3911 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3912 
3913 	/* Firmware blob starts with version numbers, followed by
3914 	   start address and length. We are setting complete length.
3915 	   length = end_address_of_bss - start_address_of_text.
3916 	   Remainder is the blob to be loaded contiguously
3917 	   from start address. */
3918 
3919 	cpu_scratch_size = tp->fw_len;
3920 
3921 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 		cpu_base = RX_CPU_BASE;
3923 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924 	} else {
3925 		cpu_base = TX_CPU_BASE;
3926 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3928 	}
3929 
3930 	err = tg3_load_firmware_cpu(tp, cpu_base,
3931 				    cpu_scratch_base, cpu_scratch_size,
3932 				    fw_hdr);
3933 	if (err)
3934 		return err;
3935 
3936 	/* Now startup the cpu. */
3937 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 				       be32_to_cpu(fw_hdr->base_addr));
3939 	if (err) {
3940 		netdev_err(tp->dev,
3941 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3942 			   __func__, tr32(cpu_base + CPU_PC),
3943 			   be32_to_cpu(fw_hdr->base_addr));
3944 		return -ENODEV;
3945 	}
3946 
3947 	tg3_resume_cpu(tp, cpu_base);
3948 	return 0;
3949 }
3950 
3951 
3952 /* tp->lock is held. */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3954 {
3955 	u32 addr_high, addr_low;
3956 	int i;
3957 
3958 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3959 		     tp->dev->dev_addr[1]);
3960 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3961 		    (tp->dev->dev_addr[3] << 16) |
3962 		    (tp->dev->dev_addr[4] <<  8) |
3963 		    (tp->dev->dev_addr[5] <<  0));
3964 	for (i = 0; i < 4; i++) {
3965 		if (i == 1 && skip_mac_1)
3966 			continue;
3967 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3968 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3969 	}
3970 
3971 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3972 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3973 		for (i = 0; i < 12; i++) {
3974 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3975 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3976 		}
3977 	}
3978 
3979 	addr_high = (tp->dev->dev_addr[0] +
3980 		     tp->dev->dev_addr[1] +
3981 		     tp->dev->dev_addr[2] +
3982 		     tp->dev->dev_addr[3] +
3983 		     tp->dev->dev_addr[4] +
3984 		     tp->dev->dev_addr[5]) &
3985 		TX_BACKOFF_SEED_MASK;
3986 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3987 }
3988 
3989 static void tg3_enable_register_access(struct tg3 *tp)
3990 {
3991 	/*
3992 	 * Make sure register accesses (indirect or otherwise) will function
3993 	 * correctly.
3994 	 */
3995 	pci_write_config_dword(tp->pdev,
3996 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3997 }
3998 
3999 static int tg3_power_up(struct tg3 *tp)
4000 {
4001 	int err;
4002 
4003 	tg3_enable_register_access(tp);
4004 
4005 	err = pci_set_power_state(tp->pdev, PCI_D0);
4006 	if (!err) {
4007 		/* Switch out of Vaux if it is a NIC */
4008 		tg3_pwrsrc_switch_to_vmain(tp);
4009 	} else {
4010 		netdev_err(tp->dev, "Transition to D0 failed\n");
4011 	}
4012 
4013 	return err;
4014 }
4015 
4016 static int tg3_setup_phy(struct tg3 *, bool);
4017 
4018 static int tg3_power_down_prepare(struct tg3 *tp)
4019 {
4020 	u32 misc_host_ctrl;
4021 	bool device_should_wake, do_low_power;
4022 
4023 	tg3_enable_register_access(tp);
4024 
4025 	/* Restore the CLKREQ setting. */
4026 	if (tg3_flag(tp, CLKREQ_BUG))
4027 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4028 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4029 
4030 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4031 	tw32(TG3PCI_MISC_HOST_CTRL,
4032 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4033 
4034 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4035 			     tg3_flag(tp, WOL_ENABLE);
4036 
4037 	if (tg3_flag(tp, USE_PHYLIB)) {
4038 		do_low_power = false;
4039 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4040 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4041 			struct phy_device *phydev;
4042 			u32 phyid, advertising;
4043 
4044 			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4045 
4046 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4047 
4048 			tp->link_config.speed = phydev->speed;
4049 			tp->link_config.duplex = phydev->duplex;
4050 			tp->link_config.autoneg = phydev->autoneg;
4051 			tp->link_config.advertising = phydev->advertising;
4052 
4053 			advertising = ADVERTISED_TP |
4054 				      ADVERTISED_Pause |
4055 				      ADVERTISED_Autoneg |
4056 				      ADVERTISED_10baseT_Half;
4057 
4058 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4059 				if (tg3_flag(tp, WOL_SPEED_100MB))
4060 					advertising |=
4061 						ADVERTISED_100baseT_Half |
4062 						ADVERTISED_100baseT_Full |
4063 						ADVERTISED_10baseT_Full;
4064 				else
4065 					advertising |= ADVERTISED_10baseT_Full;
4066 			}
4067 
4068 			phydev->advertising = advertising;
4069 
4070 			phy_start_aneg(phydev);
4071 
4072 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 			if (phyid != PHY_ID_BCMAC131) {
4074 				phyid &= PHY_BCM_OUI_MASK;
4075 				if (phyid == PHY_BCM_OUI_1 ||
4076 				    phyid == PHY_BCM_OUI_2 ||
4077 				    phyid == PHY_BCM_OUI_3)
4078 					do_low_power = true;
4079 			}
4080 		}
4081 	} else {
4082 		do_low_power = true;
4083 
4084 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086 
4087 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 			tg3_setup_phy(tp, false);
4089 	}
4090 
4091 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092 		u32 val;
4093 
4094 		val = tr32(GRC_VCPU_EXT_CTRL);
4095 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4097 		int i;
4098 		u32 val;
4099 
4100 		for (i = 0; i < 200; i++) {
4101 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103 				break;
4104 			msleep(1);
4105 		}
4106 	}
4107 	if (tg3_flag(tp, WOL_CAP))
4108 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 						     WOL_DRV_STATE_SHUTDOWN |
4110 						     WOL_DRV_WOL |
4111 						     WOL_SET_MAGIC_PKT);
4112 
4113 	if (device_should_wake) {
4114 		u32 mac_mode;
4115 
4116 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117 			if (do_low_power &&
4118 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 				tg3_phy_auxctl_write(tp,
4120 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124 				udelay(40);
4125 			}
4126 
4127 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 			else if (tp->phy_flags &
4130 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 				if (tp->link_config.active_speed == SPEED_1000)
4132 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4133 				else
4134 					mac_mode = MAC_MODE_PORT_MODE_MII;
4135 			} else
4136 				mac_mode = MAC_MODE_PORT_MODE_MII;
4137 
4138 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 					     SPEED_100 : SPEED_10;
4142 				if (tg3_5700_link_polarity(tp, speed))
4143 					mac_mode |= MAC_MODE_LINK_POLARITY;
4144 				else
4145 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4146 			}
4147 		} else {
4148 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4149 		}
4150 
4151 		if (!tg3_flag(tp, 5750_PLUS))
4152 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4153 
4154 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158 
4159 		if (tg3_flag(tp, ENABLE_APE))
4160 			mac_mode |= MAC_MODE_APE_TX_EN |
4161 				    MAC_MODE_APE_RX_EN |
4162 				    MAC_MODE_TDE_ENABLE;
4163 
4164 		tw32_f(MAC_MODE, mac_mode);
4165 		udelay(100);
4166 
4167 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168 		udelay(10);
4169 	}
4170 
4171 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174 		u32 base_val;
4175 
4176 		base_val = tp->pci_clock_ctrl;
4177 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 			     CLOCK_CTRL_TXCLK_DISABLE);
4179 
4180 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 	} else if (tg3_flag(tp, 5780_CLASS) ||
4183 		   tg3_flag(tp, CPMU_PRESENT) ||
4184 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4185 		/* do nothing */
4186 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 		u32 newbits1, newbits2;
4188 
4189 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 				    CLOCK_CTRL_TXCLK_DISABLE |
4193 				    CLOCK_CTRL_ALTCLK);
4194 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 		} else if (tg3_flag(tp, 5705_PLUS)) {
4196 			newbits1 = CLOCK_CTRL_625_CORE;
4197 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198 		} else {
4199 			newbits1 = CLOCK_CTRL_ALTCLK;
4200 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201 		}
4202 
4203 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204 			    40);
4205 
4206 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207 			    40);
4208 
4209 		if (!tg3_flag(tp, 5705_PLUS)) {
4210 			u32 newbits3;
4211 
4212 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 					    CLOCK_CTRL_TXCLK_DISABLE |
4216 					    CLOCK_CTRL_44MHZ_CORE);
4217 			} else {
4218 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4219 			}
4220 
4221 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 				    tp->pci_clock_ctrl | newbits3, 40);
4223 		}
4224 	}
4225 
4226 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 		tg3_power_down_phy(tp, do_low_power);
4228 
4229 	tg3_frob_aux_power(tp, true);
4230 
4231 	/* Workaround for unstable PLL clock */
4232 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 		u32 val = tr32(0x7d00);
4236 
4237 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238 		tw32(0x7d00, val);
4239 		if (!tg3_flag(tp, ENABLE_ASF)) {
4240 			int err;
4241 
4242 			err = tg3_nvram_lock(tp);
4243 			tg3_halt_cpu(tp, RX_CPU_BASE);
4244 			if (!err)
4245 				tg3_nvram_unlock(tp);
4246 		}
4247 	}
4248 
4249 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250 
4251 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4252 
4253 	return 0;
4254 }
4255 
4256 static void tg3_power_down(struct tg3 *tp)
4257 {
4258 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 	pci_set_power_state(tp->pdev, PCI_D3hot);
4260 }
4261 
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4263 {
4264 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 	case MII_TG3_AUX_STAT_10HALF:
4266 		*speed = SPEED_10;
4267 		*duplex = DUPLEX_HALF;
4268 		break;
4269 
4270 	case MII_TG3_AUX_STAT_10FULL:
4271 		*speed = SPEED_10;
4272 		*duplex = DUPLEX_FULL;
4273 		break;
4274 
4275 	case MII_TG3_AUX_STAT_100HALF:
4276 		*speed = SPEED_100;
4277 		*duplex = DUPLEX_HALF;
4278 		break;
4279 
4280 	case MII_TG3_AUX_STAT_100FULL:
4281 		*speed = SPEED_100;
4282 		*duplex = DUPLEX_FULL;
4283 		break;
4284 
4285 	case MII_TG3_AUX_STAT_1000HALF:
4286 		*speed = SPEED_1000;
4287 		*duplex = DUPLEX_HALF;
4288 		break;
4289 
4290 	case MII_TG3_AUX_STAT_1000FULL:
4291 		*speed = SPEED_1000;
4292 		*duplex = DUPLEX_FULL;
4293 		break;
4294 
4295 	default:
4296 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298 				 SPEED_10;
4299 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300 				  DUPLEX_HALF;
4301 			break;
4302 		}
4303 		*speed = SPEED_UNKNOWN;
4304 		*duplex = DUPLEX_UNKNOWN;
4305 		break;
4306 	}
4307 }
4308 
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4310 {
4311 	int err = 0;
4312 	u32 val, new_adv;
4313 
4314 	new_adv = ADVERTISE_CSMA;
4315 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 	new_adv |= mii_advertise_flowctrl(flowctrl);
4317 
4318 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4319 	if (err)
4320 		goto done;
4321 
4322 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4324 
4325 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4328 
4329 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4330 		if (err)
4331 			goto done;
4332 	}
4333 
4334 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4335 		goto done;
4336 
4337 	tw32(TG3_CPMU_EEE_MODE,
4338 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4339 
4340 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4341 	if (!err) {
4342 		u32 err2;
4343 
4344 		val = 0;
4345 		/* Advertise 100-BaseTX EEE ability */
4346 		if (advertise & ADVERTISED_100baseT_Full)
4347 			val |= MDIO_AN_EEE_ADV_100TX;
4348 		/* Advertise 1000-BaseT EEE ability */
4349 		if (advertise & ADVERTISED_1000baseT_Full)
4350 			val |= MDIO_AN_EEE_ADV_1000T;
4351 
4352 		if (!tp->eee.eee_enabled) {
4353 			val = 0;
4354 			tp->eee.advertised = 0;
4355 		} else {
4356 			tp->eee.advertised = advertise &
4357 					     (ADVERTISED_100baseT_Full |
4358 					      ADVERTISED_1000baseT_Full);
4359 		}
4360 
4361 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4362 		if (err)
4363 			val = 0;
4364 
4365 		switch (tg3_asic_rev(tp)) {
4366 		case ASIC_REV_5717:
4367 		case ASIC_REV_57765:
4368 		case ASIC_REV_57766:
4369 		case ASIC_REV_5719:
4370 			/* If we advertised any eee advertisements above... */
4371 			if (val)
4372 				val = MII_TG3_DSP_TAP26_ALNOKO |
4373 				      MII_TG3_DSP_TAP26_RMRXSTO |
4374 				      MII_TG3_DSP_TAP26_OPCSINPT;
4375 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4376 			/* Fall through */
4377 		case ASIC_REV_5720:
4378 		case ASIC_REV_5762:
4379 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 						 MII_TG3_DSP_CH34TP2_HIBW01);
4382 		}
4383 
4384 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4385 		if (!err)
4386 			err = err2;
4387 	}
4388 
4389 done:
4390 	return err;
4391 }
4392 
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4394 {
4395 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4397 		u32 adv, fc;
4398 
4399 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 			adv = ADVERTISED_10baseT_Half |
4402 			      ADVERTISED_10baseT_Full;
4403 			if (tg3_flag(tp, WOL_SPEED_100MB))
4404 				adv |= ADVERTISED_100baseT_Half |
4405 				       ADVERTISED_100baseT_Full;
4406 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4407 				adv |= ADVERTISED_1000baseT_Half |
4408 				       ADVERTISED_1000baseT_Full;
4409 
4410 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4411 		} else {
4412 			adv = tp->link_config.advertising;
4413 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4414 				adv &= ~(ADVERTISED_1000baseT_Half |
4415 					 ADVERTISED_1000baseT_Full);
4416 
4417 			fc = tp->link_config.flowctrl;
4418 		}
4419 
4420 		tg3_phy_autoneg_cfg(tp, adv, fc);
4421 
4422 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 			/* Normally during power down we want to autonegotiate
4425 			 * the lowest possible speed for WOL. However, to avoid
4426 			 * link flap, we leave it untouched.
4427 			 */
4428 			return;
4429 		}
4430 
4431 		tg3_writephy(tp, MII_BMCR,
4432 			     BMCR_ANENABLE | BMCR_ANRESTART);
4433 	} else {
4434 		int i;
4435 		u32 bmcr, orig_bmcr;
4436 
4437 		tp->link_config.active_speed = tp->link_config.speed;
4438 		tp->link_config.active_duplex = tp->link_config.duplex;
4439 
4440 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4441 			/* With autoneg disabled, 5715 only links up when the
4442 			 * advertisement register has the configured speed
4443 			 * enabled.
4444 			 */
4445 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4446 		}
4447 
4448 		bmcr = 0;
4449 		switch (tp->link_config.speed) {
4450 		default:
4451 		case SPEED_10:
4452 			break;
4453 
4454 		case SPEED_100:
4455 			bmcr |= BMCR_SPEED100;
4456 			break;
4457 
4458 		case SPEED_1000:
4459 			bmcr |= BMCR_SPEED1000;
4460 			break;
4461 		}
4462 
4463 		if (tp->link_config.duplex == DUPLEX_FULL)
4464 			bmcr |= BMCR_FULLDPLX;
4465 
4466 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4467 		    (bmcr != orig_bmcr)) {
4468 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4469 			for (i = 0; i < 1500; i++) {
4470 				u32 tmp;
4471 
4472 				udelay(10);
4473 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4474 				    tg3_readphy(tp, MII_BMSR, &tmp))
4475 					continue;
4476 				if (!(tmp & BMSR_LSTATUS)) {
4477 					udelay(40);
4478 					break;
4479 				}
4480 			}
4481 			tg3_writephy(tp, MII_BMCR, bmcr);
4482 			udelay(40);
4483 		}
4484 	}
4485 }
4486 
4487 static int tg3_phy_pull_config(struct tg3 *tp)
4488 {
4489 	int err;
4490 	u32 val;
4491 
4492 	err = tg3_readphy(tp, MII_BMCR, &val);
4493 	if (err)
4494 		goto done;
4495 
4496 	if (!(val & BMCR_ANENABLE)) {
4497 		tp->link_config.autoneg = AUTONEG_DISABLE;
4498 		tp->link_config.advertising = 0;
4499 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4500 
4501 		err = -EIO;
4502 
4503 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4504 		case 0:
4505 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4506 				goto done;
4507 
4508 			tp->link_config.speed = SPEED_10;
4509 			break;
4510 		case BMCR_SPEED100:
4511 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 				goto done;
4513 
4514 			tp->link_config.speed = SPEED_100;
4515 			break;
4516 		case BMCR_SPEED1000:
4517 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4518 				tp->link_config.speed = SPEED_1000;
4519 				break;
4520 			}
4521 			/* Fall through */
4522 		default:
4523 			goto done;
4524 		}
4525 
4526 		if (val & BMCR_FULLDPLX)
4527 			tp->link_config.duplex = DUPLEX_FULL;
4528 		else
4529 			tp->link_config.duplex = DUPLEX_HALF;
4530 
4531 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4532 
4533 		err = 0;
4534 		goto done;
4535 	}
4536 
4537 	tp->link_config.autoneg = AUTONEG_ENABLE;
4538 	tp->link_config.advertising = ADVERTISED_Autoneg;
4539 	tg3_flag_set(tp, PAUSE_AUTONEG);
4540 
4541 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4542 		u32 adv;
4543 
4544 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4545 		if (err)
4546 			goto done;
4547 
4548 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4549 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4550 
4551 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4552 	} else {
4553 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4554 	}
4555 
4556 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4557 		u32 adv;
4558 
4559 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4560 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4561 			if (err)
4562 				goto done;
4563 
4564 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4565 		} else {
4566 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4567 			if (err)
4568 				goto done;
4569 
4570 			adv = tg3_decode_flowctrl_1000X(val);
4571 			tp->link_config.flowctrl = adv;
4572 
4573 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4574 			adv = mii_adv_to_ethtool_adv_x(val);
4575 		}
4576 
4577 		tp->link_config.advertising |= adv;
4578 	}
4579 
4580 done:
4581 	return err;
4582 }
4583 
4584 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4585 {
4586 	int err;
4587 
4588 	/* Turn off tap power management. */
4589 	/* Set Extended packet length bit */
4590 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4591 
4592 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4593 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4594 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4595 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4596 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4597 
4598 	udelay(40);
4599 
4600 	return err;
4601 }
4602 
4603 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4604 {
4605 	struct ethtool_eee eee;
4606 
4607 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4608 		return true;
4609 
4610 	tg3_eee_pull_config(tp, &eee);
4611 
4612 	if (tp->eee.eee_enabled) {
4613 		if (tp->eee.advertised != eee.advertised ||
4614 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4615 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4616 			return false;
4617 	} else {
4618 		/* EEE is disabled but we're advertising */
4619 		if (eee.advertised)
4620 			return false;
4621 	}
4622 
4623 	return true;
4624 }
4625 
4626 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4627 {
4628 	u32 advmsk, tgtadv, advertising;
4629 
4630 	advertising = tp->link_config.advertising;
4631 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4632 
4633 	advmsk = ADVERTISE_ALL;
4634 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4635 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4636 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4637 	}
4638 
4639 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4640 		return false;
4641 
4642 	if ((*lcladv & advmsk) != tgtadv)
4643 		return false;
4644 
4645 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4646 		u32 tg3_ctrl;
4647 
4648 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4649 
4650 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4651 			return false;
4652 
4653 		if (tgtadv &&
4654 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4655 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4656 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4657 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4658 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4659 		} else {
4660 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4661 		}
4662 
4663 		if (tg3_ctrl != tgtadv)
4664 			return false;
4665 	}
4666 
4667 	return true;
4668 }
4669 
4670 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4671 {
4672 	u32 lpeth = 0;
4673 
4674 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4675 		u32 val;
4676 
4677 		if (tg3_readphy(tp, MII_STAT1000, &val))
4678 			return false;
4679 
4680 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4681 	}
4682 
4683 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4684 		return false;
4685 
4686 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4687 	tp->link_config.rmt_adv = lpeth;
4688 
4689 	return true;
4690 }
4691 
4692 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4693 {
4694 	if (curr_link_up != tp->link_up) {
4695 		if (curr_link_up) {
4696 			netif_carrier_on(tp->dev);
4697 		} else {
4698 			netif_carrier_off(tp->dev);
4699 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4700 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4701 		}
4702 
4703 		tg3_link_report(tp);
4704 		return true;
4705 	}
4706 
4707 	return false;
4708 }
4709 
4710 static void tg3_clear_mac_status(struct tg3 *tp)
4711 {
4712 	tw32(MAC_EVENT, 0);
4713 
4714 	tw32_f(MAC_STATUS,
4715 	       MAC_STATUS_SYNC_CHANGED |
4716 	       MAC_STATUS_CFG_CHANGED |
4717 	       MAC_STATUS_MI_COMPLETION |
4718 	       MAC_STATUS_LNKSTATE_CHANGED);
4719 	udelay(40);
4720 }
4721 
4722 static void tg3_setup_eee(struct tg3 *tp)
4723 {
4724 	u32 val;
4725 
4726 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4727 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4728 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4729 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4730 
4731 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4732 
4733 	tw32_f(TG3_CPMU_EEE_CTRL,
4734 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4735 
4736 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4737 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4738 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4739 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4740 
4741 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4742 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4743 
4744 	if (tg3_flag(tp, ENABLE_APE))
4745 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4746 
4747 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4748 
4749 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4750 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4751 	       (tp->eee.tx_lpi_timer & 0xffff));
4752 
4753 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4754 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4755 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4756 }
4757 
4758 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4759 {
4760 	bool current_link_up;
4761 	u32 bmsr, val;
4762 	u32 lcl_adv, rmt_adv;
4763 	u16 current_speed;
4764 	u8 current_duplex;
4765 	int i, err;
4766 
4767 	tg3_clear_mac_status(tp);
4768 
4769 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4770 		tw32_f(MAC_MI_MODE,
4771 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4772 		udelay(80);
4773 	}
4774 
4775 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4776 
4777 	/* Some third-party PHYs need to be reset on link going
4778 	 * down.
4779 	 */
4780 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4781 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4782 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4783 	    tp->link_up) {
4784 		tg3_readphy(tp, MII_BMSR, &bmsr);
4785 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4786 		    !(bmsr & BMSR_LSTATUS))
4787 			force_reset = true;
4788 	}
4789 	if (force_reset)
4790 		tg3_phy_reset(tp);
4791 
4792 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4793 		tg3_readphy(tp, MII_BMSR, &bmsr);
4794 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4795 		    !tg3_flag(tp, INIT_COMPLETE))
4796 			bmsr = 0;
4797 
4798 		if (!(bmsr & BMSR_LSTATUS)) {
4799 			err = tg3_init_5401phy_dsp(tp);
4800 			if (err)
4801 				return err;
4802 
4803 			tg3_readphy(tp, MII_BMSR, &bmsr);
4804 			for (i = 0; i < 1000; i++) {
4805 				udelay(10);
4806 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4807 				    (bmsr & BMSR_LSTATUS)) {
4808 					udelay(40);
4809 					break;
4810 				}
4811 			}
4812 
4813 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4814 			    TG3_PHY_REV_BCM5401_B0 &&
4815 			    !(bmsr & BMSR_LSTATUS) &&
4816 			    tp->link_config.active_speed == SPEED_1000) {
4817 				err = tg3_phy_reset(tp);
4818 				if (!err)
4819 					err = tg3_init_5401phy_dsp(tp);
4820 				if (err)
4821 					return err;
4822 			}
4823 		}
4824 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4825 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4826 		/* 5701 {A0,B0} CRC bug workaround */
4827 		tg3_writephy(tp, 0x15, 0x0a75);
4828 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4829 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4830 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4831 	}
4832 
4833 	/* Clear pending interrupts... */
4834 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4835 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4836 
4837 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4838 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4839 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4840 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4841 
4842 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4843 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4844 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4845 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4846 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4847 		else
4848 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4849 	}
4850 
4851 	current_link_up = false;
4852 	current_speed = SPEED_UNKNOWN;
4853 	current_duplex = DUPLEX_UNKNOWN;
4854 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4855 	tp->link_config.rmt_adv = 0;
4856 
4857 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4858 		err = tg3_phy_auxctl_read(tp,
4859 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4860 					  &val);
4861 		if (!err && !(val & (1 << 10))) {
4862 			tg3_phy_auxctl_write(tp,
4863 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4864 					     val | (1 << 10));
4865 			goto relink;
4866 		}
4867 	}
4868 
4869 	bmsr = 0;
4870 	for (i = 0; i < 100; i++) {
4871 		tg3_readphy(tp, MII_BMSR, &bmsr);
4872 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4873 		    (bmsr & BMSR_LSTATUS))
4874 			break;
4875 		udelay(40);
4876 	}
4877 
4878 	if (bmsr & BMSR_LSTATUS) {
4879 		u32 aux_stat, bmcr;
4880 
4881 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4882 		for (i = 0; i < 2000; i++) {
4883 			udelay(10);
4884 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4885 			    aux_stat)
4886 				break;
4887 		}
4888 
4889 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4890 					     &current_speed,
4891 					     &current_duplex);
4892 
4893 		bmcr = 0;
4894 		for (i = 0; i < 200; i++) {
4895 			tg3_readphy(tp, MII_BMCR, &bmcr);
4896 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4897 				continue;
4898 			if (bmcr && bmcr != 0x7fff)
4899 				break;
4900 			udelay(10);
4901 		}
4902 
4903 		lcl_adv = 0;
4904 		rmt_adv = 0;
4905 
4906 		tp->link_config.active_speed = current_speed;
4907 		tp->link_config.active_duplex = current_duplex;
4908 
4909 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4911 
4912 			if ((bmcr & BMCR_ANENABLE) &&
4913 			    eee_config_ok &&
4914 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4915 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4916 				current_link_up = true;
4917 
4918 			/* EEE settings changes take effect only after a phy
4919 			 * reset.  If we have skipped a reset due to Link Flap
4920 			 * Avoidance being enabled, do it now.
4921 			 */
4922 			if (!eee_config_ok &&
4923 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4924 			    !force_reset) {
4925 				tg3_setup_eee(tp);
4926 				tg3_phy_reset(tp);
4927 			}
4928 		} else {
4929 			if (!(bmcr & BMCR_ANENABLE) &&
4930 			    tp->link_config.speed == current_speed &&
4931 			    tp->link_config.duplex == current_duplex) {
4932 				current_link_up = true;
4933 			}
4934 		}
4935 
4936 		if (current_link_up &&
4937 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4938 			u32 reg, bit;
4939 
4940 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4941 				reg = MII_TG3_FET_GEN_STAT;
4942 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4943 			} else {
4944 				reg = MII_TG3_EXT_STAT;
4945 				bit = MII_TG3_EXT_STAT_MDIX;
4946 			}
4947 
4948 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4949 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4950 
4951 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4952 		}
4953 	}
4954 
4955 relink:
4956 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4957 		tg3_phy_copper_begin(tp);
4958 
4959 		if (tg3_flag(tp, ROBOSWITCH)) {
4960 			current_link_up = true;
4961 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4962 			current_speed = SPEED_1000;
4963 			current_duplex = DUPLEX_FULL;
4964 			tp->link_config.active_speed = current_speed;
4965 			tp->link_config.active_duplex = current_duplex;
4966 		}
4967 
4968 		tg3_readphy(tp, MII_BMSR, &bmsr);
4969 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4970 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4971 			current_link_up = true;
4972 	}
4973 
4974 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4975 	if (current_link_up) {
4976 		if (tp->link_config.active_speed == SPEED_100 ||
4977 		    tp->link_config.active_speed == SPEED_10)
4978 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4979 		else
4980 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4981 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4982 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4983 	else
4984 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4985 
4986 	/* In order for the 5750 core in BCM4785 chip to work properly
4987 	 * in RGMII mode, the Led Control Register must be set up.
4988 	 */
4989 	if (tg3_flag(tp, RGMII_MODE)) {
4990 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4991 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4992 
4993 		if (tp->link_config.active_speed == SPEED_10)
4994 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4995 		else if (tp->link_config.active_speed == SPEED_100)
4996 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4997 				     LED_CTRL_100MBPS_ON);
4998 		else if (tp->link_config.active_speed == SPEED_1000)
4999 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 				     LED_CTRL_1000MBPS_ON);
5001 
5002 		tw32(MAC_LED_CTRL, led_ctrl);
5003 		udelay(40);
5004 	}
5005 
5006 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5007 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5008 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5009 
5010 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5011 		if (current_link_up &&
5012 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5013 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5014 		else
5015 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5016 	}
5017 
5018 	/* ??? Without this setting Netgear GA302T PHY does not
5019 	 * ??? send/receive packets...
5020 	 */
5021 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5022 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5023 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5024 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5025 		udelay(80);
5026 	}
5027 
5028 	tw32_f(MAC_MODE, tp->mac_mode);
5029 	udelay(40);
5030 
5031 	tg3_phy_eee_adjust(tp, current_link_up);
5032 
5033 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5034 		/* Polled via timer. */
5035 		tw32_f(MAC_EVENT, 0);
5036 	} else {
5037 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5038 	}
5039 	udelay(40);
5040 
5041 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5042 	    current_link_up &&
5043 	    tp->link_config.active_speed == SPEED_1000 &&
5044 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5045 		udelay(120);
5046 		tw32_f(MAC_STATUS,
5047 		     (MAC_STATUS_SYNC_CHANGED |
5048 		      MAC_STATUS_CFG_CHANGED));
5049 		udelay(40);
5050 		tg3_write_mem(tp,
5051 			      NIC_SRAM_FIRMWARE_MBOX,
5052 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5053 	}
5054 
5055 	/* Prevent send BD corruption. */
5056 	if (tg3_flag(tp, CLKREQ_BUG)) {
5057 		if (tp->link_config.active_speed == SPEED_100 ||
5058 		    tp->link_config.active_speed == SPEED_10)
5059 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5060 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5061 		else
5062 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5063 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5064 	}
5065 
5066 	tg3_test_and_report_link_chg(tp, current_link_up);
5067 
5068 	return 0;
5069 }
5070 
5071 struct tg3_fiber_aneginfo {
5072 	int state;
5073 #define ANEG_STATE_UNKNOWN		0
5074 #define ANEG_STATE_AN_ENABLE		1
5075 #define ANEG_STATE_RESTART_INIT		2
5076 #define ANEG_STATE_RESTART		3
5077 #define ANEG_STATE_DISABLE_LINK_OK	4
5078 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5079 #define ANEG_STATE_ABILITY_DETECT	6
5080 #define ANEG_STATE_ACK_DETECT_INIT	7
5081 #define ANEG_STATE_ACK_DETECT		8
5082 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5083 #define ANEG_STATE_COMPLETE_ACK		10
5084 #define ANEG_STATE_IDLE_DETECT_INIT	11
5085 #define ANEG_STATE_IDLE_DETECT		12
5086 #define ANEG_STATE_LINK_OK		13
5087 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5088 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5089 
5090 	u32 flags;
5091 #define MR_AN_ENABLE		0x00000001
5092 #define MR_RESTART_AN		0x00000002
5093 #define MR_AN_COMPLETE		0x00000004
5094 #define MR_PAGE_RX		0x00000008
5095 #define MR_NP_LOADED		0x00000010
5096 #define MR_TOGGLE_TX		0x00000020
5097 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5098 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5099 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5100 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5101 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5102 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5103 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5104 #define MR_TOGGLE_RX		0x00002000
5105 #define MR_NP_RX		0x00004000
5106 
5107 #define MR_LINK_OK		0x80000000
5108 
5109 	unsigned long link_time, cur_time;
5110 
5111 	u32 ability_match_cfg;
5112 	int ability_match_count;
5113 
5114 	char ability_match, idle_match, ack_match;
5115 
5116 	u32 txconfig, rxconfig;
5117 #define ANEG_CFG_NP		0x00000080
5118 #define ANEG_CFG_ACK		0x00000040
5119 #define ANEG_CFG_RF2		0x00000020
5120 #define ANEG_CFG_RF1		0x00000010
5121 #define ANEG_CFG_PS2		0x00000001
5122 #define ANEG_CFG_PS1		0x00008000
5123 #define ANEG_CFG_HD		0x00004000
5124 #define ANEG_CFG_FD		0x00002000
5125 #define ANEG_CFG_INVAL		0x00001f06
5126 
5127 };
5128 #define ANEG_OK		0
5129 #define ANEG_DONE	1
5130 #define ANEG_TIMER_ENAB	2
5131 #define ANEG_FAILED	-1
5132 
5133 #define ANEG_STATE_SETTLE_TIME	10000
5134 
5135 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5136 				   struct tg3_fiber_aneginfo *ap)
5137 {
5138 	u16 flowctrl;
5139 	unsigned long delta;
5140 	u32 rx_cfg_reg;
5141 	int ret;
5142 
5143 	if (ap->state == ANEG_STATE_UNKNOWN) {
5144 		ap->rxconfig = 0;
5145 		ap->link_time = 0;
5146 		ap->cur_time = 0;
5147 		ap->ability_match_cfg = 0;
5148 		ap->ability_match_count = 0;
5149 		ap->ability_match = 0;
5150 		ap->idle_match = 0;
5151 		ap->ack_match = 0;
5152 	}
5153 	ap->cur_time++;
5154 
5155 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5156 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5157 
5158 		if (rx_cfg_reg != ap->ability_match_cfg) {
5159 			ap->ability_match_cfg = rx_cfg_reg;
5160 			ap->ability_match = 0;
5161 			ap->ability_match_count = 0;
5162 		} else {
5163 			if (++ap->ability_match_count > 1) {
5164 				ap->ability_match = 1;
5165 				ap->ability_match_cfg = rx_cfg_reg;
5166 			}
5167 		}
5168 		if (rx_cfg_reg & ANEG_CFG_ACK)
5169 			ap->ack_match = 1;
5170 		else
5171 			ap->ack_match = 0;
5172 
5173 		ap->idle_match = 0;
5174 	} else {
5175 		ap->idle_match = 1;
5176 		ap->ability_match_cfg = 0;
5177 		ap->ability_match_count = 0;
5178 		ap->ability_match = 0;
5179 		ap->ack_match = 0;
5180 
5181 		rx_cfg_reg = 0;
5182 	}
5183 
5184 	ap->rxconfig = rx_cfg_reg;
5185 	ret = ANEG_OK;
5186 
5187 	switch (ap->state) {
5188 	case ANEG_STATE_UNKNOWN:
5189 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5190 			ap->state = ANEG_STATE_AN_ENABLE;
5191 
5192 		/* fallthru */
5193 	case ANEG_STATE_AN_ENABLE:
5194 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5195 		if (ap->flags & MR_AN_ENABLE) {
5196 			ap->link_time = 0;
5197 			ap->cur_time = 0;
5198 			ap->ability_match_cfg = 0;
5199 			ap->ability_match_count = 0;
5200 			ap->ability_match = 0;
5201 			ap->idle_match = 0;
5202 			ap->ack_match = 0;
5203 
5204 			ap->state = ANEG_STATE_RESTART_INIT;
5205 		} else {
5206 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5207 		}
5208 		break;
5209 
5210 	case ANEG_STATE_RESTART_INIT:
5211 		ap->link_time = ap->cur_time;
5212 		ap->flags &= ~(MR_NP_LOADED);
5213 		ap->txconfig = 0;
5214 		tw32(MAC_TX_AUTO_NEG, 0);
5215 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5216 		tw32_f(MAC_MODE, tp->mac_mode);
5217 		udelay(40);
5218 
5219 		ret = ANEG_TIMER_ENAB;
5220 		ap->state = ANEG_STATE_RESTART;
5221 
5222 		/* fallthru */
5223 	case ANEG_STATE_RESTART:
5224 		delta = ap->cur_time - ap->link_time;
5225 		if (delta > ANEG_STATE_SETTLE_TIME)
5226 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5227 		else
5228 			ret = ANEG_TIMER_ENAB;
5229 		break;
5230 
5231 	case ANEG_STATE_DISABLE_LINK_OK:
5232 		ret = ANEG_DONE;
5233 		break;
5234 
5235 	case ANEG_STATE_ABILITY_DETECT_INIT:
5236 		ap->flags &= ~(MR_TOGGLE_TX);
5237 		ap->txconfig = ANEG_CFG_FD;
5238 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5239 		if (flowctrl & ADVERTISE_1000XPAUSE)
5240 			ap->txconfig |= ANEG_CFG_PS1;
5241 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5242 			ap->txconfig |= ANEG_CFG_PS2;
5243 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5244 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5245 		tw32_f(MAC_MODE, tp->mac_mode);
5246 		udelay(40);
5247 
5248 		ap->state = ANEG_STATE_ABILITY_DETECT;
5249 		break;
5250 
5251 	case ANEG_STATE_ABILITY_DETECT:
5252 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5253 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5254 		break;
5255 
5256 	case ANEG_STATE_ACK_DETECT_INIT:
5257 		ap->txconfig |= ANEG_CFG_ACK;
5258 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 		tw32_f(MAC_MODE, tp->mac_mode);
5261 		udelay(40);
5262 
5263 		ap->state = ANEG_STATE_ACK_DETECT;
5264 
5265 		/* fallthru */
5266 	case ANEG_STATE_ACK_DETECT:
5267 		if (ap->ack_match != 0) {
5268 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5269 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5270 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5271 			} else {
5272 				ap->state = ANEG_STATE_AN_ENABLE;
5273 			}
5274 		} else if (ap->ability_match != 0 &&
5275 			   ap->rxconfig == 0) {
5276 			ap->state = ANEG_STATE_AN_ENABLE;
5277 		}
5278 		break;
5279 
5280 	case ANEG_STATE_COMPLETE_ACK_INIT:
5281 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5282 			ret = ANEG_FAILED;
5283 			break;
5284 		}
5285 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5286 			       MR_LP_ADV_HALF_DUPLEX |
5287 			       MR_LP_ADV_SYM_PAUSE |
5288 			       MR_LP_ADV_ASYM_PAUSE |
5289 			       MR_LP_ADV_REMOTE_FAULT1 |
5290 			       MR_LP_ADV_REMOTE_FAULT2 |
5291 			       MR_LP_ADV_NEXT_PAGE |
5292 			       MR_TOGGLE_RX |
5293 			       MR_NP_RX);
5294 		if (ap->rxconfig & ANEG_CFG_FD)
5295 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5296 		if (ap->rxconfig & ANEG_CFG_HD)
5297 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5298 		if (ap->rxconfig & ANEG_CFG_PS1)
5299 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5300 		if (ap->rxconfig & ANEG_CFG_PS2)
5301 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5302 		if (ap->rxconfig & ANEG_CFG_RF1)
5303 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5304 		if (ap->rxconfig & ANEG_CFG_RF2)
5305 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5306 		if (ap->rxconfig & ANEG_CFG_NP)
5307 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5308 
5309 		ap->link_time = ap->cur_time;
5310 
5311 		ap->flags ^= (MR_TOGGLE_TX);
5312 		if (ap->rxconfig & 0x0008)
5313 			ap->flags |= MR_TOGGLE_RX;
5314 		if (ap->rxconfig & ANEG_CFG_NP)
5315 			ap->flags |= MR_NP_RX;
5316 		ap->flags |= MR_PAGE_RX;
5317 
5318 		ap->state = ANEG_STATE_COMPLETE_ACK;
5319 		ret = ANEG_TIMER_ENAB;
5320 		break;
5321 
5322 	case ANEG_STATE_COMPLETE_ACK:
5323 		if (ap->ability_match != 0 &&
5324 		    ap->rxconfig == 0) {
5325 			ap->state = ANEG_STATE_AN_ENABLE;
5326 			break;
5327 		}
5328 		delta = ap->cur_time - ap->link_time;
5329 		if (delta > ANEG_STATE_SETTLE_TIME) {
5330 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5331 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5332 			} else {
5333 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5334 				    !(ap->flags & MR_NP_RX)) {
5335 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5336 				} else {
5337 					ret = ANEG_FAILED;
5338 				}
5339 			}
5340 		}
5341 		break;
5342 
5343 	case ANEG_STATE_IDLE_DETECT_INIT:
5344 		ap->link_time = ap->cur_time;
5345 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5346 		tw32_f(MAC_MODE, tp->mac_mode);
5347 		udelay(40);
5348 
5349 		ap->state = ANEG_STATE_IDLE_DETECT;
5350 		ret = ANEG_TIMER_ENAB;
5351 		break;
5352 
5353 	case ANEG_STATE_IDLE_DETECT:
5354 		if (ap->ability_match != 0 &&
5355 		    ap->rxconfig == 0) {
5356 			ap->state = ANEG_STATE_AN_ENABLE;
5357 			break;
5358 		}
5359 		delta = ap->cur_time - ap->link_time;
5360 		if (delta > ANEG_STATE_SETTLE_TIME) {
5361 			/* XXX another gem from the Broadcom driver :( */
5362 			ap->state = ANEG_STATE_LINK_OK;
5363 		}
5364 		break;
5365 
5366 	case ANEG_STATE_LINK_OK:
5367 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5368 		ret = ANEG_DONE;
5369 		break;
5370 
5371 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5372 		/* ??? unimplemented */
5373 		break;
5374 
5375 	case ANEG_STATE_NEXT_PAGE_WAIT:
5376 		/* ??? unimplemented */
5377 		break;
5378 
5379 	default:
5380 		ret = ANEG_FAILED;
5381 		break;
5382 	}
5383 
5384 	return ret;
5385 }
5386 
5387 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5388 {
5389 	int res = 0;
5390 	struct tg3_fiber_aneginfo aninfo;
5391 	int status = ANEG_FAILED;
5392 	unsigned int tick;
5393 	u32 tmp;
5394 
5395 	tw32_f(MAC_TX_AUTO_NEG, 0);
5396 
5397 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5398 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5399 	udelay(40);
5400 
5401 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5402 	udelay(40);
5403 
5404 	memset(&aninfo, 0, sizeof(aninfo));
5405 	aninfo.flags |= MR_AN_ENABLE;
5406 	aninfo.state = ANEG_STATE_UNKNOWN;
5407 	aninfo.cur_time = 0;
5408 	tick = 0;
5409 	while (++tick < 195000) {
5410 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5411 		if (status == ANEG_DONE || status == ANEG_FAILED)
5412 			break;
5413 
5414 		udelay(1);
5415 	}
5416 
5417 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5418 	tw32_f(MAC_MODE, tp->mac_mode);
5419 	udelay(40);
5420 
5421 	*txflags = aninfo.txconfig;
5422 	*rxflags = aninfo.flags;
5423 
5424 	if (status == ANEG_DONE &&
5425 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5426 			     MR_LP_ADV_FULL_DUPLEX)))
5427 		res = 1;
5428 
5429 	return res;
5430 }
5431 
5432 static void tg3_init_bcm8002(struct tg3 *tp)
5433 {
5434 	u32 mac_status = tr32(MAC_STATUS);
5435 	int i;
5436 
5437 	/* Reset when initting first time or we have a link. */
5438 	if (tg3_flag(tp, INIT_COMPLETE) &&
5439 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5440 		return;
5441 
5442 	/* Set PLL lock range. */
5443 	tg3_writephy(tp, 0x16, 0x8007);
5444 
5445 	/* SW reset */
5446 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5447 
5448 	/* Wait for reset to complete. */
5449 	/* XXX schedule_timeout() ... */
5450 	for (i = 0; i < 500; i++)
5451 		udelay(10);
5452 
5453 	/* Config mode; select PMA/Ch 1 regs. */
5454 	tg3_writephy(tp, 0x10, 0x8411);
5455 
5456 	/* Enable auto-lock and comdet, select txclk for tx. */
5457 	tg3_writephy(tp, 0x11, 0x0a10);
5458 
5459 	tg3_writephy(tp, 0x18, 0x00a0);
5460 	tg3_writephy(tp, 0x16, 0x41ff);
5461 
5462 	/* Assert and deassert POR. */
5463 	tg3_writephy(tp, 0x13, 0x0400);
5464 	udelay(40);
5465 	tg3_writephy(tp, 0x13, 0x0000);
5466 
5467 	tg3_writephy(tp, 0x11, 0x0a50);
5468 	udelay(40);
5469 	tg3_writephy(tp, 0x11, 0x0a10);
5470 
5471 	/* Wait for signal to stabilize */
5472 	/* XXX schedule_timeout() ... */
5473 	for (i = 0; i < 15000; i++)
5474 		udelay(10);
5475 
5476 	/* Deselect the channel register so we can read the PHYID
5477 	 * later.
5478 	 */
5479 	tg3_writephy(tp, 0x10, 0x8011);
5480 }
5481 
5482 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5483 {
5484 	u16 flowctrl;
5485 	bool current_link_up;
5486 	u32 sg_dig_ctrl, sg_dig_status;
5487 	u32 serdes_cfg, expected_sg_dig_ctrl;
5488 	int workaround, port_a;
5489 
5490 	serdes_cfg = 0;
5491 	expected_sg_dig_ctrl = 0;
5492 	workaround = 0;
5493 	port_a = 1;
5494 	current_link_up = false;
5495 
5496 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5497 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5498 		workaround = 1;
5499 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5500 			port_a = 0;
5501 
5502 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5503 		/* preserve bits 20-23 for voltage regulator */
5504 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5505 	}
5506 
5507 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5508 
5509 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5510 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5511 			if (workaround) {
5512 				u32 val = serdes_cfg;
5513 
5514 				if (port_a)
5515 					val |= 0xc010000;
5516 				else
5517 					val |= 0x4010000;
5518 				tw32_f(MAC_SERDES_CFG, val);
5519 			}
5520 
5521 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5522 		}
5523 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5524 			tg3_setup_flow_control(tp, 0, 0);
5525 			current_link_up = true;
5526 		}
5527 		goto out;
5528 	}
5529 
5530 	/* Want auto-negotiation.  */
5531 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5532 
5533 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5534 	if (flowctrl & ADVERTISE_1000XPAUSE)
5535 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5536 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5537 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5538 
5539 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5540 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5541 		    tp->serdes_counter &&
5542 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5543 				    MAC_STATUS_RCVD_CFG)) ==
5544 		     MAC_STATUS_PCS_SYNCED)) {
5545 			tp->serdes_counter--;
5546 			current_link_up = true;
5547 			goto out;
5548 		}
5549 restart_autoneg:
5550 		if (workaround)
5551 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5552 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5553 		udelay(5);
5554 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5555 
5556 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5557 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5558 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5559 				 MAC_STATUS_SIGNAL_DET)) {
5560 		sg_dig_status = tr32(SG_DIG_STATUS);
5561 		mac_status = tr32(MAC_STATUS);
5562 
5563 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5564 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5565 			u32 local_adv = 0, remote_adv = 0;
5566 
5567 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5568 				local_adv |= ADVERTISE_1000XPAUSE;
5569 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5570 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5571 
5572 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5573 				remote_adv |= LPA_1000XPAUSE;
5574 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5575 				remote_adv |= LPA_1000XPAUSE_ASYM;
5576 
5577 			tp->link_config.rmt_adv =
5578 					   mii_adv_to_ethtool_adv_x(remote_adv);
5579 
5580 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5581 			current_link_up = true;
5582 			tp->serdes_counter = 0;
5583 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5585 			if (tp->serdes_counter)
5586 				tp->serdes_counter--;
5587 			else {
5588 				if (workaround) {
5589 					u32 val = serdes_cfg;
5590 
5591 					if (port_a)
5592 						val |= 0xc010000;
5593 					else
5594 						val |= 0x4010000;
5595 
5596 					tw32_f(MAC_SERDES_CFG, val);
5597 				}
5598 
5599 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5600 				udelay(40);
5601 
5602 				/* Link parallel detection - link is up */
5603 				/* only if we have PCS_SYNC and not */
5604 				/* receiving config code words */
5605 				mac_status = tr32(MAC_STATUS);
5606 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5607 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5608 					tg3_setup_flow_control(tp, 0, 0);
5609 					current_link_up = true;
5610 					tp->phy_flags |=
5611 						TG3_PHYFLG_PARALLEL_DETECT;
5612 					tp->serdes_counter =
5613 						SERDES_PARALLEL_DET_TIMEOUT;
5614 				} else
5615 					goto restart_autoneg;
5616 			}
5617 		}
5618 	} else {
5619 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5620 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5621 	}
5622 
5623 out:
5624 	return current_link_up;
5625 }
5626 
5627 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5628 {
5629 	bool current_link_up = false;
5630 
5631 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5632 		goto out;
5633 
5634 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5635 		u32 txflags, rxflags;
5636 		int i;
5637 
5638 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5639 			u32 local_adv = 0, remote_adv = 0;
5640 
5641 			if (txflags & ANEG_CFG_PS1)
5642 				local_adv |= ADVERTISE_1000XPAUSE;
5643 			if (txflags & ANEG_CFG_PS2)
5644 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5645 
5646 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5647 				remote_adv |= LPA_1000XPAUSE;
5648 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5649 				remote_adv |= LPA_1000XPAUSE_ASYM;
5650 
5651 			tp->link_config.rmt_adv =
5652 					   mii_adv_to_ethtool_adv_x(remote_adv);
5653 
5654 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5655 
5656 			current_link_up = true;
5657 		}
5658 		for (i = 0; i < 30; i++) {
5659 			udelay(20);
5660 			tw32_f(MAC_STATUS,
5661 			       (MAC_STATUS_SYNC_CHANGED |
5662 				MAC_STATUS_CFG_CHANGED));
5663 			udelay(40);
5664 			if ((tr32(MAC_STATUS) &
5665 			     (MAC_STATUS_SYNC_CHANGED |
5666 			      MAC_STATUS_CFG_CHANGED)) == 0)
5667 				break;
5668 		}
5669 
5670 		mac_status = tr32(MAC_STATUS);
5671 		if (!current_link_up &&
5672 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5673 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5674 			current_link_up = true;
5675 	} else {
5676 		tg3_setup_flow_control(tp, 0, 0);
5677 
5678 		/* Forcing 1000FD link up. */
5679 		current_link_up = true;
5680 
5681 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5682 		udelay(40);
5683 
5684 		tw32_f(MAC_MODE, tp->mac_mode);
5685 		udelay(40);
5686 	}
5687 
5688 out:
5689 	return current_link_up;
5690 }
5691 
5692 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5693 {
5694 	u32 orig_pause_cfg;
5695 	u16 orig_active_speed;
5696 	u8 orig_active_duplex;
5697 	u32 mac_status;
5698 	bool current_link_up;
5699 	int i;
5700 
5701 	orig_pause_cfg = tp->link_config.active_flowctrl;
5702 	orig_active_speed = tp->link_config.active_speed;
5703 	orig_active_duplex = tp->link_config.active_duplex;
5704 
5705 	if (!tg3_flag(tp, HW_AUTONEG) &&
5706 	    tp->link_up &&
5707 	    tg3_flag(tp, INIT_COMPLETE)) {
5708 		mac_status = tr32(MAC_STATUS);
5709 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5710 			       MAC_STATUS_SIGNAL_DET |
5711 			       MAC_STATUS_CFG_CHANGED |
5712 			       MAC_STATUS_RCVD_CFG);
5713 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5714 				   MAC_STATUS_SIGNAL_DET)) {
5715 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5716 					    MAC_STATUS_CFG_CHANGED));
5717 			return 0;
5718 		}
5719 	}
5720 
5721 	tw32_f(MAC_TX_AUTO_NEG, 0);
5722 
5723 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5724 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5725 	tw32_f(MAC_MODE, tp->mac_mode);
5726 	udelay(40);
5727 
5728 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5729 		tg3_init_bcm8002(tp);
5730 
5731 	/* Enable link change event even when serdes polling.  */
5732 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5733 	udelay(40);
5734 
5735 	current_link_up = false;
5736 	tp->link_config.rmt_adv = 0;
5737 	mac_status = tr32(MAC_STATUS);
5738 
5739 	if (tg3_flag(tp, HW_AUTONEG))
5740 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5741 	else
5742 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5743 
5744 	tp->napi[0].hw_status->status =
5745 		(SD_STATUS_UPDATED |
5746 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5747 
5748 	for (i = 0; i < 100; i++) {
5749 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5750 				    MAC_STATUS_CFG_CHANGED));
5751 		udelay(5);
5752 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5753 					 MAC_STATUS_CFG_CHANGED |
5754 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5755 			break;
5756 	}
5757 
5758 	mac_status = tr32(MAC_STATUS);
5759 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5760 		current_link_up = false;
5761 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5762 		    tp->serdes_counter == 0) {
5763 			tw32_f(MAC_MODE, (tp->mac_mode |
5764 					  MAC_MODE_SEND_CONFIGS));
5765 			udelay(1);
5766 			tw32_f(MAC_MODE, tp->mac_mode);
5767 		}
5768 	}
5769 
5770 	if (current_link_up) {
5771 		tp->link_config.active_speed = SPEED_1000;
5772 		tp->link_config.active_duplex = DUPLEX_FULL;
5773 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5774 				    LED_CTRL_LNKLED_OVERRIDE |
5775 				    LED_CTRL_1000MBPS_ON));
5776 	} else {
5777 		tp->link_config.active_speed = SPEED_UNKNOWN;
5778 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5779 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5780 				    LED_CTRL_LNKLED_OVERRIDE |
5781 				    LED_CTRL_TRAFFIC_OVERRIDE));
5782 	}
5783 
5784 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5785 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5786 		if (orig_pause_cfg != now_pause_cfg ||
5787 		    orig_active_speed != tp->link_config.active_speed ||
5788 		    orig_active_duplex != tp->link_config.active_duplex)
5789 			tg3_link_report(tp);
5790 	}
5791 
5792 	return 0;
5793 }
5794 
5795 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5796 {
5797 	int err = 0;
5798 	u32 bmsr, bmcr;
5799 	u16 current_speed = SPEED_UNKNOWN;
5800 	u8 current_duplex = DUPLEX_UNKNOWN;
5801 	bool current_link_up = false;
5802 	u32 local_adv, remote_adv, sgsr;
5803 
5804 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5805 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5806 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5807 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5808 
5809 		if (force_reset)
5810 			tg3_phy_reset(tp);
5811 
5812 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5813 
5814 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5815 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5816 		} else {
5817 			current_link_up = true;
5818 			if (sgsr & SERDES_TG3_SPEED_1000) {
5819 				current_speed = SPEED_1000;
5820 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5821 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5822 				current_speed = SPEED_100;
5823 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5824 			} else {
5825 				current_speed = SPEED_10;
5826 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 			}
5828 
5829 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5830 				current_duplex = DUPLEX_FULL;
5831 			else
5832 				current_duplex = DUPLEX_HALF;
5833 		}
5834 
5835 		tw32_f(MAC_MODE, tp->mac_mode);
5836 		udelay(40);
5837 
5838 		tg3_clear_mac_status(tp);
5839 
5840 		goto fiber_setup_done;
5841 	}
5842 
5843 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5844 	tw32_f(MAC_MODE, tp->mac_mode);
5845 	udelay(40);
5846 
5847 	tg3_clear_mac_status(tp);
5848 
5849 	if (force_reset)
5850 		tg3_phy_reset(tp);
5851 
5852 	tp->link_config.rmt_adv = 0;
5853 
5854 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5855 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5857 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5858 			bmsr |= BMSR_LSTATUS;
5859 		else
5860 			bmsr &= ~BMSR_LSTATUS;
5861 	}
5862 
5863 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5864 
5865 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5866 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5867 		/* do nothing, just check for link up at the end */
5868 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5869 		u32 adv, newadv;
5870 
5871 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5872 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5873 				 ADVERTISE_1000XPAUSE |
5874 				 ADVERTISE_1000XPSE_ASYM |
5875 				 ADVERTISE_SLCT);
5876 
5877 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5878 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5879 
5880 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5881 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5882 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5883 			tg3_writephy(tp, MII_BMCR, bmcr);
5884 
5885 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5886 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5887 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5888 
5889 			return err;
5890 		}
5891 	} else {
5892 		u32 new_bmcr;
5893 
5894 		bmcr &= ~BMCR_SPEED1000;
5895 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5896 
5897 		if (tp->link_config.duplex == DUPLEX_FULL)
5898 			new_bmcr |= BMCR_FULLDPLX;
5899 
5900 		if (new_bmcr != bmcr) {
5901 			/* BMCR_SPEED1000 is a reserved bit that needs
5902 			 * to be set on write.
5903 			 */
5904 			new_bmcr |= BMCR_SPEED1000;
5905 
5906 			/* Force a linkdown */
5907 			if (tp->link_up) {
5908 				u32 adv;
5909 
5910 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5911 				adv &= ~(ADVERTISE_1000XFULL |
5912 					 ADVERTISE_1000XHALF |
5913 					 ADVERTISE_SLCT);
5914 				tg3_writephy(tp, MII_ADVERTISE, adv);
5915 				tg3_writephy(tp, MII_BMCR, bmcr |
5916 							   BMCR_ANRESTART |
5917 							   BMCR_ANENABLE);
5918 				udelay(10);
5919 				tg3_carrier_off(tp);
5920 			}
5921 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5922 			bmcr = new_bmcr;
5923 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5924 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5926 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5927 					bmsr |= BMSR_LSTATUS;
5928 				else
5929 					bmsr &= ~BMSR_LSTATUS;
5930 			}
5931 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5932 		}
5933 	}
5934 
5935 	if (bmsr & BMSR_LSTATUS) {
5936 		current_speed = SPEED_1000;
5937 		current_link_up = true;
5938 		if (bmcr & BMCR_FULLDPLX)
5939 			current_duplex = DUPLEX_FULL;
5940 		else
5941 			current_duplex = DUPLEX_HALF;
5942 
5943 		local_adv = 0;
5944 		remote_adv = 0;
5945 
5946 		if (bmcr & BMCR_ANENABLE) {
5947 			u32 common;
5948 
5949 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5950 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5951 			common = local_adv & remote_adv;
5952 			if (common & (ADVERTISE_1000XHALF |
5953 				      ADVERTISE_1000XFULL)) {
5954 				if (common & ADVERTISE_1000XFULL)
5955 					current_duplex = DUPLEX_FULL;
5956 				else
5957 					current_duplex = DUPLEX_HALF;
5958 
5959 				tp->link_config.rmt_adv =
5960 					   mii_adv_to_ethtool_adv_x(remote_adv);
5961 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5962 				/* Link is up via parallel detect */
5963 			} else {
5964 				current_link_up = false;
5965 			}
5966 		}
5967 	}
5968 
5969 fiber_setup_done:
5970 	if (current_link_up && current_duplex == DUPLEX_FULL)
5971 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5972 
5973 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5974 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5975 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5976 
5977 	tw32_f(MAC_MODE, tp->mac_mode);
5978 	udelay(40);
5979 
5980 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5981 
5982 	tp->link_config.active_speed = current_speed;
5983 	tp->link_config.active_duplex = current_duplex;
5984 
5985 	tg3_test_and_report_link_chg(tp, current_link_up);
5986 	return err;
5987 }
5988 
5989 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5990 {
5991 	if (tp->serdes_counter) {
5992 		/* Give autoneg time to complete. */
5993 		tp->serdes_counter--;
5994 		return;
5995 	}
5996 
5997 	if (!tp->link_up &&
5998 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5999 		u32 bmcr;
6000 
6001 		tg3_readphy(tp, MII_BMCR, &bmcr);
6002 		if (bmcr & BMCR_ANENABLE) {
6003 			u32 phy1, phy2;
6004 
6005 			/* Select shadow register 0x1f */
6006 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6007 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6008 
6009 			/* Select expansion interrupt status register */
6010 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6011 					 MII_TG3_DSP_EXP1_INT_STAT);
6012 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6013 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 
6015 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6016 				/* We have signal detect and not receiving
6017 				 * config code words, link is up by parallel
6018 				 * detection.
6019 				 */
6020 
6021 				bmcr &= ~BMCR_ANENABLE;
6022 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6023 				tg3_writephy(tp, MII_BMCR, bmcr);
6024 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6025 			}
6026 		}
6027 	} else if (tp->link_up &&
6028 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6029 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6030 		u32 phy2;
6031 
6032 		/* Select expansion interrupt status register */
6033 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6034 				 MII_TG3_DSP_EXP1_INT_STAT);
6035 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 		if (phy2 & 0x20) {
6037 			u32 bmcr;
6038 
6039 			/* Config code words received, turn on autoneg. */
6040 			tg3_readphy(tp, MII_BMCR, &bmcr);
6041 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6042 
6043 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6044 
6045 		}
6046 	}
6047 }
6048 
6049 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6050 {
6051 	u32 val;
6052 	int err;
6053 
6054 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6055 		err = tg3_setup_fiber_phy(tp, force_reset);
6056 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6057 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6058 	else
6059 		err = tg3_setup_copper_phy(tp, force_reset);
6060 
6061 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6062 		u32 scale;
6063 
6064 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6065 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6066 			scale = 65;
6067 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6068 			scale = 6;
6069 		else
6070 			scale = 12;
6071 
6072 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6073 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6074 		tw32(GRC_MISC_CFG, val);
6075 	}
6076 
6077 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6078 	      (6 << TX_LENGTHS_IPG_SHIFT);
6079 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6080 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6081 		val |= tr32(MAC_TX_LENGTHS) &
6082 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6083 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6084 
6085 	if (tp->link_config.active_speed == SPEED_1000 &&
6086 	    tp->link_config.active_duplex == DUPLEX_HALF)
6087 		tw32(MAC_TX_LENGTHS, val |
6088 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6089 	else
6090 		tw32(MAC_TX_LENGTHS, val |
6091 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 
6093 	if (!tg3_flag(tp, 5705_PLUS)) {
6094 		if (tp->link_up) {
6095 			tw32(HOSTCC_STAT_COAL_TICKS,
6096 			     tp->coal.stats_block_coalesce_usecs);
6097 		} else {
6098 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6099 		}
6100 	}
6101 
6102 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6103 		val = tr32(PCIE_PWR_MGMT_THRESH);
6104 		if (!tp->link_up)
6105 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6106 			      tp->pwrmgmt_thresh;
6107 		else
6108 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6109 		tw32(PCIE_PWR_MGMT_THRESH, val);
6110 	}
6111 
6112 	return err;
6113 }
6114 
6115 /* tp->lock must be held */
6116 static u64 tg3_refclk_read(struct tg3 *tp)
6117 {
6118 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6119 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6120 }
6121 
6122 /* tp->lock must be held */
6123 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6124 {
6125 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6126 
6127 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6128 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6129 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6130 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6131 }
6132 
6133 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6134 static inline void tg3_full_unlock(struct tg3 *tp);
6135 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6136 {
6137 	struct tg3 *tp = netdev_priv(dev);
6138 
6139 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6140 				SOF_TIMESTAMPING_RX_SOFTWARE |
6141 				SOF_TIMESTAMPING_SOFTWARE;
6142 
6143 	if (tg3_flag(tp, PTP_CAPABLE)) {
6144 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6145 					SOF_TIMESTAMPING_RX_HARDWARE |
6146 					SOF_TIMESTAMPING_RAW_HARDWARE;
6147 	}
6148 
6149 	if (tp->ptp_clock)
6150 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6151 	else
6152 		info->phc_index = -1;
6153 
6154 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6155 
6156 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6157 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6158 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6159 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6160 	return 0;
6161 }
6162 
6163 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6164 {
6165 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6166 	bool neg_adj = false;
6167 	u32 correction = 0;
6168 
6169 	if (ppb < 0) {
6170 		neg_adj = true;
6171 		ppb = -ppb;
6172 	}
6173 
6174 	/* Frequency adjustment is performed using hardware with a 24 bit
6175 	 * accumulator and a programmable correction value. On each clk, the
6176 	 * correction value gets added to the accumulator and when it
6177 	 * overflows, the time counter is incremented/decremented.
6178 	 *
6179 	 * So conversion from ppb to correction value is
6180 	 *		ppb * (1 << 24) / 1000000000
6181 	 */
6182 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6183 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6184 
6185 	tg3_full_lock(tp, 0);
6186 
6187 	if (correction)
6188 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6189 		     TG3_EAV_REF_CLK_CORRECT_EN |
6190 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6191 	else
6192 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6193 
6194 	tg3_full_unlock(tp);
6195 
6196 	return 0;
6197 }
6198 
6199 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6200 {
6201 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6202 
6203 	tg3_full_lock(tp, 0);
6204 	tp->ptp_adjust += delta;
6205 	tg3_full_unlock(tp);
6206 
6207 	return 0;
6208 }
6209 
6210 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6211 {
6212 	u64 ns;
6213 	u32 remainder;
6214 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 
6216 	tg3_full_lock(tp, 0);
6217 	ns = tg3_refclk_read(tp);
6218 	ns += tp->ptp_adjust;
6219 	tg3_full_unlock(tp);
6220 
6221 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6222 	ts->tv_nsec = remainder;
6223 
6224 	return 0;
6225 }
6226 
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228 			   const struct timespec *ts)
6229 {
6230 	u64 ns;
6231 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232 
6233 	ns = timespec_to_ns(ts);
6234 
6235 	tg3_full_lock(tp, 0);
6236 	tg3_refclk_write(tp, ns);
6237 	tp->ptp_adjust = 0;
6238 	tg3_full_unlock(tp);
6239 
6240 	return 0;
6241 }
6242 
6243 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6244 			  struct ptp_clock_request *rq, int on)
6245 {
6246 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247 	u32 clock_ctl;
6248 	int rval = 0;
6249 
6250 	switch (rq->type) {
6251 	case PTP_CLK_REQ_PEROUT:
6252 		if (rq->perout.index != 0)
6253 			return -EINVAL;
6254 
6255 		tg3_full_lock(tp, 0);
6256 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6258 
6259 		if (on) {
6260 			u64 nsec;
6261 
6262 			nsec = rq->perout.start.sec * 1000000000ULL +
6263 			       rq->perout.start.nsec;
6264 
6265 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 				netdev_warn(tp->dev,
6267 					    "Device supports only a one-shot timesync output, period must be 0\n");
6268 				rval = -EINVAL;
6269 				goto err_out;
6270 			}
6271 
6272 			if (nsec & (1ULL << 63)) {
6273 				netdev_warn(tp->dev,
6274 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6275 				rval = -EINVAL;
6276 				goto err_out;
6277 			}
6278 
6279 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 			tw32(TG3_EAV_WATCHDOG0_MSB,
6281 			     TG3_EAV_WATCHDOG0_EN |
6282 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6283 
6284 			tw32(TG3_EAV_REF_CLCK_CTL,
6285 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 		} else {
6287 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6289 		}
6290 
6291 err_out:
6292 		tg3_full_unlock(tp);
6293 		return rval;
6294 
6295 	default:
6296 		break;
6297 	}
6298 
6299 	return -EOPNOTSUPP;
6300 }
6301 
6302 static const struct ptp_clock_info tg3_ptp_caps = {
6303 	.owner		= THIS_MODULE,
6304 	.name		= "tg3 clock",
6305 	.max_adj	= 250000000,
6306 	.n_alarm	= 0,
6307 	.n_ext_ts	= 0,
6308 	.n_per_out	= 1,
6309 	.pps		= 0,
6310 	.adjfreq	= tg3_ptp_adjfreq,
6311 	.adjtime	= tg3_ptp_adjtime,
6312 	.gettime	= tg3_ptp_gettime,
6313 	.settime	= tg3_ptp_settime,
6314 	.enable		= tg3_ptp_enable,
6315 };
6316 
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318 				     struct skb_shared_hwtstamps *timestamp)
6319 {
6320 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322 					   tp->ptp_adjust);
6323 }
6324 
6325 /* tp->lock must be held */
6326 static void tg3_ptp_init(struct tg3 *tp)
6327 {
6328 	if (!tg3_flag(tp, PTP_CAPABLE))
6329 		return;
6330 
6331 	/* Initialize the hardware clock to the system time. */
6332 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6333 	tp->ptp_adjust = 0;
6334 	tp->ptp_info = tg3_ptp_caps;
6335 }
6336 
6337 /* tp->lock must be held */
6338 static void tg3_ptp_resume(struct tg3 *tp)
6339 {
6340 	if (!tg3_flag(tp, PTP_CAPABLE))
6341 		return;
6342 
6343 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6344 	tp->ptp_adjust = 0;
6345 }
6346 
6347 static void tg3_ptp_fini(struct tg3 *tp)
6348 {
6349 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6350 		return;
6351 
6352 	ptp_clock_unregister(tp->ptp_clock);
6353 	tp->ptp_clock = NULL;
6354 	tp->ptp_adjust = 0;
6355 }
6356 
6357 static inline int tg3_irq_sync(struct tg3 *tp)
6358 {
6359 	return tp->irq_sync;
6360 }
6361 
6362 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6363 {
6364 	int i;
6365 
6366 	dst = (u32 *)((u8 *)dst + off);
6367 	for (i = 0; i < len; i += sizeof(u32))
6368 		*dst++ = tr32(off + i);
6369 }
6370 
6371 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6372 {
6373 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6374 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6375 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6376 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6377 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6378 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6379 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6380 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6381 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6382 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6383 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6384 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6385 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6386 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6387 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6388 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6389 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6390 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6391 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6392 
6393 	if (tg3_flag(tp, SUPPORT_MSIX))
6394 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6395 
6396 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6397 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6398 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6399 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6400 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6401 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6402 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6403 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6404 
6405 	if (!tg3_flag(tp, 5705_PLUS)) {
6406 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6407 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6408 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6409 	}
6410 
6411 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6412 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6413 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6414 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6415 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6416 
6417 	if (tg3_flag(tp, NVRAM))
6418 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6419 }
6420 
6421 static void tg3_dump_state(struct tg3 *tp)
6422 {
6423 	int i;
6424 	u32 *regs;
6425 
6426 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6427 	if (!regs)
6428 		return;
6429 
6430 	if (tg3_flag(tp, PCI_EXPRESS)) {
6431 		/* Read up to but not including private PCI registers */
6432 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6433 			regs[i / sizeof(u32)] = tr32(i);
6434 	} else
6435 		tg3_dump_legacy_regs(tp, regs);
6436 
6437 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6438 		if (!regs[i + 0] && !regs[i + 1] &&
6439 		    !regs[i + 2] && !regs[i + 3])
6440 			continue;
6441 
6442 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6443 			   i * 4,
6444 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6445 	}
6446 
6447 	kfree(regs);
6448 
6449 	for (i = 0; i < tp->irq_cnt; i++) {
6450 		struct tg3_napi *tnapi = &tp->napi[i];
6451 
6452 		/* SW status block */
6453 		netdev_err(tp->dev,
6454 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6455 			   i,
6456 			   tnapi->hw_status->status,
6457 			   tnapi->hw_status->status_tag,
6458 			   tnapi->hw_status->rx_jumbo_consumer,
6459 			   tnapi->hw_status->rx_consumer,
6460 			   tnapi->hw_status->rx_mini_consumer,
6461 			   tnapi->hw_status->idx[0].rx_producer,
6462 			   tnapi->hw_status->idx[0].tx_consumer);
6463 
6464 		netdev_err(tp->dev,
6465 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6466 			   i,
6467 			   tnapi->last_tag, tnapi->last_irq_tag,
6468 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6469 			   tnapi->rx_rcb_ptr,
6470 			   tnapi->prodring.rx_std_prod_idx,
6471 			   tnapi->prodring.rx_std_cons_idx,
6472 			   tnapi->prodring.rx_jmb_prod_idx,
6473 			   tnapi->prodring.rx_jmb_cons_idx);
6474 	}
6475 }
6476 
6477 /* This is called whenever we suspect that the system chipset is re-
6478  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6479  * is bogus tx completions. We try to recover by setting the
6480  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6481  * in the workqueue.
6482  */
6483 static void tg3_tx_recover(struct tg3 *tp)
6484 {
6485 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6486 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6487 
6488 	netdev_warn(tp->dev,
6489 		    "The system may be re-ordering memory-mapped I/O "
6490 		    "cycles to the network device, attempting to recover. "
6491 		    "Please report the problem to the driver maintainer "
6492 		    "and include system chipset information.\n");
6493 
6494 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6495 }
6496 
6497 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6498 {
6499 	/* Tell compiler to fetch tx indices from memory. */
6500 	barrier();
6501 	return tnapi->tx_pending -
6502 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6503 }
6504 
6505 /* Tigon3 never reports partial packet sends.  So we do not
6506  * need special logic to handle SKBs that have not had all
6507  * of their frags sent yet, like SunGEM does.
6508  */
6509 static void tg3_tx(struct tg3_napi *tnapi)
6510 {
6511 	struct tg3 *tp = tnapi->tp;
6512 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6513 	u32 sw_idx = tnapi->tx_cons;
6514 	struct netdev_queue *txq;
6515 	int index = tnapi - tp->napi;
6516 	unsigned int pkts_compl = 0, bytes_compl = 0;
6517 
6518 	if (tg3_flag(tp, ENABLE_TSS))
6519 		index--;
6520 
6521 	txq = netdev_get_tx_queue(tp->dev, index);
6522 
6523 	while (sw_idx != hw_idx) {
6524 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6525 		struct sk_buff *skb = ri->skb;
6526 		int i, tx_bug = 0;
6527 
6528 		if (unlikely(skb == NULL)) {
6529 			tg3_tx_recover(tp);
6530 			return;
6531 		}
6532 
6533 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6534 			struct skb_shared_hwtstamps timestamp;
6535 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6536 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6537 
6538 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6539 
6540 			skb_tstamp_tx(skb, &timestamp);
6541 		}
6542 
6543 		pci_unmap_single(tp->pdev,
6544 				 dma_unmap_addr(ri, mapping),
6545 				 skb_headlen(skb),
6546 				 PCI_DMA_TODEVICE);
6547 
6548 		ri->skb = NULL;
6549 
6550 		while (ri->fragmented) {
6551 			ri->fragmented = false;
6552 			sw_idx = NEXT_TX(sw_idx);
6553 			ri = &tnapi->tx_buffers[sw_idx];
6554 		}
6555 
6556 		sw_idx = NEXT_TX(sw_idx);
6557 
6558 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6559 			ri = &tnapi->tx_buffers[sw_idx];
6560 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6561 				tx_bug = 1;
6562 
6563 			pci_unmap_page(tp->pdev,
6564 				       dma_unmap_addr(ri, mapping),
6565 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6566 				       PCI_DMA_TODEVICE);
6567 
6568 			while (ri->fragmented) {
6569 				ri->fragmented = false;
6570 				sw_idx = NEXT_TX(sw_idx);
6571 				ri = &tnapi->tx_buffers[sw_idx];
6572 			}
6573 
6574 			sw_idx = NEXT_TX(sw_idx);
6575 		}
6576 
6577 		pkts_compl++;
6578 		bytes_compl += skb->len;
6579 
6580 		dev_kfree_skb(skb);
6581 
6582 		if (unlikely(tx_bug)) {
6583 			tg3_tx_recover(tp);
6584 			return;
6585 		}
6586 	}
6587 
6588 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6589 
6590 	tnapi->tx_cons = sw_idx;
6591 
6592 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6593 	 * before checking for netif_queue_stopped().  Without the
6594 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6595 	 * will miss it and cause the queue to be stopped forever.
6596 	 */
6597 	smp_mb();
6598 
6599 	if (unlikely(netif_tx_queue_stopped(txq) &&
6600 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6601 		__netif_tx_lock(txq, smp_processor_id());
6602 		if (netif_tx_queue_stopped(txq) &&
6603 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6604 			netif_tx_wake_queue(txq);
6605 		__netif_tx_unlock(txq);
6606 	}
6607 }
6608 
6609 static void tg3_frag_free(bool is_frag, void *data)
6610 {
6611 	if (is_frag)
6612 		put_page(virt_to_head_page(data));
6613 	else
6614 		kfree(data);
6615 }
6616 
6617 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6618 {
6619 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6620 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6621 
6622 	if (!ri->data)
6623 		return;
6624 
6625 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6626 			 map_sz, PCI_DMA_FROMDEVICE);
6627 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6628 	ri->data = NULL;
6629 }
6630 
6631 
6632 /* Returns size of skb allocated or < 0 on error.
6633  *
6634  * We only need to fill in the address because the other members
6635  * of the RX descriptor are invariant, see tg3_init_rings.
6636  *
6637  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6638  * posting buffers we only dirty the first cache line of the RX
6639  * descriptor (containing the address).  Whereas for the RX status
6640  * buffers the cpu only reads the last cacheline of the RX descriptor
6641  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6642  */
6643 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6644 			     u32 opaque_key, u32 dest_idx_unmasked,
6645 			     unsigned int *frag_size)
6646 {
6647 	struct tg3_rx_buffer_desc *desc;
6648 	struct ring_info *map;
6649 	u8 *data;
6650 	dma_addr_t mapping;
6651 	int skb_size, data_size, dest_idx;
6652 
6653 	switch (opaque_key) {
6654 	case RXD_OPAQUE_RING_STD:
6655 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6656 		desc = &tpr->rx_std[dest_idx];
6657 		map = &tpr->rx_std_buffers[dest_idx];
6658 		data_size = tp->rx_pkt_map_sz;
6659 		break;
6660 
6661 	case RXD_OPAQUE_RING_JUMBO:
6662 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6663 		desc = &tpr->rx_jmb[dest_idx].std;
6664 		map = &tpr->rx_jmb_buffers[dest_idx];
6665 		data_size = TG3_RX_JMB_MAP_SZ;
6666 		break;
6667 
6668 	default:
6669 		return -EINVAL;
6670 	}
6671 
6672 	/* Do not overwrite any of the map or rp information
6673 	 * until we are sure we can commit to a new buffer.
6674 	 *
6675 	 * Callers depend upon this behavior and assume that
6676 	 * we leave everything unchanged if we fail.
6677 	 */
6678 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6679 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6680 	if (skb_size <= PAGE_SIZE) {
6681 		data = netdev_alloc_frag(skb_size);
6682 		*frag_size = skb_size;
6683 	} else {
6684 		data = kmalloc(skb_size, GFP_ATOMIC);
6685 		*frag_size = 0;
6686 	}
6687 	if (!data)
6688 		return -ENOMEM;
6689 
6690 	mapping = pci_map_single(tp->pdev,
6691 				 data + TG3_RX_OFFSET(tp),
6692 				 data_size,
6693 				 PCI_DMA_FROMDEVICE);
6694 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6695 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6696 		return -EIO;
6697 	}
6698 
6699 	map->data = data;
6700 	dma_unmap_addr_set(map, mapping, mapping);
6701 
6702 	desc->addr_hi = ((u64)mapping >> 32);
6703 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6704 
6705 	return data_size;
6706 }
6707 
6708 /* We only need to move over in the address because the other
6709  * members of the RX descriptor are invariant.  See notes above
6710  * tg3_alloc_rx_data for full details.
6711  */
6712 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6713 			   struct tg3_rx_prodring_set *dpr,
6714 			   u32 opaque_key, int src_idx,
6715 			   u32 dest_idx_unmasked)
6716 {
6717 	struct tg3 *tp = tnapi->tp;
6718 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6719 	struct ring_info *src_map, *dest_map;
6720 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6721 	int dest_idx;
6722 
6723 	switch (opaque_key) {
6724 	case RXD_OPAQUE_RING_STD:
6725 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6726 		dest_desc = &dpr->rx_std[dest_idx];
6727 		dest_map = &dpr->rx_std_buffers[dest_idx];
6728 		src_desc = &spr->rx_std[src_idx];
6729 		src_map = &spr->rx_std_buffers[src_idx];
6730 		break;
6731 
6732 	case RXD_OPAQUE_RING_JUMBO:
6733 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6734 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6735 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6736 		src_desc = &spr->rx_jmb[src_idx].std;
6737 		src_map = &spr->rx_jmb_buffers[src_idx];
6738 		break;
6739 
6740 	default:
6741 		return;
6742 	}
6743 
6744 	dest_map->data = src_map->data;
6745 	dma_unmap_addr_set(dest_map, mapping,
6746 			   dma_unmap_addr(src_map, mapping));
6747 	dest_desc->addr_hi = src_desc->addr_hi;
6748 	dest_desc->addr_lo = src_desc->addr_lo;
6749 
6750 	/* Ensure that the update to the skb happens after the physical
6751 	 * addresses have been transferred to the new BD location.
6752 	 */
6753 	smp_wmb();
6754 
6755 	src_map->data = NULL;
6756 }
6757 
6758 /* The RX ring scheme is composed of multiple rings which post fresh
6759  * buffers to the chip, and one special ring the chip uses to report
6760  * status back to the host.
6761  *
6762  * The special ring reports the status of received packets to the
6763  * host.  The chip does not write into the original descriptor the
6764  * RX buffer was obtained from.  The chip simply takes the original
6765  * descriptor as provided by the host, updates the status and length
6766  * field, then writes this into the next status ring entry.
6767  *
6768  * Each ring the host uses to post buffers to the chip is described
6769  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6770  * it is first placed into the on-chip ram.  When the packet's length
6771  * is known, it walks down the TG3_BDINFO entries to select the ring.
6772  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6773  * which is within the range of the new packet's length is chosen.
6774  *
6775  * The "separate ring for rx status" scheme may sound queer, but it makes
6776  * sense from a cache coherency perspective.  If only the host writes
6777  * to the buffer post rings, and only the chip writes to the rx status
6778  * rings, then cache lines never move beyond shared-modified state.
6779  * If both the host and chip were to write into the same ring, cache line
6780  * eviction could occur since both entities want it in an exclusive state.
6781  */
6782 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6783 {
6784 	struct tg3 *tp = tnapi->tp;
6785 	u32 work_mask, rx_std_posted = 0;
6786 	u32 std_prod_idx, jmb_prod_idx;
6787 	u32 sw_idx = tnapi->rx_rcb_ptr;
6788 	u16 hw_idx;
6789 	int received;
6790 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6791 
6792 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6793 	/*
6794 	 * We need to order the read of hw_idx and the read of
6795 	 * the opaque cookie.
6796 	 */
6797 	rmb();
6798 	work_mask = 0;
6799 	received = 0;
6800 	std_prod_idx = tpr->rx_std_prod_idx;
6801 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6802 	while (sw_idx != hw_idx && budget > 0) {
6803 		struct ring_info *ri;
6804 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6805 		unsigned int len;
6806 		struct sk_buff *skb;
6807 		dma_addr_t dma_addr;
6808 		u32 opaque_key, desc_idx, *post_ptr;
6809 		u8 *data;
6810 		u64 tstamp = 0;
6811 
6812 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6813 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6814 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6815 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6816 			dma_addr = dma_unmap_addr(ri, mapping);
6817 			data = ri->data;
6818 			post_ptr = &std_prod_idx;
6819 			rx_std_posted++;
6820 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6821 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6822 			dma_addr = dma_unmap_addr(ri, mapping);
6823 			data = ri->data;
6824 			post_ptr = &jmb_prod_idx;
6825 		} else
6826 			goto next_pkt_nopost;
6827 
6828 		work_mask |= opaque_key;
6829 
6830 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6831 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6832 		drop_it:
6833 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6834 				       desc_idx, *post_ptr);
6835 		drop_it_no_recycle:
6836 			/* Other statistics kept track of by card. */
6837 			tp->rx_dropped++;
6838 			goto next_pkt;
6839 		}
6840 
6841 		prefetch(data + TG3_RX_OFFSET(tp));
6842 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6843 		      ETH_FCS_LEN;
6844 
6845 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6846 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6847 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6848 		     RXD_FLAG_PTPSTAT_PTPV2) {
6849 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6850 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6851 		}
6852 
6853 		if (len > TG3_RX_COPY_THRESH(tp)) {
6854 			int skb_size;
6855 			unsigned int frag_size;
6856 
6857 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6858 						    *post_ptr, &frag_size);
6859 			if (skb_size < 0)
6860 				goto drop_it;
6861 
6862 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6863 					 PCI_DMA_FROMDEVICE);
6864 
6865 			/* Ensure that the update to the data happens
6866 			 * after the usage of the old DMA mapping.
6867 			 */
6868 			smp_wmb();
6869 
6870 			ri->data = NULL;
6871 
6872 			skb = build_skb(data, frag_size);
6873 			if (!skb) {
6874 				tg3_frag_free(frag_size != 0, data);
6875 				goto drop_it_no_recycle;
6876 			}
6877 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6878 		} else {
6879 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 				       desc_idx, *post_ptr);
6881 
6882 			skb = netdev_alloc_skb(tp->dev,
6883 					       len + TG3_RAW_IP_ALIGN);
6884 			if (skb == NULL)
6885 				goto drop_it_no_recycle;
6886 
6887 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6888 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6889 			memcpy(skb->data,
6890 			       data + TG3_RX_OFFSET(tp),
6891 			       len);
6892 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6893 		}
6894 
6895 		skb_put(skb, len);
6896 		if (tstamp)
6897 			tg3_hwclock_to_timestamp(tp, tstamp,
6898 						 skb_hwtstamps(skb));
6899 
6900 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6901 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6902 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6903 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6904 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6905 		else
6906 			skb_checksum_none_assert(skb);
6907 
6908 		skb->protocol = eth_type_trans(skb, tp->dev);
6909 
6910 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6911 		    skb->protocol != htons(ETH_P_8021Q)) {
6912 			dev_kfree_skb(skb);
6913 			goto drop_it_no_recycle;
6914 		}
6915 
6916 		if (desc->type_flags & RXD_FLAG_VLAN &&
6917 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6918 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6919 					       desc->err_vlan & RXD_VLAN_MASK);
6920 
6921 		napi_gro_receive(&tnapi->napi, skb);
6922 
6923 		received++;
6924 		budget--;
6925 
6926 next_pkt:
6927 		(*post_ptr)++;
6928 
6929 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6930 			tpr->rx_std_prod_idx = std_prod_idx &
6931 					       tp->rx_std_ring_mask;
6932 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6933 				     tpr->rx_std_prod_idx);
6934 			work_mask &= ~RXD_OPAQUE_RING_STD;
6935 			rx_std_posted = 0;
6936 		}
6937 next_pkt_nopost:
6938 		sw_idx++;
6939 		sw_idx &= tp->rx_ret_ring_mask;
6940 
6941 		/* Refresh hw_idx to see if there is new work */
6942 		if (sw_idx == hw_idx) {
6943 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6944 			rmb();
6945 		}
6946 	}
6947 
6948 	/* ACK the status ring. */
6949 	tnapi->rx_rcb_ptr = sw_idx;
6950 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6951 
6952 	/* Refill RX ring(s). */
6953 	if (!tg3_flag(tp, ENABLE_RSS)) {
6954 		/* Sync BD data before updating mailbox */
6955 		wmb();
6956 
6957 		if (work_mask & RXD_OPAQUE_RING_STD) {
6958 			tpr->rx_std_prod_idx = std_prod_idx &
6959 					       tp->rx_std_ring_mask;
6960 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6961 				     tpr->rx_std_prod_idx);
6962 		}
6963 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6964 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6965 					       tp->rx_jmb_ring_mask;
6966 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6967 				     tpr->rx_jmb_prod_idx);
6968 		}
6969 		mmiowb();
6970 	} else if (work_mask) {
6971 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6972 		 * updated before the producer indices can be updated.
6973 		 */
6974 		smp_wmb();
6975 
6976 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6977 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6978 
6979 		if (tnapi != &tp->napi[1]) {
6980 			tp->rx_refill = true;
6981 			napi_schedule(&tp->napi[1].napi);
6982 		}
6983 	}
6984 
6985 	return received;
6986 }
6987 
6988 static void tg3_poll_link(struct tg3 *tp)
6989 {
6990 	/* handle link change and other phy events */
6991 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6992 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6993 
6994 		if (sblk->status & SD_STATUS_LINK_CHG) {
6995 			sblk->status = SD_STATUS_UPDATED |
6996 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6997 			spin_lock(&tp->lock);
6998 			if (tg3_flag(tp, USE_PHYLIB)) {
6999 				tw32_f(MAC_STATUS,
7000 				     (MAC_STATUS_SYNC_CHANGED |
7001 				      MAC_STATUS_CFG_CHANGED |
7002 				      MAC_STATUS_MI_COMPLETION |
7003 				      MAC_STATUS_LNKSTATE_CHANGED));
7004 				udelay(40);
7005 			} else
7006 				tg3_setup_phy(tp, false);
7007 			spin_unlock(&tp->lock);
7008 		}
7009 	}
7010 }
7011 
7012 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7013 				struct tg3_rx_prodring_set *dpr,
7014 				struct tg3_rx_prodring_set *spr)
7015 {
7016 	u32 si, di, cpycnt, src_prod_idx;
7017 	int i, err = 0;
7018 
7019 	while (1) {
7020 		src_prod_idx = spr->rx_std_prod_idx;
7021 
7022 		/* Make sure updates to the rx_std_buffers[] entries and the
7023 		 * standard producer index are seen in the correct order.
7024 		 */
7025 		smp_rmb();
7026 
7027 		if (spr->rx_std_cons_idx == src_prod_idx)
7028 			break;
7029 
7030 		if (spr->rx_std_cons_idx < src_prod_idx)
7031 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7032 		else
7033 			cpycnt = tp->rx_std_ring_mask + 1 -
7034 				 spr->rx_std_cons_idx;
7035 
7036 		cpycnt = min(cpycnt,
7037 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7038 
7039 		si = spr->rx_std_cons_idx;
7040 		di = dpr->rx_std_prod_idx;
7041 
7042 		for (i = di; i < di + cpycnt; i++) {
7043 			if (dpr->rx_std_buffers[i].data) {
7044 				cpycnt = i - di;
7045 				err = -ENOSPC;
7046 				break;
7047 			}
7048 		}
7049 
7050 		if (!cpycnt)
7051 			break;
7052 
7053 		/* Ensure that updates to the rx_std_buffers ring and the
7054 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7055 		 * ordered correctly WRT the skb check above.
7056 		 */
7057 		smp_rmb();
7058 
7059 		memcpy(&dpr->rx_std_buffers[di],
7060 		       &spr->rx_std_buffers[si],
7061 		       cpycnt * sizeof(struct ring_info));
7062 
7063 		for (i = 0; i < cpycnt; i++, di++, si++) {
7064 			struct tg3_rx_buffer_desc *sbd, *dbd;
7065 			sbd = &spr->rx_std[si];
7066 			dbd = &dpr->rx_std[di];
7067 			dbd->addr_hi = sbd->addr_hi;
7068 			dbd->addr_lo = sbd->addr_lo;
7069 		}
7070 
7071 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7072 				       tp->rx_std_ring_mask;
7073 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7074 				       tp->rx_std_ring_mask;
7075 	}
7076 
7077 	while (1) {
7078 		src_prod_idx = spr->rx_jmb_prod_idx;
7079 
7080 		/* Make sure updates to the rx_jmb_buffers[] entries and
7081 		 * the jumbo producer index are seen in the correct order.
7082 		 */
7083 		smp_rmb();
7084 
7085 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7086 			break;
7087 
7088 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7089 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7090 		else
7091 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7092 				 spr->rx_jmb_cons_idx;
7093 
7094 		cpycnt = min(cpycnt,
7095 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7096 
7097 		si = spr->rx_jmb_cons_idx;
7098 		di = dpr->rx_jmb_prod_idx;
7099 
7100 		for (i = di; i < di + cpycnt; i++) {
7101 			if (dpr->rx_jmb_buffers[i].data) {
7102 				cpycnt = i - di;
7103 				err = -ENOSPC;
7104 				break;
7105 			}
7106 		}
7107 
7108 		if (!cpycnt)
7109 			break;
7110 
7111 		/* Ensure that updates to the rx_jmb_buffers ring and the
7112 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7113 		 * ordered correctly WRT the skb check above.
7114 		 */
7115 		smp_rmb();
7116 
7117 		memcpy(&dpr->rx_jmb_buffers[di],
7118 		       &spr->rx_jmb_buffers[si],
7119 		       cpycnt * sizeof(struct ring_info));
7120 
7121 		for (i = 0; i < cpycnt; i++, di++, si++) {
7122 			struct tg3_rx_buffer_desc *sbd, *dbd;
7123 			sbd = &spr->rx_jmb[si].std;
7124 			dbd = &dpr->rx_jmb[di].std;
7125 			dbd->addr_hi = sbd->addr_hi;
7126 			dbd->addr_lo = sbd->addr_lo;
7127 		}
7128 
7129 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7130 				       tp->rx_jmb_ring_mask;
7131 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7132 				       tp->rx_jmb_ring_mask;
7133 	}
7134 
7135 	return err;
7136 }
7137 
7138 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7139 {
7140 	struct tg3 *tp = tnapi->tp;
7141 
7142 	/* run TX completion thread */
7143 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7144 		tg3_tx(tnapi);
7145 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7146 			return work_done;
7147 	}
7148 
7149 	if (!tnapi->rx_rcb_prod_idx)
7150 		return work_done;
7151 
7152 	/* run RX thread, within the bounds set by NAPI.
7153 	 * All RX "locking" is done by ensuring outside
7154 	 * code synchronizes with tg3->napi.poll()
7155 	 */
7156 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7157 		work_done += tg3_rx(tnapi, budget - work_done);
7158 
7159 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7160 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7161 		int i, err = 0;
7162 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7163 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7164 
7165 		tp->rx_refill = false;
7166 		for (i = 1; i <= tp->rxq_cnt; i++)
7167 			err |= tg3_rx_prodring_xfer(tp, dpr,
7168 						    &tp->napi[i].prodring);
7169 
7170 		wmb();
7171 
7172 		if (std_prod_idx != dpr->rx_std_prod_idx)
7173 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7174 				     dpr->rx_std_prod_idx);
7175 
7176 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7177 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7178 				     dpr->rx_jmb_prod_idx);
7179 
7180 		mmiowb();
7181 
7182 		if (err)
7183 			tw32_f(HOSTCC_MODE, tp->coal_now);
7184 	}
7185 
7186 	return work_done;
7187 }
7188 
7189 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7190 {
7191 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7192 		schedule_work(&tp->reset_task);
7193 }
7194 
7195 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7196 {
7197 	cancel_work_sync(&tp->reset_task);
7198 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7199 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7200 }
7201 
7202 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7203 {
7204 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7205 	struct tg3 *tp = tnapi->tp;
7206 	int work_done = 0;
7207 	struct tg3_hw_status *sblk = tnapi->hw_status;
7208 
7209 	while (1) {
7210 		work_done = tg3_poll_work(tnapi, work_done, budget);
7211 
7212 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7213 			goto tx_recovery;
7214 
7215 		if (unlikely(work_done >= budget))
7216 			break;
7217 
7218 		/* tp->last_tag is used in tg3_int_reenable() below
7219 		 * to tell the hw how much work has been processed,
7220 		 * so we must read it before checking for more work.
7221 		 */
7222 		tnapi->last_tag = sblk->status_tag;
7223 		tnapi->last_irq_tag = tnapi->last_tag;
7224 		rmb();
7225 
7226 		/* check for RX/TX work to do */
7227 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7228 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7229 
7230 			/* This test here is not race free, but will reduce
7231 			 * the number of interrupts by looping again.
7232 			 */
7233 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7234 				continue;
7235 
7236 			napi_complete(napi);
7237 			/* Reenable interrupts. */
7238 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7239 
7240 			/* This test here is synchronized by napi_schedule()
7241 			 * and napi_complete() to close the race condition.
7242 			 */
7243 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7244 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7245 						  HOSTCC_MODE_ENABLE |
7246 						  tnapi->coal_now);
7247 			}
7248 			mmiowb();
7249 			break;
7250 		}
7251 	}
7252 
7253 	return work_done;
7254 
7255 tx_recovery:
7256 	/* work_done is guaranteed to be less than budget. */
7257 	napi_complete(napi);
7258 	tg3_reset_task_schedule(tp);
7259 	return work_done;
7260 }
7261 
7262 static void tg3_process_error(struct tg3 *tp)
7263 {
7264 	u32 val;
7265 	bool real_error = false;
7266 
7267 	if (tg3_flag(tp, ERROR_PROCESSED))
7268 		return;
7269 
7270 	/* Check Flow Attention register */
7271 	val = tr32(HOSTCC_FLOW_ATTN);
7272 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7273 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7274 		real_error = true;
7275 	}
7276 
7277 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7278 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7279 		real_error = true;
7280 	}
7281 
7282 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7283 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7284 		real_error = true;
7285 	}
7286 
7287 	if (!real_error)
7288 		return;
7289 
7290 	tg3_dump_state(tp);
7291 
7292 	tg3_flag_set(tp, ERROR_PROCESSED);
7293 	tg3_reset_task_schedule(tp);
7294 }
7295 
7296 static int tg3_poll(struct napi_struct *napi, int budget)
7297 {
7298 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7299 	struct tg3 *tp = tnapi->tp;
7300 	int work_done = 0;
7301 	struct tg3_hw_status *sblk = tnapi->hw_status;
7302 
7303 	while (1) {
7304 		if (sblk->status & SD_STATUS_ERROR)
7305 			tg3_process_error(tp);
7306 
7307 		tg3_poll_link(tp);
7308 
7309 		work_done = tg3_poll_work(tnapi, work_done, budget);
7310 
7311 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7312 			goto tx_recovery;
7313 
7314 		if (unlikely(work_done >= budget))
7315 			break;
7316 
7317 		if (tg3_flag(tp, TAGGED_STATUS)) {
7318 			/* tp->last_tag is used in tg3_int_reenable() below
7319 			 * to tell the hw how much work has been processed,
7320 			 * so we must read it before checking for more work.
7321 			 */
7322 			tnapi->last_tag = sblk->status_tag;
7323 			tnapi->last_irq_tag = tnapi->last_tag;
7324 			rmb();
7325 		} else
7326 			sblk->status &= ~SD_STATUS_UPDATED;
7327 
7328 		if (likely(!tg3_has_work(tnapi))) {
7329 			napi_complete(napi);
7330 			tg3_int_reenable(tnapi);
7331 			break;
7332 		}
7333 	}
7334 
7335 	return work_done;
7336 
7337 tx_recovery:
7338 	/* work_done is guaranteed to be less than budget. */
7339 	napi_complete(napi);
7340 	tg3_reset_task_schedule(tp);
7341 	return work_done;
7342 }
7343 
7344 static void tg3_napi_disable(struct tg3 *tp)
7345 {
7346 	int i;
7347 
7348 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7349 		napi_disable(&tp->napi[i].napi);
7350 }
7351 
7352 static void tg3_napi_enable(struct tg3 *tp)
7353 {
7354 	int i;
7355 
7356 	for (i = 0; i < tp->irq_cnt; i++)
7357 		napi_enable(&tp->napi[i].napi);
7358 }
7359 
7360 static void tg3_napi_init(struct tg3 *tp)
7361 {
7362 	int i;
7363 
7364 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7365 	for (i = 1; i < tp->irq_cnt; i++)
7366 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7367 }
7368 
7369 static void tg3_napi_fini(struct tg3 *tp)
7370 {
7371 	int i;
7372 
7373 	for (i = 0; i < tp->irq_cnt; i++)
7374 		netif_napi_del(&tp->napi[i].napi);
7375 }
7376 
7377 static inline void tg3_netif_stop(struct tg3 *tp)
7378 {
7379 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7380 	tg3_napi_disable(tp);
7381 	netif_carrier_off(tp->dev);
7382 	netif_tx_disable(tp->dev);
7383 }
7384 
7385 /* tp->lock must be held */
7386 static inline void tg3_netif_start(struct tg3 *tp)
7387 {
7388 	tg3_ptp_resume(tp);
7389 
7390 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7391 	 * appropriate so long as all callers are assured to
7392 	 * have free tx slots (such as after tg3_init_hw)
7393 	 */
7394 	netif_tx_wake_all_queues(tp->dev);
7395 
7396 	if (tp->link_up)
7397 		netif_carrier_on(tp->dev);
7398 
7399 	tg3_napi_enable(tp);
7400 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7401 	tg3_enable_ints(tp);
7402 }
7403 
7404 static void tg3_irq_quiesce(struct tg3 *tp)
7405 {
7406 	int i;
7407 
7408 	BUG_ON(tp->irq_sync);
7409 
7410 	tp->irq_sync = 1;
7411 	smp_mb();
7412 
7413 	for (i = 0; i < tp->irq_cnt; i++)
7414 		synchronize_irq(tp->napi[i].irq_vec);
7415 }
7416 
7417 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7418  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7419  * with as well.  Most of the time, this is not necessary except when
7420  * shutting down the device.
7421  */
7422 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7423 {
7424 	spin_lock_bh(&tp->lock);
7425 	if (irq_sync)
7426 		tg3_irq_quiesce(tp);
7427 }
7428 
7429 static inline void tg3_full_unlock(struct tg3 *tp)
7430 {
7431 	spin_unlock_bh(&tp->lock);
7432 }
7433 
7434 /* One-shot MSI handler - Chip automatically disables interrupt
7435  * after sending MSI so driver doesn't have to do it.
7436  */
7437 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7438 {
7439 	struct tg3_napi *tnapi = dev_id;
7440 	struct tg3 *tp = tnapi->tp;
7441 
7442 	prefetch(tnapi->hw_status);
7443 	if (tnapi->rx_rcb)
7444 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7445 
7446 	if (likely(!tg3_irq_sync(tp)))
7447 		napi_schedule(&tnapi->napi);
7448 
7449 	return IRQ_HANDLED;
7450 }
7451 
7452 /* MSI ISR - No need to check for interrupt sharing and no need to
7453  * flush status block and interrupt mailbox. PCI ordering rules
7454  * guarantee that MSI will arrive after the status block.
7455  */
7456 static irqreturn_t tg3_msi(int irq, void *dev_id)
7457 {
7458 	struct tg3_napi *tnapi = dev_id;
7459 	struct tg3 *tp = tnapi->tp;
7460 
7461 	prefetch(tnapi->hw_status);
7462 	if (tnapi->rx_rcb)
7463 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7464 	/*
7465 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7466 	 * chip-internal interrupt pending events.
7467 	 * Writing non-zero to intr-mbox-0 additional tells the
7468 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7469 	 * event coalescing.
7470 	 */
7471 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7472 	if (likely(!tg3_irq_sync(tp)))
7473 		napi_schedule(&tnapi->napi);
7474 
7475 	return IRQ_RETVAL(1);
7476 }
7477 
7478 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7479 {
7480 	struct tg3_napi *tnapi = dev_id;
7481 	struct tg3 *tp = tnapi->tp;
7482 	struct tg3_hw_status *sblk = tnapi->hw_status;
7483 	unsigned int handled = 1;
7484 
7485 	/* In INTx mode, it is possible for the interrupt to arrive at
7486 	 * the CPU before the status block posted prior to the interrupt.
7487 	 * Reading the PCI State register will confirm whether the
7488 	 * interrupt is ours and will flush the status block.
7489 	 */
7490 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7491 		if (tg3_flag(tp, CHIP_RESETTING) ||
7492 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7493 			handled = 0;
7494 			goto out;
7495 		}
7496 	}
7497 
7498 	/*
7499 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7500 	 * chip-internal interrupt pending events.
7501 	 * Writing non-zero to intr-mbox-0 additional tells the
7502 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7503 	 * event coalescing.
7504 	 *
7505 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7506 	 * spurious interrupts.  The flush impacts performance but
7507 	 * excessive spurious interrupts can be worse in some cases.
7508 	 */
7509 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7510 	if (tg3_irq_sync(tp))
7511 		goto out;
7512 	sblk->status &= ~SD_STATUS_UPDATED;
7513 	if (likely(tg3_has_work(tnapi))) {
7514 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7515 		napi_schedule(&tnapi->napi);
7516 	} else {
7517 		/* No work, shared interrupt perhaps?  re-enable
7518 		 * interrupts, and flush that PCI write
7519 		 */
7520 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7521 			       0x00000000);
7522 	}
7523 out:
7524 	return IRQ_RETVAL(handled);
7525 }
7526 
7527 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7528 {
7529 	struct tg3_napi *tnapi = dev_id;
7530 	struct tg3 *tp = tnapi->tp;
7531 	struct tg3_hw_status *sblk = tnapi->hw_status;
7532 	unsigned int handled = 1;
7533 
7534 	/* In INTx mode, it is possible for the interrupt to arrive at
7535 	 * the CPU before the status block posted prior to the interrupt.
7536 	 * Reading the PCI State register will confirm whether the
7537 	 * interrupt is ours and will flush the status block.
7538 	 */
7539 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7540 		if (tg3_flag(tp, CHIP_RESETTING) ||
7541 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7542 			handled = 0;
7543 			goto out;
7544 		}
7545 	}
7546 
7547 	/*
7548 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7549 	 * chip-internal interrupt pending events.
7550 	 * writing non-zero to intr-mbox-0 additional tells the
7551 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7552 	 * event coalescing.
7553 	 *
7554 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7555 	 * spurious interrupts.  The flush impacts performance but
7556 	 * excessive spurious interrupts can be worse in some cases.
7557 	 */
7558 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7559 
7560 	/*
7561 	 * In a shared interrupt configuration, sometimes other devices'
7562 	 * interrupts will scream.  We record the current status tag here
7563 	 * so that the above check can report that the screaming interrupts
7564 	 * are unhandled.  Eventually they will be silenced.
7565 	 */
7566 	tnapi->last_irq_tag = sblk->status_tag;
7567 
7568 	if (tg3_irq_sync(tp))
7569 		goto out;
7570 
7571 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7572 
7573 	napi_schedule(&tnapi->napi);
7574 
7575 out:
7576 	return IRQ_RETVAL(handled);
7577 }
7578 
7579 /* ISR for interrupt test */
7580 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7581 {
7582 	struct tg3_napi *tnapi = dev_id;
7583 	struct tg3 *tp = tnapi->tp;
7584 	struct tg3_hw_status *sblk = tnapi->hw_status;
7585 
7586 	if ((sblk->status & SD_STATUS_UPDATED) ||
7587 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7588 		tg3_disable_ints(tp);
7589 		return IRQ_RETVAL(1);
7590 	}
7591 	return IRQ_RETVAL(0);
7592 }
7593 
7594 #ifdef CONFIG_NET_POLL_CONTROLLER
7595 static void tg3_poll_controller(struct net_device *dev)
7596 {
7597 	int i;
7598 	struct tg3 *tp = netdev_priv(dev);
7599 
7600 	if (tg3_irq_sync(tp))
7601 		return;
7602 
7603 	for (i = 0; i < tp->irq_cnt; i++)
7604 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7605 }
7606 #endif
7607 
7608 static void tg3_tx_timeout(struct net_device *dev)
7609 {
7610 	struct tg3 *tp = netdev_priv(dev);
7611 
7612 	if (netif_msg_tx_err(tp)) {
7613 		netdev_err(dev, "transmit timed out, resetting\n");
7614 		tg3_dump_state(tp);
7615 	}
7616 
7617 	tg3_reset_task_schedule(tp);
7618 }
7619 
7620 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7621 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622 {
7623 	u32 base = (u32) mapping & 0xffffffff;
7624 
7625 	return base + len + 8 < base;
7626 }
7627 
7628 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7629  * of any 4GB boundaries: 4G, 8G, etc
7630  */
7631 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7632 					   u32 len, u32 mss)
7633 {
7634 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7635 		u32 base = (u32) mapping & 0xffffffff;
7636 
7637 		return ((base + len + (mss & 0x3fff)) < base);
7638 	}
7639 	return 0;
7640 }
7641 
7642 /* Test for DMA addresses > 40-bit */
7643 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7644 					  int len)
7645 {
7646 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7647 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7648 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7649 	return 0;
7650 #else
7651 	return 0;
7652 #endif
7653 }
7654 
7655 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7656 				 dma_addr_t mapping, u32 len, u32 flags,
7657 				 u32 mss, u32 vlan)
7658 {
7659 	txbd->addr_hi = ((u64) mapping >> 32);
7660 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7661 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7662 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7663 }
7664 
7665 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7666 			    dma_addr_t map, u32 len, u32 flags,
7667 			    u32 mss, u32 vlan)
7668 {
7669 	struct tg3 *tp = tnapi->tp;
7670 	bool hwbug = false;
7671 
7672 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7673 		hwbug = true;
7674 
7675 	if (tg3_4g_overflow_test(map, len))
7676 		hwbug = true;
7677 
7678 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7679 		hwbug = true;
7680 
7681 	if (tg3_40bit_overflow_test(tp, map, len))
7682 		hwbug = true;
7683 
7684 	if (tp->dma_limit) {
7685 		u32 prvidx = *entry;
7686 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7687 		while (len > tp->dma_limit && *budget) {
7688 			u32 frag_len = tp->dma_limit;
7689 			len -= tp->dma_limit;
7690 
7691 			/* Avoid the 8byte DMA problem */
7692 			if (len <= 8) {
7693 				len += tp->dma_limit / 2;
7694 				frag_len = tp->dma_limit / 2;
7695 			}
7696 
7697 			tnapi->tx_buffers[*entry].fragmented = true;
7698 
7699 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7700 				      frag_len, tmp_flag, mss, vlan);
7701 			*budget -= 1;
7702 			prvidx = *entry;
7703 			*entry = NEXT_TX(*entry);
7704 
7705 			map += frag_len;
7706 		}
7707 
7708 		if (len) {
7709 			if (*budget) {
7710 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7711 					      len, flags, mss, vlan);
7712 				*budget -= 1;
7713 				*entry = NEXT_TX(*entry);
7714 			} else {
7715 				hwbug = true;
7716 				tnapi->tx_buffers[prvidx].fragmented = false;
7717 			}
7718 		}
7719 	} else {
7720 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 			      len, flags, mss, vlan);
7722 		*entry = NEXT_TX(*entry);
7723 	}
7724 
7725 	return hwbug;
7726 }
7727 
7728 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7729 {
7730 	int i;
7731 	struct sk_buff *skb;
7732 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7733 
7734 	skb = txb->skb;
7735 	txb->skb = NULL;
7736 
7737 	pci_unmap_single(tnapi->tp->pdev,
7738 			 dma_unmap_addr(txb, mapping),
7739 			 skb_headlen(skb),
7740 			 PCI_DMA_TODEVICE);
7741 
7742 	while (txb->fragmented) {
7743 		txb->fragmented = false;
7744 		entry = NEXT_TX(entry);
7745 		txb = &tnapi->tx_buffers[entry];
7746 	}
7747 
7748 	for (i = 0; i <= last; i++) {
7749 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7750 
7751 		entry = NEXT_TX(entry);
7752 		txb = &tnapi->tx_buffers[entry];
7753 
7754 		pci_unmap_page(tnapi->tp->pdev,
7755 			       dma_unmap_addr(txb, mapping),
7756 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7757 
7758 		while (txb->fragmented) {
7759 			txb->fragmented = false;
7760 			entry = NEXT_TX(entry);
7761 			txb = &tnapi->tx_buffers[entry];
7762 		}
7763 	}
7764 }
7765 
7766 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7767 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7768 				       struct sk_buff **pskb,
7769 				       u32 *entry, u32 *budget,
7770 				       u32 base_flags, u32 mss, u32 vlan)
7771 {
7772 	struct tg3 *tp = tnapi->tp;
7773 	struct sk_buff *new_skb, *skb = *pskb;
7774 	dma_addr_t new_addr = 0;
7775 	int ret = 0;
7776 
7777 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7778 		new_skb = skb_copy(skb, GFP_ATOMIC);
7779 	else {
7780 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7781 
7782 		new_skb = skb_copy_expand(skb,
7783 					  skb_headroom(skb) + more_headroom,
7784 					  skb_tailroom(skb), GFP_ATOMIC);
7785 	}
7786 
7787 	if (!new_skb) {
7788 		ret = -1;
7789 	} else {
7790 		/* New SKB is guaranteed to be linear. */
7791 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7792 					  PCI_DMA_TODEVICE);
7793 		/* Make sure the mapping succeeded */
7794 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7795 			dev_kfree_skb(new_skb);
7796 			ret = -1;
7797 		} else {
7798 			u32 save_entry = *entry;
7799 
7800 			base_flags |= TXD_FLAG_END;
7801 
7802 			tnapi->tx_buffers[*entry].skb = new_skb;
7803 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7804 					   mapping, new_addr);
7805 
7806 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7807 					    new_skb->len, base_flags,
7808 					    mss, vlan)) {
7809 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7810 				dev_kfree_skb(new_skb);
7811 				ret = -1;
7812 			}
7813 		}
7814 	}
7815 
7816 	dev_kfree_skb(skb);
7817 	*pskb = new_skb;
7818 	return ret;
7819 }
7820 
7821 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7822 
7823 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7824  * TSO header is greater than 80 bytes.
7825  */
7826 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7827 {
7828 	struct sk_buff *segs, *nskb;
7829 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7830 
7831 	/* Estimate the number of fragments in the worst case */
7832 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7833 		netif_stop_queue(tp->dev);
7834 
7835 		/* netif_tx_stop_queue() must be done before checking
7836 		 * checking tx index in tg3_tx_avail() below, because in
7837 		 * tg3_tx(), we update tx index before checking for
7838 		 * netif_tx_queue_stopped().
7839 		 */
7840 		smp_mb();
7841 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7842 			return NETDEV_TX_BUSY;
7843 
7844 		netif_wake_queue(tp->dev);
7845 	}
7846 
7847 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7848 	if (IS_ERR(segs))
7849 		goto tg3_tso_bug_end;
7850 
7851 	do {
7852 		nskb = segs;
7853 		segs = segs->next;
7854 		nskb->next = NULL;
7855 		tg3_start_xmit(nskb, tp->dev);
7856 	} while (segs);
7857 
7858 tg3_tso_bug_end:
7859 	dev_kfree_skb(skb);
7860 
7861 	return NETDEV_TX_OK;
7862 }
7863 
7864 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7865  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7866  */
7867 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7868 {
7869 	struct tg3 *tp = netdev_priv(dev);
7870 	u32 len, entry, base_flags, mss, vlan = 0;
7871 	u32 budget;
7872 	int i = -1, would_hit_hwbug;
7873 	dma_addr_t mapping;
7874 	struct tg3_napi *tnapi;
7875 	struct netdev_queue *txq;
7876 	unsigned int last;
7877 
7878 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7879 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7880 	if (tg3_flag(tp, ENABLE_TSS))
7881 		tnapi++;
7882 
7883 	budget = tg3_tx_avail(tnapi);
7884 
7885 	/* We are running in BH disabled context with netif_tx_lock
7886 	 * and TX reclaim runs via tp->napi.poll inside of a software
7887 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7888 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7889 	 */
7890 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7891 		if (!netif_tx_queue_stopped(txq)) {
7892 			netif_tx_stop_queue(txq);
7893 
7894 			/* This is a hard error, log it. */
7895 			netdev_err(dev,
7896 				   "BUG! Tx Ring full when queue awake!\n");
7897 		}
7898 		return NETDEV_TX_BUSY;
7899 	}
7900 
7901 	entry = tnapi->tx_prod;
7902 	base_flags = 0;
7903 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7904 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7905 
7906 	mss = skb_shinfo(skb)->gso_size;
7907 	if (mss) {
7908 		struct iphdr *iph;
7909 		u32 tcp_opt_len, hdr_len;
7910 
7911 		if (skb_header_cloned(skb) &&
7912 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7913 			goto drop;
7914 
7915 		iph = ip_hdr(skb);
7916 		tcp_opt_len = tcp_optlen(skb);
7917 
7918 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7919 
7920 		if (!skb_is_gso_v6(skb)) {
7921 			iph->check = 0;
7922 			iph->tot_len = htons(mss + hdr_len);
7923 		}
7924 
7925 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7926 		    tg3_flag(tp, TSO_BUG))
7927 			return tg3_tso_bug(tp, skb);
7928 
7929 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7930 			       TXD_FLAG_CPU_POST_DMA);
7931 
7932 		if (tg3_flag(tp, HW_TSO_1) ||
7933 		    tg3_flag(tp, HW_TSO_2) ||
7934 		    tg3_flag(tp, HW_TSO_3)) {
7935 			tcp_hdr(skb)->check = 0;
7936 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7937 		} else
7938 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7939 								 iph->daddr, 0,
7940 								 IPPROTO_TCP,
7941 								 0);
7942 
7943 		if (tg3_flag(tp, HW_TSO_3)) {
7944 			mss |= (hdr_len & 0xc) << 12;
7945 			if (hdr_len & 0x10)
7946 				base_flags |= 0x00000010;
7947 			base_flags |= (hdr_len & 0x3e0) << 5;
7948 		} else if (tg3_flag(tp, HW_TSO_2))
7949 			mss |= hdr_len << 9;
7950 		else if (tg3_flag(tp, HW_TSO_1) ||
7951 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7952 			if (tcp_opt_len || iph->ihl > 5) {
7953 				int tsflags;
7954 
7955 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7956 				mss |= (tsflags << 11);
7957 			}
7958 		} else {
7959 			if (tcp_opt_len || iph->ihl > 5) {
7960 				int tsflags;
7961 
7962 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7963 				base_flags |= tsflags << 12;
7964 			}
7965 		}
7966 	}
7967 
7968 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7969 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7970 		base_flags |= TXD_FLAG_JMB_PKT;
7971 
7972 	if (vlan_tx_tag_present(skb)) {
7973 		base_flags |= TXD_FLAG_VLAN;
7974 		vlan = vlan_tx_tag_get(skb);
7975 	}
7976 
7977 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7978 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7979 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7980 		base_flags |= TXD_FLAG_HWTSTAMP;
7981 	}
7982 
7983 	len = skb_headlen(skb);
7984 
7985 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7986 	if (pci_dma_mapping_error(tp->pdev, mapping))
7987 		goto drop;
7988 
7989 
7990 	tnapi->tx_buffers[entry].skb = skb;
7991 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7992 
7993 	would_hit_hwbug = 0;
7994 
7995 	if (tg3_flag(tp, 5701_DMA_BUG))
7996 		would_hit_hwbug = 1;
7997 
7998 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7999 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8000 			    mss, vlan)) {
8001 		would_hit_hwbug = 1;
8002 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8003 		u32 tmp_mss = mss;
8004 
8005 		if (!tg3_flag(tp, HW_TSO_1) &&
8006 		    !tg3_flag(tp, HW_TSO_2) &&
8007 		    !tg3_flag(tp, HW_TSO_3))
8008 			tmp_mss = 0;
8009 
8010 		/* Now loop through additional data
8011 		 * fragments, and queue them.
8012 		 */
8013 		last = skb_shinfo(skb)->nr_frags - 1;
8014 		for (i = 0; i <= last; i++) {
8015 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8016 
8017 			len = skb_frag_size(frag);
8018 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8019 						   len, DMA_TO_DEVICE);
8020 
8021 			tnapi->tx_buffers[entry].skb = NULL;
8022 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8023 					   mapping);
8024 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8025 				goto dma_error;
8026 
8027 			if (!budget ||
8028 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8029 					    len, base_flags |
8030 					    ((i == last) ? TXD_FLAG_END : 0),
8031 					    tmp_mss, vlan)) {
8032 				would_hit_hwbug = 1;
8033 				break;
8034 			}
8035 		}
8036 	}
8037 
8038 	if (would_hit_hwbug) {
8039 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8040 
8041 		/* If the workaround fails due to memory/mapping
8042 		 * failure, silently drop this packet.
8043 		 */
8044 		entry = tnapi->tx_prod;
8045 		budget = tg3_tx_avail(tnapi);
8046 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8047 						base_flags, mss, vlan))
8048 			goto drop_nofree;
8049 	}
8050 
8051 	skb_tx_timestamp(skb);
8052 	netdev_tx_sent_queue(txq, skb->len);
8053 
8054 	/* Sync BD data before updating mailbox */
8055 	wmb();
8056 
8057 	/* Packets are ready, update Tx producer idx local and on card. */
8058 	tw32_tx_mbox(tnapi->prodmbox, entry);
8059 
8060 	tnapi->tx_prod = entry;
8061 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8062 		netif_tx_stop_queue(txq);
8063 
8064 		/* netif_tx_stop_queue() must be done before checking
8065 		 * checking tx index in tg3_tx_avail() below, because in
8066 		 * tg3_tx(), we update tx index before checking for
8067 		 * netif_tx_queue_stopped().
8068 		 */
8069 		smp_mb();
8070 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8071 			netif_tx_wake_queue(txq);
8072 	}
8073 
8074 	mmiowb();
8075 	return NETDEV_TX_OK;
8076 
8077 dma_error:
8078 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8079 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8080 drop:
8081 	dev_kfree_skb(skb);
8082 drop_nofree:
8083 	tp->tx_dropped++;
8084 	return NETDEV_TX_OK;
8085 }
8086 
8087 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8088 {
8089 	if (enable) {
8090 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8091 				  MAC_MODE_PORT_MODE_MASK);
8092 
8093 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8094 
8095 		if (!tg3_flag(tp, 5705_PLUS))
8096 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8097 
8098 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8099 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8100 		else
8101 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8102 	} else {
8103 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8104 
8105 		if (tg3_flag(tp, 5705_PLUS) ||
8106 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8107 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8108 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8109 	}
8110 
8111 	tw32(MAC_MODE, tp->mac_mode);
8112 	udelay(40);
8113 }
8114 
8115 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8116 {
8117 	u32 val, bmcr, mac_mode, ptest = 0;
8118 
8119 	tg3_phy_toggle_apd(tp, false);
8120 	tg3_phy_toggle_automdix(tp, false);
8121 
8122 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8123 		return -EIO;
8124 
8125 	bmcr = BMCR_FULLDPLX;
8126 	switch (speed) {
8127 	case SPEED_10:
8128 		break;
8129 	case SPEED_100:
8130 		bmcr |= BMCR_SPEED100;
8131 		break;
8132 	case SPEED_1000:
8133 	default:
8134 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8135 			speed = SPEED_100;
8136 			bmcr |= BMCR_SPEED100;
8137 		} else {
8138 			speed = SPEED_1000;
8139 			bmcr |= BMCR_SPEED1000;
8140 		}
8141 	}
8142 
8143 	if (extlpbk) {
8144 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8145 			tg3_readphy(tp, MII_CTRL1000, &val);
8146 			val |= CTL1000_AS_MASTER |
8147 			       CTL1000_ENABLE_MASTER;
8148 			tg3_writephy(tp, MII_CTRL1000, val);
8149 		} else {
8150 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8151 				MII_TG3_FET_PTEST_TRIM_2;
8152 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8153 		}
8154 	} else
8155 		bmcr |= BMCR_LOOPBACK;
8156 
8157 	tg3_writephy(tp, MII_BMCR, bmcr);
8158 
8159 	/* The write needs to be flushed for the FETs */
8160 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8161 		tg3_readphy(tp, MII_BMCR, &bmcr);
8162 
8163 	udelay(40);
8164 
8165 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8166 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8167 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8168 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8169 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8170 
8171 		/* The write needs to be flushed for the AC131 */
8172 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8173 	}
8174 
8175 	/* Reset to prevent losing 1st rx packet intermittently */
8176 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8177 	    tg3_flag(tp, 5780_CLASS)) {
8178 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8179 		udelay(10);
8180 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8181 	}
8182 
8183 	mac_mode = tp->mac_mode &
8184 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8185 	if (speed == SPEED_1000)
8186 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8187 	else
8188 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8189 
8190 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8191 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8192 
8193 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8194 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8195 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8196 			mac_mode |= MAC_MODE_LINK_POLARITY;
8197 
8198 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8199 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8200 	}
8201 
8202 	tw32(MAC_MODE, mac_mode);
8203 	udelay(40);
8204 
8205 	return 0;
8206 }
8207 
8208 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8209 {
8210 	struct tg3 *tp = netdev_priv(dev);
8211 
8212 	if (features & NETIF_F_LOOPBACK) {
8213 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8214 			return;
8215 
8216 		spin_lock_bh(&tp->lock);
8217 		tg3_mac_loopback(tp, true);
8218 		netif_carrier_on(tp->dev);
8219 		spin_unlock_bh(&tp->lock);
8220 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8221 	} else {
8222 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8223 			return;
8224 
8225 		spin_lock_bh(&tp->lock);
8226 		tg3_mac_loopback(tp, false);
8227 		/* Force link status check */
8228 		tg3_setup_phy(tp, true);
8229 		spin_unlock_bh(&tp->lock);
8230 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8231 	}
8232 }
8233 
8234 static netdev_features_t tg3_fix_features(struct net_device *dev,
8235 	netdev_features_t features)
8236 {
8237 	struct tg3 *tp = netdev_priv(dev);
8238 
8239 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8240 		features &= ~NETIF_F_ALL_TSO;
8241 
8242 	return features;
8243 }
8244 
8245 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8246 {
8247 	netdev_features_t changed = dev->features ^ features;
8248 
8249 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8250 		tg3_set_loopback(dev, features);
8251 
8252 	return 0;
8253 }
8254 
8255 static void tg3_rx_prodring_free(struct tg3 *tp,
8256 				 struct tg3_rx_prodring_set *tpr)
8257 {
8258 	int i;
8259 
8260 	if (tpr != &tp->napi[0].prodring) {
8261 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8262 		     i = (i + 1) & tp->rx_std_ring_mask)
8263 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8264 					tp->rx_pkt_map_sz);
8265 
8266 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8267 			for (i = tpr->rx_jmb_cons_idx;
8268 			     i != tpr->rx_jmb_prod_idx;
8269 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8270 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8271 						TG3_RX_JMB_MAP_SZ);
8272 			}
8273 		}
8274 
8275 		return;
8276 	}
8277 
8278 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8279 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8280 				tp->rx_pkt_map_sz);
8281 
8282 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8283 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8284 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8285 					TG3_RX_JMB_MAP_SZ);
8286 	}
8287 }
8288 
8289 /* Initialize rx rings for packet processing.
8290  *
8291  * The chip has been shut down and the driver detached from
8292  * the networking, so no interrupts or new tx packets will
8293  * end up in the driver.  tp->{tx,}lock are held and thus
8294  * we may not sleep.
8295  */
8296 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8297 				 struct tg3_rx_prodring_set *tpr)
8298 {
8299 	u32 i, rx_pkt_dma_sz;
8300 
8301 	tpr->rx_std_cons_idx = 0;
8302 	tpr->rx_std_prod_idx = 0;
8303 	tpr->rx_jmb_cons_idx = 0;
8304 	tpr->rx_jmb_prod_idx = 0;
8305 
8306 	if (tpr != &tp->napi[0].prodring) {
8307 		memset(&tpr->rx_std_buffers[0], 0,
8308 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8309 		if (tpr->rx_jmb_buffers)
8310 			memset(&tpr->rx_jmb_buffers[0], 0,
8311 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8312 		goto done;
8313 	}
8314 
8315 	/* Zero out all descriptors. */
8316 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8317 
8318 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8319 	if (tg3_flag(tp, 5780_CLASS) &&
8320 	    tp->dev->mtu > ETH_DATA_LEN)
8321 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8322 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8323 
8324 	/* Initialize invariants of the rings, we only set this
8325 	 * stuff once.  This works because the card does not
8326 	 * write into the rx buffer posting rings.
8327 	 */
8328 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8329 		struct tg3_rx_buffer_desc *rxd;
8330 
8331 		rxd = &tpr->rx_std[i];
8332 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8333 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8334 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8335 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8336 	}
8337 
8338 	/* Now allocate fresh SKBs for each rx ring. */
8339 	for (i = 0; i < tp->rx_pending; i++) {
8340 		unsigned int frag_size;
8341 
8342 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8343 				      &frag_size) < 0) {
8344 			netdev_warn(tp->dev,
8345 				    "Using a smaller RX standard ring. Only "
8346 				    "%d out of %d buffers were allocated "
8347 				    "successfully\n", i, tp->rx_pending);
8348 			if (i == 0)
8349 				goto initfail;
8350 			tp->rx_pending = i;
8351 			break;
8352 		}
8353 	}
8354 
8355 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8356 		goto done;
8357 
8358 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8359 
8360 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8361 		goto done;
8362 
8363 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8364 		struct tg3_rx_buffer_desc *rxd;
8365 
8366 		rxd = &tpr->rx_jmb[i].std;
8367 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8368 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8369 				  RXD_FLAG_JUMBO;
8370 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8371 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8372 	}
8373 
8374 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8375 		unsigned int frag_size;
8376 
8377 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8378 				      &frag_size) < 0) {
8379 			netdev_warn(tp->dev,
8380 				    "Using a smaller RX jumbo ring. Only %d "
8381 				    "out of %d buffers were allocated "
8382 				    "successfully\n", i, tp->rx_jumbo_pending);
8383 			if (i == 0)
8384 				goto initfail;
8385 			tp->rx_jumbo_pending = i;
8386 			break;
8387 		}
8388 	}
8389 
8390 done:
8391 	return 0;
8392 
8393 initfail:
8394 	tg3_rx_prodring_free(tp, tpr);
8395 	return -ENOMEM;
8396 }
8397 
8398 static void tg3_rx_prodring_fini(struct tg3 *tp,
8399 				 struct tg3_rx_prodring_set *tpr)
8400 {
8401 	kfree(tpr->rx_std_buffers);
8402 	tpr->rx_std_buffers = NULL;
8403 	kfree(tpr->rx_jmb_buffers);
8404 	tpr->rx_jmb_buffers = NULL;
8405 	if (tpr->rx_std) {
8406 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8407 				  tpr->rx_std, tpr->rx_std_mapping);
8408 		tpr->rx_std = NULL;
8409 	}
8410 	if (tpr->rx_jmb) {
8411 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8412 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8413 		tpr->rx_jmb = NULL;
8414 	}
8415 }
8416 
8417 static int tg3_rx_prodring_init(struct tg3 *tp,
8418 				struct tg3_rx_prodring_set *tpr)
8419 {
8420 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8421 				      GFP_KERNEL);
8422 	if (!tpr->rx_std_buffers)
8423 		return -ENOMEM;
8424 
8425 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8426 					 TG3_RX_STD_RING_BYTES(tp),
8427 					 &tpr->rx_std_mapping,
8428 					 GFP_KERNEL);
8429 	if (!tpr->rx_std)
8430 		goto err_out;
8431 
8432 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8433 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8434 					      GFP_KERNEL);
8435 		if (!tpr->rx_jmb_buffers)
8436 			goto err_out;
8437 
8438 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8439 						 TG3_RX_JMB_RING_BYTES(tp),
8440 						 &tpr->rx_jmb_mapping,
8441 						 GFP_KERNEL);
8442 		if (!tpr->rx_jmb)
8443 			goto err_out;
8444 	}
8445 
8446 	return 0;
8447 
8448 err_out:
8449 	tg3_rx_prodring_fini(tp, tpr);
8450 	return -ENOMEM;
8451 }
8452 
8453 /* Free up pending packets in all rx/tx rings.
8454  *
8455  * The chip has been shut down and the driver detached from
8456  * the networking, so no interrupts or new tx packets will
8457  * end up in the driver.  tp->{tx,}lock is not held and we are not
8458  * in an interrupt context and thus may sleep.
8459  */
8460 static void tg3_free_rings(struct tg3 *tp)
8461 {
8462 	int i, j;
8463 
8464 	for (j = 0; j < tp->irq_cnt; j++) {
8465 		struct tg3_napi *tnapi = &tp->napi[j];
8466 
8467 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8468 
8469 		if (!tnapi->tx_buffers)
8470 			continue;
8471 
8472 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8473 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8474 
8475 			if (!skb)
8476 				continue;
8477 
8478 			tg3_tx_skb_unmap(tnapi, i,
8479 					 skb_shinfo(skb)->nr_frags - 1);
8480 
8481 			dev_kfree_skb_any(skb);
8482 		}
8483 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8484 	}
8485 }
8486 
8487 /* Initialize tx/rx rings for packet processing.
8488  *
8489  * The chip has been shut down and the driver detached from
8490  * the networking, so no interrupts or new tx packets will
8491  * end up in the driver.  tp->{tx,}lock are held and thus
8492  * we may not sleep.
8493  */
8494 static int tg3_init_rings(struct tg3 *tp)
8495 {
8496 	int i;
8497 
8498 	/* Free up all the SKBs. */
8499 	tg3_free_rings(tp);
8500 
8501 	for (i = 0; i < tp->irq_cnt; i++) {
8502 		struct tg3_napi *tnapi = &tp->napi[i];
8503 
8504 		tnapi->last_tag = 0;
8505 		tnapi->last_irq_tag = 0;
8506 		tnapi->hw_status->status = 0;
8507 		tnapi->hw_status->status_tag = 0;
8508 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8509 
8510 		tnapi->tx_prod = 0;
8511 		tnapi->tx_cons = 0;
8512 		if (tnapi->tx_ring)
8513 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8514 
8515 		tnapi->rx_rcb_ptr = 0;
8516 		if (tnapi->rx_rcb)
8517 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8518 
8519 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8520 			tg3_free_rings(tp);
8521 			return -ENOMEM;
8522 		}
8523 	}
8524 
8525 	return 0;
8526 }
8527 
8528 static void tg3_mem_tx_release(struct tg3 *tp)
8529 {
8530 	int i;
8531 
8532 	for (i = 0; i < tp->irq_max; i++) {
8533 		struct tg3_napi *tnapi = &tp->napi[i];
8534 
8535 		if (tnapi->tx_ring) {
8536 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8537 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8538 			tnapi->tx_ring = NULL;
8539 		}
8540 
8541 		kfree(tnapi->tx_buffers);
8542 		tnapi->tx_buffers = NULL;
8543 	}
8544 }
8545 
8546 static int tg3_mem_tx_acquire(struct tg3 *tp)
8547 {
8548 	int i;
8549 	struct tg3_napi *tnapi = &tp->napi[0];
8550 
8551 	/* If multivector TSS is enabled, vector 0 does not handle
8552 	 * tx interrupts.  Don't allocate any resources for it.
8553 	 */
8554 	if (tg3_flag(tp, ENABLE_TSS))
8555 		tnapi++;
8556 
8557 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8558 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8559 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8560 		if (!tnapi->tx_buffers)
8561 			goto err_out;
8562 
8563 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8564 						    TG3_TX_RING_BYTES,
8565 						    &tnapi->tx_desc_mapping,
8566 						    GFP_KERNEL);
8567 		if (!tnapi->tx_ring)
8568 			goto err_out;
8569 	}
8570 
8571 	return 0;
8572 
8573 err_out:
8574 	tg3_mem_tx_release(tp);
8575 	return -ENOMEM;
8576 }
8577 
8578 static void tg3_mem_rx_release(struct tg3 *tp)
8579 {
8580 	int i;
8581 
8582 	for (i = 0; i < tp->irq_max; i++) {
8583 		struct tg3_napi *tnapi = &tp->napi[i];
8584 
8585 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8586 
8587 		if (!tnapi->rx_rcb)
8588 			continue;
8589 
8590 		dma_free_coherent(&tp->pdev->dev,
8591 				  TG3_RX_RCB_RING_BYTES(tp),
8592 				  tnapi->rx_rcb,
8593 				  tnapi->rx_rcb_mapping);
8594 		tnapi->rx_rcb = NULL;
8595 	}
8596 }
8597 
8598 static int tg3_mem_rx_acquire(struct tg3 *tp)
8599 {
8600 	unsigned int i, limit;
8601 
8602 	limit = tp->rxq_cnt;
8603 
8604 	/* If RSS is enabled, we need a (dummy) producer ring
8605 	 * set on vector zero.  This is the true hw prodring.
8606 	 */
8607 	if (tg3_flag(tp, ENABLE_RSS))
8608 		limit++;
8609 
8610 	for (i = 0; i < limit; i++) {
8611 		struct tg3_napi *tnapi = &tp->napi[i];
8612 
8613 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8614 			goto err_out;
8615 
8616 		/* If multivector RSS is enabled, vector 0
8617 		 * does not handle rx or tx interrupts.
8618 		 * Don't allocate any resources for it.
8619 		 */
8620 		if (!i && tg3_flag(tp, ENABLE_RSS))
8621 			continue;
8622 
8623 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8624 						    TG3_RX_RCB_RING_BYTES(tp),
8625 						    &tnapi->rx_rcb_mapping,
8626 						    GFP_KERNEL);
8627 		if (!tnapi->rx_rcb)
8628 			goto err_out;
8629 	}
8630 
8631 	return 0;
8632 
8633 err_out:
8634 	tg3_mem_rx_release(tp);
8635 	return -ENOMEM;
8636 }
8637 
8638 /*
8639  * Must not be invoked with interrupt sources disabled and
8640  * the hardware shutdown down.
8641  */
8642 static void tg3_free_consistent(struct tg3 *tp)
8643 {
8644 	int i;
8645 
8646 	for (i = 0; i < tp->irq_cnt; i++) {
8647 		struct tg3_napi *tnapi = &tp->napi[i];
8648 
8649 		if (tnapi->hw_status) {
8650 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8651 					  tnapi->hw_status,
8652 					  tnapi->status_mapping);
8653 			tnapi->hw_status = NULL;
8654 		}
8655 	}
8656 
8657 	tg3_mem_rx_release(tp);
8658 	tg3_mem_tx_release(tp);
8659 
8660 	if (tp->hw_stats) {
8661 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8662 				  tp->hw_stats, tp->stats_mapping);
8663 		tp->hw_stats = NULL;
8664 	}
8665 }
8666 
8667 /*
8668  * Must not be invoked with interrupt sources disabled and
8669  * the hardware shutdown down.  Can sleep.
8670  */
8671 static int tg3_alloc_consistent(struct tg3 *tp)
8672 {
8673 	int i;
8674 
8675 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8676 					   sizeof(struct tg3_hw_stats),
8677 					   &tp->stats_mapping, GFP_KERNEL);
8678 	if (!tp->hw_stats)
8679 		goto err_out;
8680 
8681 	for (i = 0; i < tp->irq_cnt; i++) {
8682 		struct tg3_napi *tnapi = &tp->napi[i];
8683 		struct tg3_hw_status *sblk;
8684 
8685 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8686 						       TG3_HW_STATUS_SIZE,
8687 						       &tnapi->status_mapping,
8688 						       GFP_KERNEL);
8689 		if (!tnapi->hw_status)
8690 			goto err_out;
8691 
8692 		sblk = tnapi->hw_status;
8693 
8694 		if (tg3_flag(tp, ENABLE_RSS)) {
8695 			u16 *prodptr = NULL;
8696 
8697 			/*
8698 			 * When RSS is enabled, the status block format changes
8699 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8700 			 * and "rx_mini_consumer" members get mapped to the
8701 			 * other three rx return ring producer indexes.
8702 			 */
8703 			switch (i) {
8704 			case 1:
8705 				prodptr = &sblk->idx[0].rx_producer;
8706 				break;
8707 			case 2:
8708 				prodptr = &sblk->rx_jumbo_consumer;
8709 				break;
8710 			case 3:
8711 				prodptr = &sblk->reserved;
8712 				break;
8713 			case 4:
8714 				prodptr = &sblk->rx_mini_consumer;
8715 				break;
8716 			}
8717 			tnapi->rx_rcb_prod_idx = prodptr;
8718 		} else {
8719 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8720 		}
8721 	}
8722 
8723 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8724 		goto err_out;
8725 
8726 	return 0;
8727 
8728 err_out:
8729 	tg3_free_consistent(tp);
8730 	return -ENOMEM;
8731 }
8732 
8733 #define MAX_WAIT_CNT 1000
8734 
8735 /* To stop a block, clear the enable bit and poll till it
8736  * clears.  tp->lock is held.
8737  */
8738 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8739 {
8740 	unsigned int i;
8741 	u32 val;
8742 
8743 	if (tg3_flag(tp, 5705_PLUS)) {
8744 		switch (ofs) {
8745 		case RCVLSC_MODE:
8746 		case DMAC_MODE:
8747 		case MBFREE_MODE:
8748 		case BUFMGR_MODE:
8749 		case MEMARB_MODE:
8750 			/* We can't enable/disable these bits of the
8751 			 * 5705/5750, just say success.
8752 			 */
8753 			return 0;
8754 
8755 		default:
8756 			break;
8757 		}
8758 	}
8759 
8760 	val = tr32(ofs);
8761 	val &= ~enable_bit;
8762 	tw32_f(ofs, val);
8763 
8764 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8765 		if (pci_channel_offline(tp->pdev)) {
8766 			dev_err(&tp->pdev->dev,
8767 				"tg3_stop_block device offline, "
8768 				"ofs=%lx enable_bit=%x\n",
8769 				ofs, enable_bit);
8770 			return -ENODEV;
8771 		}
8772 
8773 		udelay(100);
8774 		val = tr32(ofs);
8775 		if ((val & enable_bit) == 0)
8776 			break;
8777 	}
8778 
8779 	if (i == MAX_WAIT_CNT && !silent) {
8780 		dev_err(&tp->pdev->dev,
8781 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8782 			ofs, enable_bit);
8783 		return -ENODEV;
8784 	}
8785 
8786 	return 0;
8787 }
8788 
8789 /* tp->lock is held. */
8790 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8791 {
8792 	int i, err;
8793 
8794 	tg3_disable_ints(tp);
8795 
8796 	if (pci_channel_offline(tp->pdev)) {
8797 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8798 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8799 		err = -ENODEV;
8800 		goto err_no_dev;
8801 	}
8802 
8803 	tp->rx_mode &= ~RX_MODE_ENABLE;
8804 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8805 	udelay(10);
8806 
8807 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8808 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8809 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8810 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8811 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8812 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8813 
8814 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8815 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8816 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8817 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8818 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8819 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8820 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8821 
8822 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8823 	tw32_f(MAC_MODE, tp->mac_mode);
8824 	udelay(40);
8825 
8826 	tp->tx_mode &= ~TX_MODE_ENABLE;
8827 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8828 
8829 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8830 		udelay(100);
8831 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8832 			break;
8833 	}
8834 	if (i >= MAX_WAIT_CNT) {
8835 		dev_err(&tp->pdev->dev,
8836 			"%s timed out, TX_MODE_ENABLE will not clear "
8837 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8838 		err |= -ENODEV;
8839 	}
8840 
8841 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8842 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8843 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8844 
8845 	tw32(FTQ_RESET, 0xffffffff);
8846 	tw32(FTQ_RESET, 0x00000000);
8847 
8848 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8849 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8850 
8851 err_no_dev:
8852 	for (i = 0; i < tp->irq_cnt; i++) {
8853 		struct tg3_napi *tnapi = &tp->napi[i];
8854 		if (tnapi->hw_status)
8855 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8856 	}
8857 
8858 	return err;
8859 }
8860 
8861 /* Save PCI command register before chip reset */
8862 static void tg3_save_pci_state(struct tg3 *tp)
8863 {
8864 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8865 }
8866 
8867 /* Restore PCI state after chip reset */
8868 static void tg3_restore_pci_state(struct tg3 *tp)
8869 {
8870 	u32 val;
8871 
8872 	/* Re-enable indirect register accesses. */
8873 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8874 			       tp->misc_host_ctrl);
8875 
8876 	/* Set MAX PCI retry to zero. */
8877 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8878 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8879 	    tg3_flag(tp, PCIX_MODE))
8880 		val |= PCISTATE_RETRY_SAME_DMA;
8881 	/* Allow reads and writes to the APE register and memory space. */
8882 	if (tg3_flag(tp, ENABLE_APE))
8883 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8884 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8885 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8886 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8887 
8888 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8889 
8890 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8891 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8892 				      tp->pci_cacheline_sz);
8893 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8894 				      tp->pci_lat_timer);
8895 	}
8896 
8897 	/* Make sure PCI-X relaxed ordering bit is clear. */
8898 	if (tg3_flag(tp, PCIX_MODE)) {
8899 		u16 pcix_cmd;
8900 
8901 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8902 				     &pcix_cmd);
8903 		pcix_cmd &= ~PCI_X_CMD_ERO;
8904 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8905 				      pcix_cmd);
8906 	}
8907 
8908 	if (tg3_flag(tp, 5780_CLASS)) {
8909 
8910 		/* Chip reset on 5780 will reset MSI enable bit,
8911 		 * so need to restore it.
8912 		 */
8913 		if (tg3_flag(tp, USING_MSI)) {
8914 			u16 ctrl;
8915 
8916 			pci_read_config_word(tp->pdev,
8917 					     tp->msi_cap + PCI_MSI_FLAGS,
8918 					     &ctrl);
8919 			pci_write_config_word(tp->pdev,
8920 					      tp->msi_cap + PCI_MSI_FLAGS,
8921 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8922 			val = tr32(MSGINT_MODE);
8923 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8924 		}
8925 	}
8926 }
8927 
8928 /* tp->lock is held. */
8929 static int tg3_chip_reset(struct tg3 *tp)
8930 {
8931 	u32 val;
8932 	void (*write_op)(struct tg3 *, u32, u32);
8933 	int i, err;
8934 
8935 	if (!pci_device_is_present(tp->pdev))
8936 		return -ENODEV;
8937 
8938 	tg3_nvram_lock(tp);
8939 
8940 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8941 
8942 	/* No matching tg3_nvram_unlock() after this because
8943 	 * chip reset below will undo the nvram lock.
8944 	 */
8945 	tp->nvram_lock_cnt = 0;
8946 
8947 	/* GRC_MISC_CFG core clock reset will clear the memory
8948 	 * enable bit in PCI register 4 and the MSI enable bit
8949 	 * on some chips, so we save relevant registers here.
8950 	 */
8951 	tg3_save_pci_state(tp);
8952 
8953 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8954 	    tg3_flag(tp, 5755_PLUS))
8955 		tw32(GRC_FASTBOOT_PC, 0);
8956 
8957 	/*
8958 	 * We must avoid the readl() that normally takes place.
8959 	 * It locks machines, causes machine checks, and other
8960 	 * fun things.  So, temporarily disable the 5701
8961 	 * hardware workaround, while we do the reset.
8962 	 */
8963 	write_op = tp->write32;
8964 	if (write_op == tg3_write_flush_reg32)
8965 		tp->write32 = tg3_write32;
8966 
8967 	/* Prevent the irq handler from reading or writing PCI registers
8968 	 * during chip reset when the memory enable bit in the PCI command
8969 	 * register may be cleared.  The chip does not generate interrupt
8970 	 * at this time, but the irq handler may still be called due to irq
8971 	 * sharing or irqpoll.
8972 	 */
8973 	tg3_flag_set(tp, CHIP_RESETTING);
8974 	for (i = 0; i < tp->irq_cnt; i++) {
8975 		struct tg3_napi *tnapi = &tp->napi[i];
8976 		if (tnapi->hw_status) {
8977 			tnapi->hw_status->status = 0;
8978 			tnapi->hw_status->status_tag = 0;
8979 		}
8980 		tnapi->last_tag = 0;
8981 		tnapi->last_irq_tag = 0;
8982 	}
8983 	smp_mb();
8984 
8985 	for (i = 0; i < tp->irq_cnt; i++)
8986 		synchronize_irq(tp->napi[i].irq_vec);
8987 
8988 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8989 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8990 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8991 	}
8992 
8993 	/* do the reset */
8994 	val = GRC_MISC_CFG_CORECLK_RESET;
8995 
8996 	if (tg3_flag(tp, PCI_EXPRESS)) {
8997 		/* Force PCIe 1.0a mode */
8998 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8999 		    !tg3_flag(tp, 57765_PLUS) &&
9000 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9001 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9002 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9003 
9004 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9005 			tw32(GRC_MISC_CFG, (1 << 29));
9006 			val |= (1 << 29);
9007 		}
9008 	}
9009 
9010 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9011 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9012 		tw32(GRC_VCPU_EXT_CTRL,
9013 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9014 	}
9015 
9016 	/* Manage gphy power for all CPMU absent PCIe devices. */
9017 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9018 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9019 
9020 	tw32(GRC_MISC_CFG, val);
9021 
9022 	/* restore 5701 hardware bug workaround write method */
9023 	tp->write32 = write_op;
9024 
9025 	/* Unfortunately, we have to delay before the PCI read back.
9026 	 * Some 575X chips even will not respond to a PCI cfg access
9027 	 * when the reset command is given to the chip.
9028 	 *
9029 	 * How do these hardware designers expect things to work
9030 	 * properly if the PCI write is posted for a long period
9031 	 * of time?  It is always necessary to have some method by
9032 	 * which a register read back can occur to push the write
9033 	 * out which does the reset.
9034 	 *
9035 	 * For most tg3 variants the trick below was working.
9036 	 * Ho hum...
9037 	 */
9038 	udelay(120);
9039 
9040 	/* Flush PCI posted writes.  The normal MMIO registers
9041 	 * are inaccessible at this time so this is the only
9042 	 * way to make this reliably (actually, this is no longer
9043 	 * the case, see above).  I tried to use indirect
9044 	 * register read/write but this upset some 5701 variants.
9045 	 */
9046 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9047 
9048 	udelay(120);
9049 
9050 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9051 		u16 val16;
9052 
9053 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9054 			int j;
9055 			u32 cfg_val;
9056 
9057 			/* Wait for link training to complete.  */
9058 			for (j = 0; j < 5000; j++)
9059 				udelay(100);
9060 
9061 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9062 			pci_write_config_dword(tp->pdev, 0xc4,
9063 					       cfg_val | (1 << 15));
9064 		}
9065 
9066 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9067 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9068 		/*
9069 		 * Older PCIe devices only support the 128 byte
9070 		 * MPS setting.  Enforce the restriction.
9071 		 */
9072 		if (!tg3_flag(tp, CPMU_PRESENT))
9073 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9074 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9075 
9076 		/* Clear error status */
9077 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9078 				      PCI_EXP_DEVSTA_CED |
9079 				      PCI_EXP_DEVSTA_NFED |
9080 				      PCI_EXP_DEVSTA_FED |
9081 				      PCI_EXP_DEVSTA_URD);
9082 	}
9083 
9084 	tg3_restore_pci_state(tp);
9085 
9086 	tg3_flag_clear(tp, CHIP_RESETTING);
9087 	tg3_flag_clear(tp, ERROR_PROCESSED);
9088 
9089 	val = 0;
9090 	if (tg3_flag(tp, 5780_CLASS))
9091 		val = tr32(MEMARB_MODE);
9092 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9093 
9094 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9095 		tg3_stop_fw(tp);
9096 		tw32(0x5000, 0x400);
9097 	}
9098 
9099 	if (tg3_flag(tp, IS_SSB_CORE)) {
9100 		/*
9101 		 * BCM4785: In order to avoid repercussions from using
9102 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9103 		 * which is not required.
9104 		 */
9105 		tg3_stop_fw(tp);
9106 		tg3_halt_cpu(tp, RX_CPU_BASE);
9107 	}
9108 
9109 	err = tg3_poll_fw(tp);
9110 	if (err)
9111 		return err;
9112 
9113 	tw32(GRC_MODE, tp->grc_mode);
9114 
9115 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9116 		val = tr32(0xc4);
9117 
9118 		tw32(0xc4, val | (1 << 15));
9119 	}
9120 
9121 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9122 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9123 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9124 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9125 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9126 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9127 	}
9128 
9129 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9130 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9131 		val = tp->mac_mode;
9132 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9133 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9134 		val = tp->mac_mode;
9135 	} else
9136 		val = 0;
9137 
9138 	tw32_f(MAC_MODE, val);
9139 	udelay(40);
9140 
9141 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9142 
9143 	tg3_mdio_start(tp);
9144 
9145 	if (tg3_flag(tp, PCI_EXPRESS) &&
9146 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9147 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9148 	    !tg3_flag(tp, 57765_PLUS)) {
9149 		val = tr32(0x7c00);
9150 
9151 		tw32(0x7c00, val | (1 << 25));
9152 	}
9153 
9154 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9155 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9156 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9157 	}
9158 
9159 	/* Reprobe ASF enable state.  */
9160 	tg3_flag_clear(tp, ENABLE_ASF);
9161 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9162 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9163 
9164 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9165 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9166 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9167 		u32 nic_cfg;
9168 
9169 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9170 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9171 			tg3_flag_set(tp, ENABLE_ASF);
9172 			tp->last_event_jiffies = jiffies;
9173 			if (tg3_flag(tp, 5750_PLUS))
9174 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9175 
9176 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9177 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9178 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9179 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9180 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9181 		}
9182 	}
9183 
9184 	return 0;
9185 }
9186 
9187 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9188 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9189 
9190 /* tp->lock is held. */
9191 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9192 {
9193 	int err;
9194 
9195 	tg3_stop_fw(tp);
9196 
9197 	tg3_write_sig_pre_reset(tp, kind);
9198 
9199 	tg3_abort_hw(tp, silent);
9200 	err = tg3_chip_reset(tp);
9201 
9202 	__tg3_set_mac_addr(tp, false);
9203 
9204 	tg3_write_sig_legacy(tp, kind);
9205 	tg3_write_sig_post_reset(tp, kind);
9206 
9207 	if (tp->hw_stats) {
9208 		/* Save the stats across chip resets... */
9209 		tg3_get_nstats(tp, &tp->net_stats_prev);
9210 		tg3_get_estats(tp, &tp->estats_prev);
9211 
9212 		/* And make sure the next sample is new data */
9213 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9214 	}
9215 
9216 	return err;
9217 }
9218 
9219 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9220 {
9221 	struct tg3 *tp = netdev_priv(dev);
9222 	struct sockaddr *addr = p;
9223 	int err = 0;
9224 	bool skip_mac_1 = false;
9225 
9226 	if (!is_valid_ether_addr(addr->sa_data))
9227 		return -EADDRNOTAVAIL;
9228 
9229 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9230 
9231 	if (!netif_running(dev))
9232 		return 0;
9233 
9234 	if (tg3_flag(tp, ENABLE_ASF)) {
9235 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9236 
9237 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9238 		addr0_low = tr32(MAC_ADDR_0_LOW);
9239 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9240 		addr1_low = tr32(MAC_ADDR_1_LOW);
9241 
9242 		/* Skip MAC addr 1 if ASF is using it. */
9243 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9244 		    !(addr1_high == 0 && addr1_low == 0))
9245 			skip_mac_1 = true;
9246 	}
9247 	spin_lock_bh(&tp->lock);
9248 	__tg3_set_mac_addr(tp, skip_mac_1);
9249 	spin_unlock_bh(&tp->lock);
9250 
9251 	return err;
9252 }
9253 
9254 /* tp->lock is held. */
9255 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9256 			   dma_addr_t mapping, u32 maxlen_flags,
9257 			   u32 nic_addr)
9258 {
9259 	tg3_write_mem(tp,
9260 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9261 		      ((u64) mapping >> 32));
9262 	tg3_write_mem(tp,
9263 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9264 		      ((u64) mapping & 0xffffffff));
9265 	tg3_write_mem(tp,
9266 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9267 		       maxlen_flags);
9268 
9269 	if (!tg3_flag(tp, 5705_PLUS))
9270 		tg3_write_mem(tp,
9271 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9272 			      nic_addr);
9273 }
9274 
9275 
9276 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9277 {
9278 	int i = 0;
9279 
9280 	if (!tg3_flag(tp, ENABLE_TSS)) {
9281 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9282 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9283 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9284 	} else {
9285 		tw32(HOSTCC_TXCOL_TICKS, 0);
9286 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9287 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9288 
9289 		for (; i < tp->txq_cnt; i++) {
9290 			u32 reg;
9291 
9292 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9293 			tw32(reg, ec->tx_coalesce_usecs);
9294 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9295 			tw32(reg, ec->tx_max_coalesced_frames);
9296 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9297 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9298 		}
9299 	}
9300 
9301 	for (; i < tp->irq_max - 1; i++) {
9302 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9303 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9304 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9305 	}
9306 }
9307 
9308 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9309 {
9310 	int i = 0;
9311 	u32 limit = tp->rxq_cnt;
9312 
9313 	if (!tg3_flag(tp, ENABLE_RSS)) {
9314 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9315 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9316 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9317 		limit--;
9318 	} else {
9319 		tw32(HOSTCC_RXCOL_TICKS, 0);
9320 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9321 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9322 	}
9323 
9324 	for (; i < limit; i++) {
9325 		u32 reg;
9326 
9327 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9328 		tw32(reg, ec->rx_coalesce_usecs);
9329 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9330 		tw32(reg, ec->rx_max_coalesced_frames);
9331 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9332 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9333 	}
9334 
9335 	for (; i < tp->irq_max - 1; i++) {
9336 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9337 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9338 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9339 	}
9340 }
9341 
9342 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9343 {
9344 	tg3_coal_tx_init(tp, ec);
9345 	tg3_coal_rx_init(tp, ec);
9346 
9347 	if (!tg3_flag(tp, 5705_PLUS)) {
9348 		u32 val = ec->stats_block_coalesce_usecs;
9349 
9350 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9351 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9352 
9353 		if (!tp->link_up)
9354 			val = 0;
9355 
9356 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9357 	}
9358 }
9359 
9360 /* tp->lock is held. */
9361 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9362 {
9363 	u32 txrcb, limit;
9364 
9365 	/* Disable all transmit rings but the first. */
9366 	if (!tg3_flag(tp, 5705_PLUS))
9367 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9368 	else if (tg3_flag(tp, 5717_PLUS))
9369 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9370 	else if (tg3_flag(tp, 57765_CLASS) ||
9371 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9372 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9373 	else
9374 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9375 
9376 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9377 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9378 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9379 			      BDINFO_FLAGS_DISABLED);
9380 }
9381 
9382 /* tp->lock is held. */
9383 static void tg3_tx_rcbs_init(struct tg3 *tp)
9384 {
9385 	int i = 0;
9386 	u32 txrcb = NIC_SRAM_SEND_RCB;
9387 
9388 	if (tg3_flag(tp, ENABLE_TSS))
9389 		i++;
9390 
9391 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9392 		struct tg3_napi *tnapi = &tp->napi[i];
9393 
9394 		if (!tnapi->tx_ring)
9395 			continue;
9396 
9397 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9398 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9399 			       NIC_SRAM_TX_BUFFER_DESC);
9400 	}
9401 }
9402 
9403 /* tp->lock is held. */
9404 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9405 {
9406 	u32 rxrcb, limit;
9407 
9408 	/* Disable all receive return rings but the first. */
9409 	if (tg3_flag(tp, 5717_PLUS))
9410 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9411 	else if (!tg3_flag(tp, 5705_PLUS))
9412 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9413 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9414 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9415 		 tg3_flag(tp, 57765_CLASS))
9416 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9417 	else
9418 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9419 
9420 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9421 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9422 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9423 			      BDINFO_FLAGS_DISABLED);
9424 }
9425 
9426 /* tp->lock is held. */
9427 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9428 {
9429 	int i = 0;
9430 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9431 
9432 	if (tg3_flag(tp, ENABLE_RSS))
9433 		i++;
9434 
9435 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9436 		struct tg3_napi *tnapi = &tp->napi[i];
9437 
9438 		if (!tnapi->rx_rcb)
9439 			continue;
9440 
9441 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9442 			       (tp->rx_ret_ring_mask + 1) <<
9443 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9444 	}
9445 }
9446 
9447 /* tp->lock is held. */
9448 static void tg3_rings_reset(struct tg3 *tp)
9449 {
9450 	int i;
9451 	u32 stblk;
9452 	struct tg3_napi *tnapi = &tp->napi[0];
9453 
9454 	tg3_tx_rcbs_disable(tp);
9455 
9456 	tg3_rx_ret_rcbs_disable(tp);
9457 
9458 	/* Disable interrupts */
9459 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9460 	tp->napi[0].chk_msi_cnt = 0;
9461 	tp->napi[0].last_rx_cons = 0;
9462 	tp->napi[0].last_tx_cons = 0;
9463 
9464 	/* Zero mailbox registers. */
9465 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9466 		for (i = 1; i < tp->irq_max; i++) {
9467 			tp->napi[i].tx_prod = 0;
9468 			tp->napi[i].tx_cons = 0;
9469 			if (tg3_flag(tp, ENABLE_TSS))
9470 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9471 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9472 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9473 			tp->napi[i].chk_msi_cnt = 0;
9474 			tp->napi[i].last_rx_cons = 0;
9475 			tp->napi[i].last_tx_cons = 0;
9476 		}
9477 		if (!tg3_flag(tp, ENABLE_TSS))
9478 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9479 	} else {
9480 		tp->napi[0].tx_prod = 0;
9481 		tp->napi[0].tx_cons = 0;
9482 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9483 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9484 	}
9485 
9486 	/* Make sure the NIC-based send BD rings are disabled. */
9487 	if (!tg3_flag(tp, 5705_PLUS)) {
9488 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9489 		for (i = 0; i < 16; i++)
9490 			tw32_tx_mbox(mbox + i * 8, 0);
9491 	}
9492 
9493 	/* Clear status block in ram. */
9494 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9495 
9496 	/* Set status block DMA address */
9497 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9498 	     ((u64) tnapi->status_mapping >> 32));
9499 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9500 	     ((u64) tnapi->status_mapping & 0xffffffff));
9501 
9502 	stblk = HOSTCC_STATBLCK_RING1;
9503 
9504 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9505 		u64 mapping = (u64)tnapi->status_mapping;
9506 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9507 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9508 		stblk += 8;
9509 
9510 		/* Clear status block in ram. */
9511 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9512 	}
9513 
9514 	tg3_tx_rcbs_init(tp);
9515 	tg3_rx_ret_rcbs_init(tp);
9516 }
9517 
9518 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9519 {
9520 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9521 
9522 	if (!tg3_flag(tp, 5750_PLUS) ||
9523 	    tg3_flag(tp, 5780_CLASS) ||
9524 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9525 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9526 	    tg3_flag(tp, 57765_PLUS))
9527 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9528 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9529 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9530 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9531 	else
9532 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9533 
9534 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9535 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9536 
9537 	val = min(nic_rep_thresh, host_rep_thresh);
9538 	tw32(RCVBDI_STD_THRESH, val);
9539 
9540 	if (tg3_flag(tp, 57765_PLUS))
9541 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9542 
9543 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9544 		return;
9545 
9546 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9547 
9548 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9549 
9550 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9551 	tw32(RCVBDI_JUMBO_THRESH, val);
9552 
9553 	if (tg3_flag(tp, 57765_PLUS))
9554 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9555 }
9556 
9557 static inline u32 calc_crc(unsigned char *buf, int len)
9558 {
9559 	u32 reg;
9560 	u32 tmp;
9561 	int j, k;
9562 
9563 	reg = 0xffffffff;
9564 
9565 	for (j = 0; j < len; j++) {
9566 		reg ^= buf[j];
9567 
9568 		for (k = 0; k < 8; k++) {
9569 			tmp = reg & 0x01;
9570 
9571 			reg >>= 1;
9572 
9573 			if (tmp)
9574 				reg ^= 0xedb88320;
9575 		}
9576 	}
9577 
9578 	return ~reg;
9579 }
9580 
9581 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9582 {
9583 	/* accept or reject all multicast frames */
9584 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9585 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9586 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9587 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9588 }
9589 
9590 static void __tg3_set_rx_mode(struct net_device *dev)
9591 {
9592 	struct tg3 *tp = netdev_priv(dev);
9593 	u32 rx_mode;
9594 
9595 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9596 				  RX_MODE_KEEP_VLAN_TAG);
9597 
9598 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9599 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9600 	 * flag clear.
9601 	 */
9602 	if (!tg3_flag(tp, ENABLE_ASF))
9603 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9604 #endif
9605 
9606 	if (dev->flags & IFF_PROMISC) {
9607 		/* Promiscuous mode. */
9608 		rx_mode |= RX_MODE_PROMISC;
9609 	} else if (dev->flags & IFF_ALLMULTI) {
9610 		/* Accept all multicast. */
9611 		tg3_set_multi(tp, 1);
9612 	} else if (netdev_mc_empty(dev)) {
9613 		/* Reject all multicast. */
9614 		tg3_set_multi(tp, 0);
9615 	} else {
9616 		/* Accept one or more multicast(s). */
9617 		struct netdev_hw_addr *ha;
9618 		u32 mc_filter[4] = { 0, };
9619 		u32 regidx;
9620 		u32 bit;
9621 		u32 crc;
9622 
9623 		netdev_for_each_mc_addr(ha, dev) {
9624 			crc = calc_crc(ha->addr, ETH_ALEN);
9625 			bit = ~crc & 0x7f;
9626 			regidx = (bit & 0x60) >> 5;
9627 			bit &= 0x1f;
9628 			mc_filter[regidx] |= (1 << bit);
9629 		}
9630 
9631 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9632 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9633 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9634 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9635 	}
9636 
9637 	if (rx_mode != tp->rx_mode) {
9638 		tp->rx_mode = rx_mode;
9639 		tw32_f(MAC_RX_MODE, rx_mode);
9640 		udelay(10);
9641 	}
9642 }
9643 
9644 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9645 {
9646 	int i;
9647 
9648 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9649 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9650 }
9651 
9652 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9653 {
9654 	int i;
9655 
9656 	if (!tg3_flag(tp, SUPPORT_MSIX))
9657 		return;
9658 
9659 	if (tp->rxq_cnt == 1) {
9660 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9661 		return;
9662 	}
9663 
9664 	/* Validate table against current IRQ count */
9665 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9666 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9667 			break;
9668 	}
9669 
9670 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9671 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9672 }
9673 
9674 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9675 {
9676 	int i = 0;
9677 	u32 reg = MAC_RSS_INDIR_TBL_0;
9678 
9679 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9680 		u32 val = tp->rss_ind_tbl[i];
9681 		i++;
9682 		for (; i % 8; i++) {
9683 			val <<= 4;
9684 			val |= tp->rss_ind_tbl[i];
9685 		}
9686 		tw32(reg, val);
9687 		reg += 4;
9688 	}
9689 }
9690 
9691 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9692 {
9693 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9694 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9695 	else
9696 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9697 }
9698 
9699 /* tp->lock is held. */
9700 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9701 {
9702 	u32 val, rdmac_mode;
9703 	int i, err, limit;
9704 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9705 
9706 	tg3_disable_ints(tp);
9707 
9708 	tg3_stop_fw(tp);
9709 
9710 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9711 
9712 	if (tg3_flag(tp, INIT_COMPLETE))
9713 		tg3_abort_hw(tp, 1);
9714 
9715 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9716 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9717 		tg3_phy_pull_config(tp);
9718 		tg3_eee_pull_config(tp, NULL);
9719 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9720 	}
9721 
9722 	/* Enable MAC control of LPI */
9723 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9724 		tg3_setup_eee(tp);
9725 
9726 	if (reset_phy)
9727 		tg3_phy_reset(tp);
9728 
9729 	err = tg3_chip_reset(tp);
9730 	if (err)
9731 		return err;
9732 
9733 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9734 
9735 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9736 		val = tr32(TG3_CPMU_CTRL);
9737 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9738 		tw32(TG3_CPMU_CTRL, val);
9739 
9740 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9741 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9742 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9743 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9744 
9745 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9746 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9747 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9748 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9749 
9750 		val = tr32(TG3_CPMU_HST_ACC);
9751 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9752 		val |= CPMU_HST_ACC_MACCLK_6_25;
9753 		tw32(TG3_CPMU_HST_ACC, val);
9754 	}
9755 
9756 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9757 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9758 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9759 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9760 		tw32(PCIE_PWR_MGMT_THRESH, val);
9761 
9762 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9763 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9764 
9765 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9766 
9767 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9768 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9769 	}
9770 
9771 	if (tg3_flag(tp, L1PLLPD_EN)) {
9772 		u32 grc_mode = tr32(GRC_MODE);
9773 
9774 		/* Access the lower 1K of PL PCIE block registers. */
9775 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9776 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9777 
9778 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9779 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9780 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9781 
9782 		tw32(GRC_MODE, grc_mode);
9783 	}
9784 
9785 	if (tg3_flag(tp, 57765_CLASS)) {
9786 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9787 			u32 grc_mode = tr32(GRC_MODE);
9788 
9789 			/* Access the lower 1K of PL PCIE block registers. */
9790 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9791 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9792 
9793 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9794 				   TG3_PCIE_PL_LO_PHYCTL5);
9795 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9796 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9797 
9798 			tw32(GRC_MODE, grc_mode);
9799 		}
9800 
9801 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9802 			u32 grc_mode;
9803 
9804 			/* Fix transmit hangs */
9805 			val = tr32(TG3_CPMU_PADRNG_CTL);
9806 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9807 			tw32(TG3_CPMU_PADRNG_CTL, val);
9808 
9809 			grc_mode = tr32(GRC_MODE);
9810 
9811 			/* Access the lower 1K of DL PCIE block registers. */
9812 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9813 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9814 
9815 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9816 				   TG3_PCIE_DL_LO_FTSMAX);
9817 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9818 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9819 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9820 
9821 			tw32(GRC_MODE, grc_mode);
9822 		}
9823 
9824 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9825 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9826 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9827 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9828 	}
9829 
9830 	/* This works around an issue with Athlon chipsets on
9831 	 * B3 tigon3 silicon.  This bit has no effect on any
9832 	 * other revision.  But do not set this on PCI Express
9833 	 * chips and don't even touch the clocks if the CPMU is present.
9834 	 */
9835 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9836 		if (!tg3_flag(tp, PCI_EXPRESS))
9837 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9838 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9839 	}
9840 
9841 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9842 	    tg3_flag(tp, PCIX_MODE)) {
9843 		val = tr32(TG3PCI_PCISTATE);
9844 		val |= PCISTATE_RETRY_SAME_DMA;
9845 		tw32(TG3PCI_PCISTATE, val);
9846 	}
9847 
9848 	if (tg3_flag(tp, ENABLE_APE)) {
9849 		/* Allow reads and writes to the
9850 		 * APE register and memory space.
9851 		 */
9852 		val = tr32(TG3PCI_PCISTATE);
9853 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9854 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9855 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9856 		tw32(TG3PCI_PCISTATE, val);
9857 	}
9858 
9859 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9860 		/* Enable some hw fixes.  */
9861 		val = tr32(TG3PCI_MSI_DATA);
9862 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9863 		tw32(TG3PCI_MSI_DATA, val);
9864 	}
9865 
9866 	/* Descriptor ring init may make accesses to the
9867 	 * NIC SRAM area to setup the TX descriptors, so we
9868 	 * can only do this after the hardware has been
9869 	 * successfully reset.
9870 	 */
9871 	err = tg3_init_rings(tp);
9872 	if (err)
9873 		return err;
9874 
9875 	if (tg3_flag(tp, 57765_PLUS)) {
9876 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9877 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9878 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9879 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9880 		if (!tg3_flag(tp, 57765_CLASS) &&
9881 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9882 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9883 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9884 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9885 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9886 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9887 		/* This value is determined during the probe time DMA
9888 		 * engine test, tg3_test_dma.
9889 		 */
9890 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9891 	}
9892 
9893 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9894 			  GRC_MODE_4X_NIC_SEND_RINGS |
9895 			  GRC_MODE_NO_TX_PHDR_CSUM |
9896 			  GRC_MODE_NO_RX_PHDR_CSUM);
9897 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9898 
9899 	/* Pseudo-header checksum is done by hardware logic and not
9900 	 * the offload processers, so make the chip do the pseudo-
9901 	 * header checksums on receive.  For transmit it is more
9902 	 * convenient to do the pseudo-header checksum in software
9903 	 * as Linux does that on transmit for us in all cases.
9904 	 */
9905 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9906 
9907 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9908 	if (tp->rxptpctl)
9909 		tw32(TG3_RX_PTP_CTL,
9910 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9911 
9912 	if (tg3_flag(tp, PTP_CAPABLE))
9913 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9914 
9915 	tw32(GRC_MODE, tp->grc_mode | val);
9916 
9917 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
9918 	val = tr32(GRC_MISC_CFG);
9919 	val &= ~0xff;
9920 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9921 	tw32(GRC_MISC_CFG, val);
9922 
9923 	/* Initialize MBUF/DESC pool. */
9924 	if (tg3_flag(tp, 5750_PLUS)) {
9925 		/* Do nothing.  */
9926 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9927 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9928 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9929 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9930 		else
9931 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9932 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9933 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9934 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9935 		int fw_len;
9936 
9937 		fw_len = tp->fw_len;
9938 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9939 		tw32(BUFMGR_MB_POOL_ADDR,
9940 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9941 		tw32(BUFMGR_MB_POOL_SIZE,
9942 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9943 	}
9944 
9945 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9946 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9947 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9948 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9949 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9950 		tw32(BUFMGR_MB_HIGH_WATER,
9951 		     tp->bufmgr_config.mbuf_high_water);
9952 	} else {
9953 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9954 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9955 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9956 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9957 		tw32(BUFMGR_MB_HIGH_WATER,
9958 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9959 	}
9960 	tw32(BUFMGR_DMA_LOW_WATER,
9961 	     tp->bufmgr_config.dma_low_water);
9962 	tw32(BUFMGR_DMA_HIGH_WATER,
9963 	     tp->bufmgr_config.dma_high_water);
9964 
9965 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9966 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9967 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9969 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9970 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9971 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9972 	tw32(BUFMGR_MODE, val);
9973 	for (i = 0; i < 2000; i++) {
9974 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9975 			break;
9976 		udelay(10);
9977 	}
9978 	if (i >= 2000) {
9979 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9980 		return -ENODEV;
9981 	}
9982 
9983 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9984 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9985 
9986 	tg3_setup_rxbd_thresholds(tp);
9987 
9988 	/* Initialize TG3_BDINFO's at:
9989 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9990 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9991 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9992 	 *
9993 	 * like so:
9994 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9995 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9996 	 *                              ring attribute flags
9997 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9998 	 *
9999 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10000 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10001 	 *
10002 	 * The size of each ring is fixed in the firmware, but the location is
10003 	 * configurable.
10004 	 */
10005 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10006 	     ((u64) tpr->rx_std_mapping >> 32));
10007 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10008 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10009 	if (!tg3_flag(tp, 5717_PLUS))
10010 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10011 		     NIC_SRAM_RX_BUFFER_DESC);
10012 
10013 	/* Disable the mini ring */
10014 	if (!tg3_flag(tp, 5705_PLUS))
10015 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10016 		     BDINFO_FLAGS_DISABLED);
10017 
10018 	/* Program the jumbo buffer descriptor ring control
10019 	 * blocks on those devices that have them.
10020 	 */
10021 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10022 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10023 
10024 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10025 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10026 			     ((u64) tpr->rx_jmb_mapping >> 32));
10027 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10028 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10029 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10030 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10031 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10032 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10033 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10034 			    tg3_flag(tp, 57765_CLASS) ||
10035 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10036 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10037 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10038 		} else {
10039 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10040 			     BDINFO_FLAGS_DISABLED);
10041 		}
10042 
10043 		if (tg3_flag(tp, 57765_PLUS)) {
10044 			val = TG3_RX_STD_RING_SIZE(tp);
10045 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10046 			val |= (TG3_RX_STD_DMA_SZ << 2);
10047 		} else
10048 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10049 	} else
10050 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10051 
10052 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10053 
10054 	tpr->rx_std_prod_idx = tp->rx_pending;
10055 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10056 
10057 	tpr->rx_jmb_prod_idx =
10058 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10059 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10060 
10061 	tg3_rings_reset(tp);
10062 
10063 	/* Initialize MAC address and backoff seed. */
10064 	__tg3_set_mac_addr(tp, false);
10065 
10066 	/* MTU + ethernet header + FCS + optional VLAN tag */
10067 	tw32(MAC_RX_MTU_SIZE,
10068 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10069 
10070 	/* The slot time is changed by tg3_setup_phy if we
10071 	 * run at gigabit with half duplex.
10072 	 */
10073 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10074 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10075 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10076 
10077 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10078 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10079 		val |= tr32(MAC_TX_LENGTHS) &
10080 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10081 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10082 
10083 	tw32(MAC_TX_LENGTHS, val);
10084 
10085 	/* Receive rules. */
10086 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10087 	tw32(RCVLPC_CONFIG, 0x0181);
10088 
10089 	/* Calculate RDMAC_MODE setting early, we need it to determine
10090 	 * the RCVLPC_STATE_ENABLE mask.
10091 	 */
10092 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10093 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10094 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10095 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10096 		      RDMAC_MODE_LNGREAD_ENAB);
10097 
10098 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10099 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10100 
10101 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10102 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10103 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10104 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10105 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10106 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10107 
10108 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10109 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10110 		if (tg3_flag(tp, TSO_CAPABLE) &&
10111 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10112 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10113 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10114 			   !tg3_flag(tp, IS_5788)) {
10115 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10116 		}
10117 	}
10118 
10119 	if (tg3_flag(tp, PCI_EXPRESS))
10120 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10121 
10122 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10123 		tp->dma_limit = 0;
10124 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10125 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10126 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10127 		}
10128 	}
10129 
10130 	if (tg3_flag(tp, HW_TSO_1) ||
10131 	    tg3_flag(tp, HW_TSO_2) ||
10132 	    tg3_flag(tp, HW_TSO_3))
10133 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10134 
10135 	if (tg3_flag(tp, 57765_PLUS) ||
10136 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10137 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10138 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10139 
10140 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10141 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10142 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10143 
10144 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10145 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10146 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10147 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10148 	    tg3_flag(tp, 57765_PLUS)) {
10149 		u32 tgtreg;
10150 
10151 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10152 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10153 		else
10154 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10155 
10156 		val = tr32(tgtreg);
10157 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10158 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10159 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10160 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10161 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10162 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10163 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10164 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10165 		}
10166 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10167 	}
10168 
10169 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10170 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10171 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10172 		u32 tgtreg;
10173 
10174 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10175 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10176 		else
10177 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10178 
10179 		val = tr32(tgtreg);
10180 		tw32(tgtreg, val |
10181 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10182 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10183 	}
10184 
10185 	/* Receive/send statistics. */
10186 	if (tg3_flag(tp, 5750_PLUS)) {
10187 		val = tr32(RCVLPC_STATS_ENABLE);
10188 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10189 		tw32(RCVLPC_STATS_ENABLE, val);
10190 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10191 		   tg3_flag(tp, TSO_CAPABLE)) {
10192 		val = tr32(RCVLPC_STATS_ENABLE);
10193 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10194 		tw32(RCVLPC_STATS_ENABLE, val);
10195 	} else {
10196 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10197 	}
10198 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10199 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10200 	tw32(SNDDATAI_STATSCTRL,
10201 	     (SNDDATAI_SCTRL_ENABLE |
10202 	      SNDDATAI_SCTRL_FASTUPD));
10203 
10204 	/* Setup host coalescing engine. */
10205 	tw32(HOSTCC_MODE, 0);
10206 	for (i = 0; i < 2000; i++) {
10207 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10208 			break;
10209 		udelay(10);
10210 	}
10211 
10212 	__tg3_set_coalesce(tp, &tp->coal);
10213 
10214 	if (!tg3_flag(tp, 5705_PLUS)) {
10215 		/* Status/statistics block address.  See tg3_timer,
10216 		 * the tg3_periodic_fetch_stats call there, and
10217 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10218 		 */
10219 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10220 		     ((u64) tp->stats_mapping >> 32));
10221 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10222 		     ((u64) tp->stats_mapping & 0xffffffff));
10223 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10224 
10225 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10226 
10227 		/* Clear statistics and status block memory areas */
10228 		for (i = NIC_SRAM_STATS_BLK;
10229 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10230 		     i += sizeof(u32)) {
10231 			tg3_write_mem(tp, i, 0);
10232 			udelay(40);
10233 		}
10234 	}
10235 
10236 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10237 
10238 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10239 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10240 	if (!tg3_flag(tp, 5705_PLUS))
10241 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10242 
10243 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10244 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10245 		/* reset to prevent losing 1st rx packet intermittently */
10246 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10247 		udelay(10);
10248 	}
10249 
10250 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10251 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10252 			MAC_MODE_FHDE_ENABLE;
10253 	if (tg3_flag(tp, ENABLE_APE))
10254 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10255 	if (!tg3_flag(tp, 5705_PLUS) &&
10256 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10257 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10258 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10259 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10260 	udelay(40);
10261 
10262 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10263 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10264 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10265 	 * whether used as inputs or outputs, are set by boot code after
10266 	 * reset.
10267 	 */
10268 	if (!tg3_flag(tp, IS_NIC)) {
10269 		u32 gpio_mask;
10270 
10271 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10272 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10273 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10274 
10275 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10276 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10277 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10278 
10279 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10280 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10281 
10282 		tp->grc_local_ctrl &= ~gpio_mask;
10283 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10284 
10285 		/* GPIO1 must be driven high for eeprom write protect */
10286 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10287 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10288 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10289 	}
10290 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10291 	udelay(100);
10292 
10293 	if (tg3_flag(tp, USING_MSIX)) {
10294 		val = tr32(MSGINT_MODE);
10295 		val |= MSGINT_MODE_ENABLE;
10296 		if (tp->irq_cnt > 1)
10297 			val |= MSGINT_MODE_MULTIVEC_EN;
10298 		if (!tg3_flag(tp, 1SHOT_MSI))
10299 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10300 		tw32(MSGINT_MODE, val);
10301 	}
10302 
10303 	if (!tg3_flag(tp, 5705_PLUS)) {
10304 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10305 		udelay(40);
10306 	}
10307 
10308 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10309 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10310 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10311 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10312 	       WDMAC_MODE_LNGREAD_ENAB);
10313 
10314 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10315 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10316 		if (tg3_flag(tp, TSO_CAPABLE) &&
10317 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10318 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10319 			/* nothing */
10320 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10321 			   !tg3_flag(tp, IS_5788)) {
10322 			val |= WDMAC_MODE_RX_ACCEL;
10323 		}
10324 	}
10325 
10326 	/* Enable host coalescing bug fix */
10327 	if (tg3_flag(tp, 5755_PLUS))
10328 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10329 
10330 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10331 		val |= WDMAC_MODE_BURST_ALL_DATA;
10332 
10333 	tw32_f(WDMAC_MODE, val);
10334 	udelay(40);
10335 
10336 	if (tg3_flag(tp, PCIX_MODE)) {
10337 		u16 pcix_cmd;
10338 
10339 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10340 				     &pcix_cmd);
10341 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10342 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10343 			pcix_cmd |= PCI_X_CMD_READ_2K;
10344 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10345 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10346 			pcix_cmd |= PCI_X_CMD_READ_2K;
10347 		}
10348 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10349 				      pcix_cmd);
10350 	}
10351 
10352 	tw32_f(RDMAC_MODE, rdmac_mode);
10353 	udelay(40);
10354 
10355 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10356 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10357 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10358 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10359 				break;
10360 		}
10361 		if (i < TG3_NUM_RDMA_CHANNELS) {
10362 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10363 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10364 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10365 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10366 		}
10367 	}
10368 
10369 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10370 	if (!tg3_flag(tp, 5705_PLUS))
10371 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10372 
10373 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10374 		tw32(SNDDATAC_MODE,
10375 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10376 	else
10377 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10378 
10379 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10380 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10381 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10382 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10383 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10384 	tw32(RCVDBDI_MODE, val);
10385 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10386 	if (tg3_flag(tp, HW_TSO_1) ||
10387 	    tg3_flag(tp, HW_TSO_2) ||
10388 	    tg3_flag(tp, HW_TSO_3))
10389 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10390 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10391 	if (tg3_flag(tp, ENABLE_TSS))
10392 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10393 	tw32(SNDBDI_MODE, val);
10394 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10395 
10396 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10397 		err = tg3_load_5701_a0_firmware_fix(tp);
10398 		if (err)
10399 			return err;
10400 	}
10401 
10402 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10403 		/* Ignore any errors for the firmware download. If download
10404 		 * fails, the device will operate with EEE disabled
10405 		 */
10406 		tg3_load_57766_firmware(tp);
10407 	}
10408 
10409 	if (tg3_flag(tp, TSO_CAPABLE)) {
10410 		err = tg3_load_tso_firmware(tp);
10411 		if (err)
10412 			return err;
10413 	}
10414 
10415 	tp->tx_mode = TX_MODE_ENABLE;
10416 
10417 	if (tg3_flag(tp, 5755_PLUS) ||
10418 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10419 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10420 
10421 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10422 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10423 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10424 		tp->tx_mode &= ~val;
10425 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10426 	}
10427 
10428 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10429 	udelay(100);
10430 
10431 	if (tg3_flag(tp, ENABLE_RSS)) {
10432 		tg3_rss_write_indir_tbl(tp);
10433 
10434 		/* Setup the "secret" hash key. */
10435 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10436 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10437 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10438 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10439 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10440 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10441 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10442 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10443 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10444 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10445 	}
10446 
10447 	tp->rx_mode = RX_MODE_ENABLE;
10448 	if (tg3_flag(tp, 5755_PLUS))
10449 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10450 
10451 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10452 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10453 
10454 	if (tg3_flag(tp, ENABLE_RSS))
10455 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10456 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10457 			       RX_MODE_RSS_IPV6_HASH_EN |
10458 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10459 			       RX_MODE_RSS_IPV4_HASH_EN |
10460 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10461 
10462 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10463 	udelay(10);
10464 
10465 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10466 
10467 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10468 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10469 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10470 		udelay(10);
10471 	}
10472 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10473 	udelay(10);
10474 
10475 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10476 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10477 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10478 			/* Set drive transmission level to 1.2V  */
10479 			/* only if the signal pre-emphasis bit is not set  */
10480 			val = tr32(MAC_SERDES_CFG);
10481 			val &= 0xfffff000;
10482 			val |= 0x880;
10483 			tw32(MAC_SERDES_CFG, val);
10484 		}
10485 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10486 			tw32(MAC_SERDES_CFG, 0x616000);
10487 	}
10488 
10489 	/* Prevent chip from dropping frames when flow control
10490 	 * is enabled.
10491 	 */
10492 	if (tg3_flag(tp, 57765_CLASS))
10493 		val = 1;
10494 	else
10495 		val = 2;
10496 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10497 
10498 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10499 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10500 		/* Use hardware link auto-negotiation */
10501 		tg3_flag_set(tp, HW_AUTONEG);
10502 	}
10503 
10504 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10505 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10506 		u32 tmp;
10507 
10508 		tmp = tr32(SERDES_RX_CTRL);
10509 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10510 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10511 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10512 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10513 	}
10514 
10515 	if (!tg3_flag(tp, USE_PHYLIB)) {
10516 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10517 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10518 
10519 		err = tg3_setup_phy(tp, false);
10520 		if (err)
10521 			return err;
10522 
10523 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10524 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10525 			u32 tmp;
10526 
10527 			/* Clear CRC stats. */
10528 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10529 				tg3_writephy(tp, MII_TG3_TEST1,
10530 					     tmp | MII_TG3_TEST1_CRC_EN);
10531 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10532 			}
10533 		}
10534 	}
10535 
10536 	__tg3_set_rx_mode(tp->dev);
10537 
10538 	/* Initialize receive rules. */
10539 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10540 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10541 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10542 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10543 
10544 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10545 		limit = 8;
10546 	else
10547 		limit = 16;
10548 	if (tg3_flag(tp, ENABLE_ASF))
10549 		limit -= 4;
10550 	switch (limit) {
10551 	case 16:
10552 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10553 	case 15:
10554 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10555 	case 14:
10556 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10557 	case 13:
10558 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10559 	case 12:
10560 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10561 	case 11:
10562 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10563 	case 10:
10564 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10565 	case 9:
10566 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10567 	case 8:
10568 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10569 	case 7:
10570 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10571 	case 6:
10572 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10573 	case 5:
10574 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10575 	case 4:
10576 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10577 	case 3:
10578 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10579 	case 2:
10580 	case 1:
10581 
10582 	default:
10583 		break;
10584 	}
10585 
10586 	if (tg3_flag(tp, ENABLE_APE))
10587 		/* Write our heartbeat update interval to APE. */
10588 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10589 				APE_HOST_HEARTBEAT_INT_DISABLE);
10590 
10591 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10592 
10593 	return 0;
10594 }
10595 
10596 /* Called at device open time to get the chip ready for
10597  * packet processing.  Invoked with tp->lock held.
10598  */
10599 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10600 {
10601 	/* Chip may have been just powered on. If so, the boot code may still
10602 	 * be running initialization. Wait for it to finish to avoid races in
10603 	 * accessing the hardware.
10604 	 */
10605 	tg3_enable_register_access(tp);
10606 	tg3_poll_fw(tp);
10607 
10608 	tg3_switch_clocks(tp);
10609 
10610 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10611 
10612 	return tg3_reset_hw(tp, reset_phy);
10613 }
10614 
10615 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10616 {
10617 	int i;
10618 
10619 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10620 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10621 
10622 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10623 		off += len;
10624 
10625 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10626 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10627 			memset(ocir, 0, TG3_OCIR_LEN);
10628 	}
10629 }
10630 
10631 /* sysfs attributes for hwmon */
10632 static ssize_t tg3_show_temp(struct device *dev,
10633 			     struct device_attribute *devattr, char *buf)
10634 {
10635 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10636 	struct tg3 *tp = dev_get_drvdata(dev);
10637 	u32 temperature;
10638 
10639 	spin_lock_bh(&tp->lock);
10640 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10641 				sizeof(temperature));
10642 	spin_unlock_bh(&tp->lock);
10643 	return sprintf(buf, "%u\n", temperature);
10644 }
10645 
10646 
10647 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10648 			  TG3_TEMP_SENSOR_OFFSET);
10649 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10650 			  TG3_TEMP_CAUTION_OFFSET);
10651 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10652 			  TG3_TEMP_MAX_OFFSET);
10653 
10654 static struct attribute *tg3_attrs[] = {
10655 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10656 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10657 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10658 	NULL
10659 };
10660 ATTRIBUTE_GROUPS(tg3);
10661 
10662 static void tg3_hwmon_close(struct tg3 *tp)
10663 {
10664 	if (tp->hwmon_dev) {
10665 		hwmon_device_unregister(tp->hwmon_dev);
10666 		tp->hwmon_dev = NULL;
10667 	}
10668 }
10669 
10670 static void tg3_hwmon_open(struct tg3 *tp)
10671 {
10672 	int i;
10673 	u32 size = 0;
10674 	struct pci_dev *pdev = tp->pdev;
10675 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10676 
10677 	tg3_sd_scan_scratchpad(tp, ocirs);
10678 
10679 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10680 		if (!ocirs[i].src_data_length)
10681 			continue;
10682 
10683 		size += ocirs[i].src_hdr_length;
10684 		size += ocirs[i].src_data_length;
10685 	}
10686 
10687 	if (!size)
10688 		return;
10689 
10690 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10691 							  tp, tg3_groups);
10692 	if (IS_ERR(tp->hwmon_dev)) {
10693 		tp->hwmon_dev = NULL;
10694 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10695 	}
10696 }
10697 
10698 
10699 #define TG3_STAT_ADD32(PSTAT, REG) \
10700 do {	u32 __val = tr32(REG); \
10701 	(PSTAT)->low += __val; \
10702 	if ((PSTAT)->low < __val) \
10703 		(PSTAT)->high += 1; \
10704 } while (0)
10705 
10706 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10707 {
10708 	struct tg3_hw_stats *sp = tp->hw_stats;
10709 
10710 	if (!tp->link_up)
10711 		return;
10712 
10713 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10714 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10715 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10716 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10717 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10718 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10719 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10720 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10721 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10722 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10723 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10724 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10725 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10726 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10727 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10728 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10729 		u32 val;
10730 
10731 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10732 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10733 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10734 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10735 	}
10736 
10737 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10738 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10739 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10740 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10741 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10742 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10743 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10744 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10745 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10746 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10747 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10748 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10749 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10750 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10751 
10752 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10753 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10754 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10755 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10756 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10757 	} else {
10758 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10759 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10760 		if (val) {
10761 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10762 			sp->rx_discards.low += val;
10763 			if (sp->rx_discards.low < val)
10764 				sp->rx_discards.high += 1;
10765 		}
10766 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10767 	}
10768 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10769 }
10770 
10771 static void tg3_chk_missed_msi(struct tg3 *tp)
10772 {
10773 	u32 i;
10774 
10775 	for (i = 0; i < tp->irq_cnt; i++) {
10776 		struct tg3_napi *tnapi = &tp->napi[i];
10777 
10778 		if (tg3_has_work(tnapi)) {
10779 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10780 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10781 				if (tnapi->chk_msi_cnt < 1) {
10782 					tnapi->chk_msi_cnt++;
10783 					return;
10784 				}
10785 				tg3_msi(0, tnapi);
10786 			}
10787 		}
10788 		tnapi->chk_msi_cnt = 0;
10789 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10790 		tnapi->last_tx_cons = tnapi->tx_cons;
10791 	}
10792 }
10793 
10794 static void tg3_timer(unsigned long __opaque)
10795 {
10796 	struct tg3 *tp = (struct tg3 *) __opaque;
10797 
10798 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10799 		goto restart_timer;
10800 
10801 	spin_lock(&tp->lock);
10802 
10803 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10804 	    tg3_flag(tp, 57765_CLASS))
10805 		tg3_chk_missed_msi(tp);
10806 
10807 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10808 		/* BCM4785: Flush posted writes from GbE to host memory. */
10809 		tr32(HOSTCC_MODE);
10810 	}
10811 
10812 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10813 		/* All of this garbage is because when using non-tagged
10814 		 * IRQ status the mailbox/status_block protocol the chip
10815 		 * uses with the cpu is race prone.
10816 		 */
10817 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10818 			tw32(GRC_LOCAL_CTRL,
10819 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10820 		} else {
10821 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10822 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10823 		}
10824 
10825 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10826 			spin_unlock(&tp->lock);
10827 			tg3_reset_task_schedule(tp);
10828 			goto restart_timer;
10829 		}
10830 	}
10831 
10832 	/* This part only runs once per second. */
10833 	if (!--tp->timer_counter) {
10834 		if (tg3_flag(tp, 5705_PLUS))
10835 			tg3_periodic_fetch_stats(tp);
10836 
10837 		if (tp->setlpicnt && !--tp->setlpicnt)
10838 			tg3_phy_eee_enable(tp);
10839 
10840 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10841 			u32 mac_stat;
10842 			int phy_event;
10843 
10844 			mac_stat = tr32(MAC_STATUS);
10845 
10846 			phy_event = 0;
10847 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10848 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10849 					phy_event = 1;
10850 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10851 				phy_event = 1;
10852 
10853 			if (phy_event)
10854 				tg3_setup_phy(tp, false);
10855 		} else if (tg3_flag(tp, POLL_SERDES)) {
10856 			u32 mac_stat = tr32(MAC_STATUS);
10857 			int need_setup = 0;
10858 
10859 			if (tp->link_up &&
10860 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10861 				need_setup = 1;
10862 			}
10863 			if (!tp->link_up &&
10864 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10865 					 MAC_STATUS_SIGNAL_DET))) {
10866 				need_setup = 1;
10867 			}
10868 			if (need_setup) {
10869 				if (!tp->serdes_counter) {
10870 					tw32_f(MAC_MODE,
10871 					     (tp->mac_mode &
10872 					      ~MAC_MODE_PORT_MODE_MASK));
10873 					udelay(40);
10874 					tw32_f(MAC_MODE, tp->mac_mode);
10875 					udelay(40);
10876 				}
10877 				tg3_setup_phy(tp, false);
10878 			}
10879 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10880 			   tg3_flag(tp, 5780_CLASS)) {
10881 			tg3_serdes_parallel_detect(tp);
10882 		}
10883 
10884 		tp->timer_counter = tp->timer_multiplier;
10885 	}
10886 
10887 	/* Heartbeat is only sent once every 2 seconds.
10888 	 *
10889 	 * The heartbeat is to tell the ASF firmware that the host
10890 	 * driver is still alive.  In the event that the OS crashes,
10891 	 * ASF needs to reset the hardware to free up the FIFO space
10892 	 * that may be filled with rx packets destined for the host.
10893 	 * If the FIFO is full, ASF will no longer function properly.
10894 	 *
10895 	 * Unintended resets have been reported on real time kernels
10896 	 * where the timer doesn't run on time.  Netpoll will also have
10897 	 * same problem.
10898 	 *
10899 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10900 	 * to check the ring condition when the heartbeat is expiring
10901 	 * before doing the reset.  This will prevent most unintended
10902 	 * resets.
10903 	 */
10904 	if (!--tp->asf_counter) {
10905 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10906 			tg3_wait_for_event_ack(tp);
10907 
10908 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10909 				      FWCMD_NICDRV_ALIVE3);
10910 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10911 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10912 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10913 
10914 			tg3_generate_fw_event(tp);
10915 		}
10916 		tp->asf_counter = tp->asf_multiplier;
10917 	}
10918 
10919 	spin_unlock(&tp->lock);
10920 
10921 restart_timer:
10922 	tp->timer.expires = jiffies + tp->timer_offset;
10923 	add_timer(&tp->timer);
10924 }
10925 
10926 static void tg3_timer_init(struct tg3 *tp)
10927 {
10928 	if (tg3_flag(tp, TAGGED_STATUS) &&
10929 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10930 	    !tg3_flag(tp, 57765_CLASS))
10931 		tp->timer_offset = HZ;
10932 	else
10933 		tp->timer_offset = HZ / 10;
10934 
10935 	BUG_ON(tp->timer_offset > HZ);
10936 
10937 	tp->timer_multiplier = (HZ / tp->timer_offset);
10938 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10939 			     TG3_FW_UPDATE_FREQ_SEC;
10940 
10941 	init_timer(&tp->timer);
10942 	tp->timer.data = (unsigned long) tp;
10943 	tp->timer.function = tg3_timer;
10944 }
10945 
10946 static void tg3_timer_start(struct tg3 *tp)
10947 {
10948 	tp->asf_counter   = tp->asf_multiplier;
10949 	tp->timer_counter = tp->timer_multiplier;
10950 
10951 	tp->timer.expires = jiffies + tp->timer_offset;
10952 	add_timer(&tp->timer);
10953 }
10954 
10955 static void tg3_timer_stop(struct tg3 *tp)
10956 {
10957 	del_timer_sync(&tp->timer);
10958 }
10959 
10960 /* Restart hardware after configuration changes, self-test, etc.
10961  * Invoked with tp->lock held.
10962  */
10963 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10964 	__releases(tp->lock)
10965 	__acquires(tp->lock)
10966 {
10967 	int err;
10968 
10969 	err = tg3_init_hw(tp, reset_phy);
10970 	if (err) {
10971 		netdev_err(tp->dev,
10972 			   "Failed to re-initialize device, aborting\n");
10973 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10974 		tg3_full_unlock(tp);
10975 		tg3_timer_stop(tp);
10976 		tp->irq_sync = 0;
10977 		tg3_napi_enable(tp);
10978 		dev_close(tp->dev);
10979 		tg3_full_lock(tp, 0);
10980 	}
10981 	return err;
10982 }
10983 
10984 static void tg3_reset_task(struct work_struct *work)
10985 {
10986 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10987 	int err;
10988 
10989 	tg3_full_lock(tp, 0);
10990 
10991 	if (!netif_running(tp->dev)) {
10992 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10993 		tg3_full_unlock(tp);
10994 		return;
10995 	}
10996 
10997 	tg3_full_unlock(tp);
10998 
10999 	tg3_phy_stop(tp);
11000 
11001 	tg3_netif_stop(tp);
11002 
11003 	tg3_full_lock(tp, 1);
11004 
11005 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11006 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11007 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11008 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11009 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11010 	}
11011 
11012 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11013 	err = tg3_init_hw(tp, true);
11014 	if (err)
11015 		goto out;
11016 
11017 	tg3_netif_start(tp);
11018 
11019 out:
11020 	tg3_full_unlock(tp);
11021 
11022 	if (!err)
11023 		tg3_phy_start(tp);
11024 
11025 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11026 }
11027 
11028 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11029 {
11030 	irq_handler_t fn;
11031 	unsigned long flags;
11032 	char *name;
11033 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11034 
11035 	if (tp->irq_cnt == 1)
11036 		name = tp->dev->name;
11037 	else {
11038 		name = &tnapi->irq_lbl[0];
11039 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11040 			snprintf(name, IFNAMSIZ,
11041 				 "%s-txrx-%d", tp->dev->name, irq_num);
11042 		else if (tnapi->tx_buffers)
11043 			snprintf(name, IFNAMSIZ,
11044 				 "%s-tx-%d", tp->dev->name, irq_num);
11045 		else if (tnapi->rx_rcb)
11046 			snprintf(name, IFNAMSIZ,
11047 				 "%s-rx-%d", tp->dev->name, irq_num);
11048 		else
11049 			snprintf(name, IFNAMSIZ,
11050 				 "%s-%d", tp->dev->name, irq_num);
11051 		name[IFNAMSIZ-1] = 0;
11052 	}
11053 
11054 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11055 		fn = tg3_msi;
11056 		if (tg3_flag(tp, 1SHOT_MSI))
11057 			fn = tg3_msi_1shot;
11058 		flags = 0;
11059 	} else {
11060 		fn = tg3_interrupt;
11061 		if (tg3_flag(tp, TAGGED_STATUS))
11062 			fn = tg3_interrupt_tagged;
11063 		flags = IRQF_SHARED;
11064 	}
11065 
11066 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11067 }
11068 
11069 static int tg3_test_interrupt(struct tg3 *tp)
11070 {
11071 	struct tg3_napi *tnapi = &tp->napi[0];
11072 	struct net_device *dev = tp->dev;
11073 	int err, i, intr_ok = 0;
11074 	u32 val;
11075 
11076 	if (!netif_running(dev))
11077 		return -ENODEV;
11078 
11079 	tg3_disable_ints(tp);
11080 
11081 	free_irq(tnapi->irq_vec, tnapi);
11082 
11083 	/*
11084 	 * Turn off MSI one shot mode.  Otherwise this test has no
11085 	 * observable way to know whether the interrupt was delivered.
11086 	 */
11087 	if (tg3_flag(tp, 57765_PLUS)) {
11088 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11089 		tw32(MSGINT_MODE, val);
11090 	}
11091 
11092 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11093 			  IRQF_SHARED, dev->name, tnapi);
11094 	if (err)
11095 		return err;
11096 
11097 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11098 	tg3_enable_ints(tp);
11099 
11100 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11101 	       tnapi->coal_now);
11102 
11103 	for (i = 0; i < 5; i++) {
11104 		u32 int_mbox, misc_host_ctrl;
11105 
11106 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11107 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11108 
11109 		if ((int_mbox != 0) ||
11110 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11111 			intr_ok = 1;
11112 			break;
11113 		}
11114 
11115 		if (tg3_flag(tp, 57765_PLUS) &&
11116 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11117 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11118 
11119 		msleep(10);
11120 	}
11121 
11122 	tg3_disable_ints(tp);
11123 
11124 	free_irq(tnapi->irq_vec, tnapi);
11125 
11126 	err = tg3_request_irq(tp, 0);
11127 
11128 	if (err)
11129 		return err;
11130 
11131 	if (intr_ok) {
11132 		/* Reenable MSI one shot mode. */
11133 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11134 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11135 			tw32(MSGINT_MODE, val);
11136 		}
11137 		return 0;
11138 	}
11139 
11140 	return -EIO;
11141 }
11142 
11143 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11144  * successfully restored
11145  */
11146 static int tg3_test_msi(struct tg3 *tp)
11147 {
11148 	int err;
11149 	u16 pci_cmd;
11150 
11151 	if (!tg3_flag(tp, USING_MSI))
11152 		return 0;
11153 
11154 	/* Turn off SERR reporting in case MSI terminates with Master
11155 	 * Abort.
11156 	 */
11157 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11158 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11159 			      pci_cmd & ~PCI_COMMAND_SERR);
11160 
11161 	err = tg3_test_interrupt(tp);
11162 
11163 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11164 
11165 	if (!err)
11166 		return 0;
11167 
11168 	/* other failures */
11169 	if (err != -EIO)
11170 		return err;
11171 
11172 	/* MSI test failed, go back to INTx mode */
11173 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11174 		    "to INTx mode. Please report this failure to the PCI "
11175 		    "maintainer and include system chipset information\n");
11176 
11177 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11178 
11179 	pci_disable_msi(tp->pdev);
11180 
11181 	tg3_flag_clear(tp, USING_MSI);
11182 	tp->napi[0].irq_vec = tp->pdev->irq;
11183 
11184 	err = tg3_request_irq(tp, 0);
11185 	if (err)
11186 		return err;
11187 
11188 	/* Need to reset the chip because the MSI cycle may have terminated
11189 	 * with Master Abort.
11190 	 */
11191 	tg3_full_lock(tp, 1);
11192 
11193 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11194 	err = tg3_init_hw(tp, true);
11195 
11196 	tg3_full_unlock(tp);
11197 
11198 	if (err)
11199 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11200 
11201 	return err;
11202 }
11203 
11204 static int tg3_request_firmware(struct tg3 *tp)
11205 {
11206 	const struct tg3_firmware_hdr *fw_hdr;
11207 
11208 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11209 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11210 			   tp->fw_needed);
11211 		return -ENOENT;
11212 	}
11213 
11214 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11215 
11216 	/* Firmware blob starts with version numbers, followed by
11217 	 * start address and _full_ length including BSS sections
11218 	 * (which must be longer than the actual data, of course
11219 	 */
11220 
11221 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11222 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11223 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11224 			   tp->fw_len, tp->fw_needed);
11225 		release_firmware(tp->fw);
11226 		tp->fw = NULL;
11227 		return -EINVAL;
11228 	}
11229 
11230 	/* We no longer need firmware; we have it. */
11231 	tp->fw_needed = NULL;
11232 	return 0;
11233 }
11234 
11235 static u32 tg3_irq_count(struct tg3 *tp)
11236 {
11237 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11238 
11239 	if (irq_cnt > 1) {
11240 		/* We want as many rx rings enabled as there are cpus.
11241 		 * In multiqueue MSI-X mode, the first MSI-X vector
11242 		 * only deals with link interrupts, etc, so we add
11243 		 * one to the number of vectors we are requesting.
11244 		 */
11245 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11246 	}
11247 
11248 	return irq_cnt;
11249 }
11250 
11251 static bool tg3_enable_msix(struct tg3 *tp)
11252 {
11253 	int i, rc;
11254 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11255 
11256 	tp->txq_cnt = tp->txq_req;
11257 	tp->rxq_cnt = tp->rxq_req;
11258 	if (!tp->rxq_cnt)
11259 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11260 	if (tp->rxq_cnt > tp->rxq_max)
11261 		tp->rxq_cnt = tp->rxq_max;
11262 
11263 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11264 	 * scheduling of the TX rings can cause starvation of rings with
11265 	 * small packets when other rings have TSO or jumbo packets.
11266 	 */
11267 	if (!tp->txq_req)
11268 		tp->txq_cnt = 1;
11269 
11270 	tp->irq_cnt = tg3_irq_count(tp);
11271 
11272 	for (i = 0; i < tp->irq_max; i++) {
11273 		msix_ent[i].entry  = i;
11274 		msix_ent[i].vector = 0;
11275 	}
11276 
11277 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11278 	if (rc < 0) {
11279 		return false;
11280 	} else if (rc != 0) {
11281 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11282 			return false;
11283 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11284 			      tp->irq_cnt, rc);
11285 		tp->irq_cnt = rc;
11286 		tp->rxq_cnt = max(rc - 1, 1);
11287 		if (tp->txq_cnt)
11288 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11289 	}
11290 
11291 	for (i = 0; i < tp->irq_max; i++)
11292 		tp->napi[i].irq_vec = msix_ent[i].vector;
11293 
11294 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11295 		pci_disable_msix(tp->pdev);
11296 		return false;
11297 	}
11298 
11299 	if (tp->irq_cnt == 1)
11300 		return true;
11301 
11302 	tg3_flag_set(tp, ENABLE_RSS);
11303 
11304 	if (tp->txq_cnt > 1)
11305 		tg3_flag_set(tp, ENABLE_TSS);
11306 
11307 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11308 
11309 	return true;
11310 }
11311 
11312 static void tg3_ints_init(struct tg3 *tp)
11313 {
11314 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11315 	    !tg3_flag(tp, TAGGED_STATUS)) {
11316 		/* All MSI supporting chips should support tagged
11317 		 * status.  Assert that this is the case.
11318 		 */
11319 		netdev_warn(tp->dev,
11320 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11321 		goto defcfg;
11322 	}
11323 
11324 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11325 		tg3_flag_set(tp, USING_MSIX);
11326 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11327 		tg3_flag_set(tp, USING_MSI);
11328 
11329 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11330 		u32 msi_mode = tr32(MSGINT_MODE);
11331 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11332 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11333 		if (!tg3_flag(tp, 1SHOT_MSI))
11334 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11335 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11336 	}
11337 defcfg:
11338 	if (!tg3_flag(tp, USING_MSIX)) {
11339 		tp->irq_cnt = 1;
11340 		tp->napi[0].irq_vec = tp->pdev->irq;
11341 	}
11342 
11343 	if (tp->irq_cnt == 1) {
11344 		tp->txq_cnt = 1;
11345 		tp->rxq_cnt = 1;
11346 		netif_set_real_num_tx_queues(tp->dev, 1);
11347 		netif_set_real_num_rx_queues(tp->dev, 1);
11348 	}
11349 }
11350 
11351 static void tg3_ints_fini(struct tg3 *tp)
11352 {
11353 	if (tg3_flag(tp, USING_MSIX))
11354 		pci_disable_msix(tp->pdev);
11355 	else if (tg3_flag(tp, USING_MSI))
11356 		pci_disable_msi(tp->pdev);
11357 	tg3_flag_clear(tp, USING_MSI);
11358 	tg3_flag_clear(tp, USING_MSIX);
11359 	tg3_flag_clear(tp, ENABLE_RSS);
11360 	tg3_flag_clear(tp, ENABLE_TSS);
11361 }
11362 
11363 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11364 		     bool init)
11365 {
11366 	struct net_device *dev = tp->dev;
11367 	int i, err;
11368 
11369 	/*
11370 	 * Setup interrupts first so we know how
11371 	 * many NAPI resources to allocate
11372 	 */
11373 	tg3_ints_init(tp);
11374 
11375 	tg3_rss_check_indir_tbl(tp);
11376 
11377 	/* The placement of this call is tied
11378 	 * to the setup and use of Host TX descriptors.
11379 	 */
11380 	err = tg3_alloc_consistent(tp);
11381 	if (err)
11382 		goto out_ints_fini;
11383 
11384 	tg3_napi_init(tp);
11385 
11386 	tg3_napi_enable(tp);
11387 
11388 	for (i = 0; i < tp->irq_cnt; i++) {
11389 		struct tg3_napi *tnapi = &tp->napi[i];
11390 		err = tg3_request_irq(tp, i);
11391 		if (err) {
11392 			for (i--; i >= 0; i--) {
11393 				tnapi = &tp->napi[i];
11394 				free_irq(tnapi->irq_vec, tnapi);
11395 			}
11396 			goto out_napi_fini;
11397 		}
11398 	}
11399 
11400 	tg3_full_lock(tp, 0);
11401 
11402 	if (init)
11403 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11404 
11405 	err = tg3_init_hw(tp, reset_phy);
11406 	if (err) {
11407 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11408 		tg3_free_rings(tp);
11409 	}
11410 
11411 	tg3_full_unlock(tp);
11412 
11413 	if (err)
11414 		goto out_free_irq;
11415 
11416 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11417 		err = tg3_test_msi(tp);
11418 
11419 		if (err) {
11420 			tg3_full_lock(tp, 0);
11421 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11422 			tg3_free_rings(tp);
11423 			tg3_full_unlock(tp);
11424 
11425 			goto out_napi_fini;
11426 		}
11427 
11428 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11429 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11430 
11431 			tw32(PCIE_TRANSACTION_CFG,
11432 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11433 		}
11434 	}
11435 
11436 	tg3_phy_start(tp);
11437 
11438 	tg3_hwmon_open(tp);
11439 
11440 	tg3_full_lock(tp, 0);
11441 
11442 	tg3_timer_start(tp);
11443 	tg3_flag_set(tp, INIT_COMPLETE);
11444 	tg3_enable_ints(tp);
11445 
11446 	if (init)
11447 		tg3_ptp_init(tp);
11448 	else
11449 		tg3_ptp_resume(tp);
11450 
11451 
11452 	tg3_full_unlock(tp);
11453 
11454 	netif_tx_start_all_queues(dev);
11455 
11456 	/*
11457 	 * Reset loopback feature if it was turned on while the device was down
11458 	 * make sure that it's installed properly now.
11459 	 */
11460 	if (dev->features & NETIF_F_LOOPBACK)
11461 		tg3_set_loopback(dev, dev->features);
11462 
11463 	return 0;
11464 
11465 out_free_irq:
11466 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11467 		struct tg3_napi *tnapi = &tp->napi[i];
11468 		free_irq(tnapi->irq_vec, tnapi);
11469 	}
11470 
11471 out_napi_fini:
11472 	tg3_napi_disable(tp);
11473 	tg3_napi_fini(tp);
11474 	tg3_free_consistent(tp);
11475 
11476 out_ints_fini:
11477 	tg3_ints_fini(tp);
11478 
11479 	return err;
11480 }
11481 
11482 static void tg3_stop(struct tg3 *tp)
11483 {
11484 	int i;
11485 
11486 	tg3_reset_task_cancel(tp);
11487 	tg3_netif_stop(tp);
11488 
11489 	tg3_timer_stop(tp);
11490 
11491 	tg3_hwmon_close(tp);
11492 
11493 	tg3_phy_stop(tp);
11494 
11495 	tg3_full_lock(tp, 1);
11496 
11497 	tg3_disable_ints(tp);
11498 
11499 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11500 	tg3_free_rings(tp);
11501 	tg3_flag_clear(tp, INIT_COMPLETE);
11502 
11503 	tg3_full_unlock(tp);
11504 
11505 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11506 		struct tg3_napi *tnapi = &tp->napi[i];
11507 		free_irq(tnapi->irq_vec, tnapi);
11508 	}
11509 
11510 	tg3_ints_fini(tp);
11511 
11512 	tg3_napi_fini(tp);
11513 
11514 	tg3_free_consistent(tp);
11515 }
11516 
11517 static int tg3_open(struct net_device *dev)
11518 {
11519 	struct tg3 *tp = netdev_priv(dev);
11520 	int err;
11521 
11522 	if (tp->fw_needed) {
11523 		err = tg3_request_firmware(tp);
11524 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11525 			if (err) {
11526 				netdev_warn(tp->dev, "EEE capability disabled\n");
11527 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11528 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11529 				netdev_warn(tp->dev, "EEE capability restored\n");
11530 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11531 			}
11532 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11533 			if (err)
11534 				return err;
11535 		} else if (err) {
11536 			netdev_warn(tp->dev, "TSO capability disabled\n");
11537 			tg3_flag_clear(tp, TSO_CAPABLE);
11538 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11539 			netdev_notice(tp->dev, "TSO capability restored\n");
11540 			tg3_flag_set(tp, TSO_CAPABLE);
11541 		}
11542 	}
11543 
11544 	tg3_carrier_off(tp);
11545 
11546 	err = tg3_power_up(tp);
11547 	if (err)
11548 		return err;
11549 
11550 	tg3_full_lock(tp, 0);
11551 
11552 	tg3_disable_ints(tp);
11553 	tg3_flag_clear(tp, INIT_COMPLETE);
11554 
11555 	tg3_full_unlock(tp);
11556 
11557 	err = tg3_start(tp,
11558 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11559 			true, true);
11560 	if (err) {
11561 		tg3_frob_aux_power(tp, false);
11562 		pci_set_power_state(tp->pdev, PCI_D3hot);
11563 	}
11564 
11565 	if (tg3_flag(tp, PTP_CAPABLE)) {
11566 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11567 						   &tp->pdev->dev);
11568 		if (IS_ERR(tp->ptp_clock))
11569 			tp->ptp_clock = NULL;
11570 	}
11571 
11572 	return err;
11573 }
11574 
11575 static int tg3_close(struct net_device *dev)
11576 {
11577 	struct tg3 *tp = netdev_priv(dev);
11578 
11579 	tg3_ptp_fini(tp);
11580 
11581 	tg3_stop(tp);
11582 
11583 	/* Clear stats across close / open calls */
11584 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11585 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11586 
11587 	if (pci_device_is_present(tp->pdev)) {
11588 		tg3_power_down_prepare(tp);
11589 
11590 		tg3_carrier_off(tp);
11591 	}
11592 	return 0;
11593 }
11594 
11595 static inline u64 get_stat64(tg3_stat64_t *val)
11596 {
11597        return ((u64)val->high << 32) | ((u64)val->low);
11598 }
11599 
11600 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11601 {
11602 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11603 
11604 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11605 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11606 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11607 		u32 val;
11608 
11609 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11610 			tg3_writephy(tp, MII_TG3_TEST1,
11611 				     val | MII_TG3_TEST1_CRC_EN);
11612 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11613 		} else
11614 			val = 0;
11615 
11616 		tp->phy_crc_errors += val;
11617 
11618 		return tp->phy_crc_errors;
11619 	}
11620 
11621 	return get_stat64(&hw_stats->rx_fcs_errors);
11622 }
11623 
11624 #define ESTAT_ADD(member) \
11625 	estats->member =	old_estats->member + \
11626 				get_stat64(&hw_stats->member)
11627 
11628 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11629 {
11630 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11631 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11632 
11633 	ESTAT_ADD(rx_octets);
11634 	ESTAT_ADD(rx_fragments);
11635 	ESTAT_ADD(rx_ucast_packets);
11636 	ESTAT_ADD(rx_mcast_packets);
11637 	ESTAT_ADD(rx_bcast_packets);
11638 	ESTAT_ADD(rx_fcs_errors);
11639 	ESTAT_ADD(rx_align_errors);
11640 	ESTAT_ADD(rx_xon_pause_rcvd);
11641 	ESTAT_ADD(rx_xoff_pause_rcvd);
11642 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11643 	ESTAT_ADD(rx_xoff_entered);
11644 	ESTAT_ADD(rx_frame_too_long_errors);
11645 	ESTAT_ADD(rx_jabbers);
11646 	ESTAT_ADD(rx_undersize_packets);
11647 	ESTAT_ADD(rx_in_length_errors);
11648 	ESTAT_ADD(rx_out_length_errors);
11649 	ESTAT_ADD(rx_64_or_less_octet_packets);
11650 	ESTAT_ADD(rx_65_to_127_octet_packets);
11651 	ESTAT_ADD(rx_128_to_255_octet_packets);
11652 	ESTAT_ADD(rx_256_to_511_octet_packets);
11653 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11654 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11655 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11656 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11657 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11658 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11659 
11660 	ESTAT_ADD(tx_octets);
11661 	ESTAT_ADD(tx_collisions);
11662 	ESTAT_ADD(tx_xon_sent);
11663 	ESTAT_ADD(tx_xoff_sent);
11664 	ESTAT_ADD(tx_flow_control);
11665 	ESTAT_ADD(tx_mac_errors);
11666 	ESTAT_ADD(tx_single_collisions);
11667 	ESTAT_ADD(tx_mult_collisions);
11668 	ESTAT_ADD(tx_deferred);
11669 	ESTAT_ADD(tx_excessive_collisions);
11670 	ESTAT_ADD(tx_late_collisions);
11671 	ESTAT_ADD(tx_collide_2times);
11672 	ESTAT_ADD(tx_collide_3times);
11673 	ESTAT_ADD(tx_collide_4times);
11674 	ESTAT_ADD(tx_collide_5times);
11675 	ESTAT_ADD(tx_collide_6times);
11676 	ESTAT_ADD(tx_collide_7times);
11677 	ESTAT_ADD(tx_collide_8times);
11678 	ESTAT_ADD(tx_collide_9times);
11679 	ESTAT_ADD(tx_collide_10times);
11680 	ESTAT_ADD(tx_collide_11times);
11681 	ESTAT_ADD(tx_collide_12times);
11682 	ESTAT_ADD(tx_collide_13times);
11683 	ESTAT_ADD(tx_collide_14times);
11684 	ESTAT_ADD(tx_collide_15times);
11685 	ESTAT_ADD(tx_ucast_packets);
11686 	ESTAT_ADD(tx_mcast_packets);
11687 	ESTAT_ADD(tx_bcast_packets);
11688 	ESTAT_ADD(tx_carrier_sense_errors);
11689 	ESTAT_ADD(tx_discards);
11690 	ESTAT_ADD(tx_errors);
11691 
11692 	ESTAT_ADD(dma_writeq_full);
11693 	ESTAT_ADD(dma_write_prioq_full);
11694 	ESTAT_ADD(rxbds_empty);
11695 	ESTAT_ADD(rx_discards);
11696 	ESTAT_ADD(rx_errors);
11697 	ESTAT_ADD(rx_threshold_hit);
11698 
11699 	ESTAT_ADD(dma_readq_full);
11700 	ESTAT_ADD(dma_read_prioq_full);
11701 	ESTAT_ADD(tx_comp_queue_full);
11702 
11703 	ESTAT_ADD(ring_set_send_prod_index);
11704 	ESTAT_ADD(ring_status_update);
11705 	ESTAT_ADD(nic_irqs);
11706 	ESTAT_ADD(nic_avoided_irqs);
11707 	ESTAT_ADD(nic_tx_threshold_hit);
11708 
11709 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11710 }
11711 
11712 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11713 {
11714 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11715 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11716 
11717 	stats->rx_packets = old_stats->rx_packets +
11718 		get_stat64(&hw_stats->rx_ucast_packets) +
11719 		get_stat64(&hw_stats->rx_mcast_packets) +
11720 		get_stat64(&hw_stats->rx_bcast_packets);
11721 
11722 	stats->tx_packets = old_stats->tx_packets +
11723 		get_stat64(&hw_stats->tx_ucast_packets) +
11724 		get_stat64(&hw_stats->tx_mcast_packets) +
11725 		get_stat64(&hw_stats->tx_bcast_packets);
11726 
11727 	stats->rx_bytes = old_stats->rx_bytes +
11728 		get_stat64(&hw_stats->rx_octets);
11729 	stats->tx_bytes = old_stats->tx_bytes +
11730 		get_stat64(&hw_stats->tx_octets);
11731 
11732 	stats->rx_errors = old_stats->rx_errors +
11733 		get_stat64(&hw_stats->rx_errors);
11734 	stats->tx_errors = old_stats->tx_errors +
11735 		get_stat64(&hw_stats->tx_errors) +
11736 		get_stat64(&hw_stats->tx_mac_errors) +
11737 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11738 		get_stat64(&hw_stats->tx_discards);
11739 
11740 	stats->multicast = old_stats->multicast +
11741 		get_stat64(&hw_stats->rx_mcast_packets);
11742 	stats->collisions = old_stats->collisions +
11743 		get_stat64(&hw_stats->tx_collisions);
11744 
11745 	stats->rx_length_errors = old_stats->rx_length_errors +
11746 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11747 		get_stat64(&hw_stats->rx_undersize_packets);
11748 
11749 	stats->rx_over_errors = old_stats->rx_over_errors +
11750 		get_stat64(&hw_stats->rxbds_empty);
11751 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11752 		get_stat64(&hw_stats->rx_align_errors);
11753 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11754 		get_stat64(&hw_stats->tx_discards);
11755 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11756 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11757 
11758 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11759 		tg3_calc_crc_errors(tp);
11760 
11761 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11762 		get_stat64(&hw_stats->rx_discards);
11763 
11764 	stats->rx_dropped = tp->rx_dropped;
11765 	stats->tx_dropped = tp->tx_dropped;
11766 }
11767 
11768 static int tg3_get_regs_len(struct net_device *dev)
11769 {
11770 	return TG3_REG_BLK_SIZE;
11771 }
11772 
11773 static void tg3_get_regs(struct net_device *dev,
11774 		struct ethtool_regs *regs, void *_p)
11775 {
11776 	struct tg3 *tp = netdev_priv(dev);
11777 
11778 	regs->version = 0;
11779 
11780 	memset(_p, 0, TG3_REG_BLK_SIZE);
11781 
11782 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11783 		return;
11784 
11785 	tg3_full_lock(tp, 0);
11786 
11787 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11788 
11789 	tg3_full_unlock(tp);
11790 }
11791 
11792 static int tg3_get_eeprom_len(struct net_device *dev)
11793 {
11794 	struct tg3 *tp = netdev_priv(dev);
11795 
11796 	return tp->nvram_size;
11797 }
11798 
11799 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11800 {
11801 	struct tg3 *tp = netdev_priv(dev);
11802 	int ret;
11803 	u8  *pd;
11804 	u32 i, offset, len, b_offset, b_count;
11805 	__be32 val;
11806 
11807 	if (tg3_flag(tp, NO_NVRAM))
11808 		return -EINVAL;
11809 
11810 	offset = eeprom->offset;
11811 	len = eeprom->len;
11812 	eeprom->len = 0;
11813 
11814 	eeprom->magic = TG3_EEPROM_MAGIC;
11815 
11816 	if (offset & 3) {
11817 		/* adjustments to start on required 4 byte boundary */
11818 		b_offset = offset & 3;
11819 		b_count = 4 - b_offset;
11820 		if (b_count > len) {
11821 			/* i.e. offset=1 len=2 */
11822 			b_count = len;
11823 		}
11824 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11825 		if (ret)
11826 			return ret;
11827 		memcpy(data, ((char *)&val) + b_offset, b_count);
11828 		len -= b_count;
11829 		offset += b_count;
11830 		eeprom->len += b_count;
11831 	}
11832 
11833 	/* read bytes up to the last 4 byte boundary */
11834 	pd = &data[eeprom->len];
11835 	for (i = 0; i < (len - (len & 3)); i += 4) {
11836 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11837 		if (ret) {
11838 			eeprom->len += i;
11839 			return ret;
11840 		}
11841 		memcpy(pd + i, &val, 4);
11842 	}
11843 	eeprom->len += i;
11844 
11845 	if (len & 3) {
11846 		/* read last bytes not ending on 4 byte boundary */
11847 		pd = &data[eeprom->len];
11848 		b_count = len & 3;
11849 		b_offset = offset + len - b_count;
11850 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11851 		if (ret)
11852 			return ret;
11853 		memcpy(pd, &val, b_count);
11854 		eeprom->len += b_count;
11855 	}
11856 	return 0;
11857 }
11858 
11859 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11860 {
11861 	struct tg3 *tp = netdev_priv(dev);
11862 	int ret;
11863 	u32 offset, len, b_offset, odd_len;
11864 	u8 *buf;
11865 	__be32 start, end;
11866 
11867 	if (tg3_flag(tp, NO_NVRAM) ||
11868 	    eeprom->magic != TG3_EEPROM_MAGIC)
11869 		return -EINVAL;
11870 
11871 	offset = eeprom->offset;
11872 	len = eeprom->len;
11873 
11874 	if ((b_offset = (offset & 3))) {
11875 		/* adjustments to start on required 4 byte boundary */
11876 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11877 		if (ret)
11878 			return ret;
11879 		len += b_offset;
11880 		offset &= ~3;
11881 		if (len < 4)
11882 			len = 4;
11883 	}
11884 
11885 	odd_len = 0;
11886 	if (len & 3) {
11887 		/* adjustments to end on required 4 byte boundary */
11888 		odd_len = 1;
11889 		len = (len + 3) & ~3;
11890 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11891 		if (ret)
11892 			return ret;
11893 	}
11894 
11895 	buf = data;
11896 	if (b_offset || odd_len) {
11897 		buf = kmalloc(len, GFP_KERNEL);
11898 		if (!buf)
11899 			return -ENOMEM;
11900 		if (b_offset)
11901 			memcpy(buf, &start, 4);
11902 		if (odd_len)
11903 			memcpy(buf+len-4, &end, 4);
11904 		memcpy(buf + b_offset, data, eeprom->len);
11905 	}
11906 
11907 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11908 
11909 	if (buf != data)
11910 		kfree(buf);
11911 
11912 	return ret;
11913 }
11914 
11915 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11916 {
11917 	struct tg3 *tp = netdev_priv(dev);
11918 
11919 	if (tg3_flag(tp, USE_PHYLIB)) {
11920 		struct phy_device *phydev;
11921 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11922 			return -EAGAIN;
11923 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11924 		return phy_ethtool_gset(phydev, cmd);
11925 	}
11926 
11927 	cmd->supported = (SUPPORTED_Autoneg);
11928 
11929 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11930 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11931 				   SUPPORTED_1000baseT_Full);
11932 
11933 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11934 		cmd->supported |= (SUPPORTED_100baseT_Half |
11935 				  SUPPORTED_100baseT_Full |
11936 				  SUPPORTED_10baseT_Half |
11937 				  SUPPORTED_10baseT_Full |
11938 				  SUPPORTED_TP);
11939 		cmd->port = PORT_TP;
11940 	} else {
11941 		cmd->supported |= SUPPORTED_FIBRE;
11942 		cmd->port = PORT_FIBRE;
11943 	}
11944 
11945 	cmd->advertising = tp->link_config.advertising;
11946 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11947 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11948 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11949 				cmd->advertising |= ADVERTISED_Pause;
11950 			} else {
11951 				cmd->advertising |= ADVERTISED_Pause |
11952 						    ADVERTISED_Asym_Pause;
11953 			}
11954 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11955 			cmd->advertising |= ADVERTISED_Asym_Pause;
11956 		}
11957 	}
11958 	if (netif_running(dev) && tp->link_up) {
11959 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11960 		cmd->duplex = tp->link_config.active_duplex;
11961 		cmd->lp_advertising = tp->link_config.rmt_adv;
11962 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11963 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11964 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11965 			else
11966 				cmd->eth_tp_mdix = ETH_TP_MDI;
11967 		}
11968 	} else {
11969 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11970 		cmd->duplex = DUPLEX_UNKNOWN;
11971 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11972 	}
11973 	cmd->phy_address = tp->phy_addr;
11974 	cmd->transceiver = XCVR_INTERNAL;
11975 	cmd->autoneg = tp->link_config.autoneg;
11976 	cmd->maxtxpkt = 0;
11977 	cmd->maxrxpkt = 0;
11978 	return 0;
11979 }
11980 
11981 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11982 {
11983 	struct tg3 *tp = netdev_priv(dev);
11984 	u32 speed = ethtool_cmd_speed(cmd);
11985 
11986 	if (tg3_flag(tp, USE_PHYLIB)) {
11987 		struct phy_device *phydev;
11988 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11989 			return -EAGAIN;
11990 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11991 		return phy_ethtool_sset(phydev, cmd);
11992 	}
11993 
11994 	if (cmd->autoneg != AUTONEG_ENABLE &&
11995 	    cmd->autoneg != AUTONEG_DISABLE)
11996 		return -EINVAL;
11997 
11998 	if (cmd->autoneg == AUTONEG_DISABLE &&
11999 	    cmd->duplex != DUPLEX_FULL &&
12000 	    cmd->duplex != DUPLEX_HALF)
12001 		return -EINVAL;
12002 
12003 	if (cmd->autoneg == AUTONEG_ENABLE) {
12004 		u32 mask = ADVERTISED_Autoneg |
12005 			   ADVERTISED_Pause |
12006 			   ADVERTISED_Asym_Pause;
12007 
12008 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12009 			mask |= ADVERTISED_1000baseT_Half |
12010 				ADVERTISED_1000baseT_Full;
12011 
12012 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12013 			mask |= ADVERTISED_100baseT_Half |
12014 				ADVERTISED_100baseT_Full |
12015 				ADVERTISED_10baseT_Half |
12016 				ADVERTISED_10baseT_Full |
12017 				ADVERTISED_TP;
12018 		else
12019 			mask |= ADVERTISED_FIBRE;
12020 
12021 		if (cmd->advertising & ~mask)
12022 			return -EINVAL;
12023 
12024 		mask &= (ADVERTISED_1000baseT_Half |
12025 			 ADVERTISED_1000baseT_Full |
12026 			 ADVERTISED_100baseT_Half |
12027 			 ADVERTISED_100baseT_Full |
12028 			 ADVERTISED_10baseT_Half |
12029 			 ADVERTISED_10baseT_Full);
12030 
12031 		cmd->advertising &= mask;
12032 	} else {
12033 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12034 			if (speed != SPEED_1000)
12035 				return -EINVAL;
12036 
12037 			if (cmd->duplex != DUPLEX_FULL)
12038 				return -EINVAL;
12039 		} else {
12040 			if (speed != SPEED_100 &&
12041 			    speed != SPEED_10)
12042 				return -EINVAL;
12043 		}
12044 	}
12045 
12046 	tg3_full_lock(tp, 0);
12047 
12048 	tp->link_config.autoneg = cmd->autoneg;
12049 	if (cmd->autoneg == AUTONEG_ENABLE) {
12050 		tp->link_config.advertising = (cmd->advertising |
12051 					      ADVERTISED_Autoneg);
12052 		tp->link_config.speed = SPEED_UNKNOWN;
12053 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12054 	} else {
12055 		tp->link_config.advertising = 0;
12056 		tp->link_config.speed = speed;
12057 		tp->link_config.duplex = cmd->duplex;
12058 	}
12059 
12060 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12061 
12062 	tg3_warn_mgmt_link_flap(tp);
12063 
12064 	if (netif_running(dev))
12065 		tg3_setup_phy(tp, true);
12066 
12067 	tg3_full_unlock(tp);
12068 
12069 	return 0;
12070 }
12071 
12072 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12073 {
12074 	struct tg3 *tp = netdev_priv(dev);
12075 
12076 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12077 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12078 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12079 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12080 }
12081 
12082 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12083 {
12084 	struct tg3 *tp = netdev_priv(dev);
12085 
12086 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12087 		wol->supported = WAKE_MAGIC;
12088 	else
12089 		wol->supported = 0;
12090 	wol->wolopts = 0;
12091 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12092 		wol->wolopts = WAKE_MAGIC;
12093 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12094 }
12095 
12096 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12097 {
12098 	struct tg3 *tp = netdev_priv(dev);
12099 	struct device *dp = &tp->pdev->dev;
12100 
12101 	if (wol->wolopts & ~WAKE_MAGIC)
12102 		return -EINVAL;
12103 	if ((wol->wolopts & WAKE_MAGIC) &&
12104 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12105 		return -EINVAL;
12106 
12107 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12108 
12109 	if (device_may_wakeup(dp))
12110 		tg3_flag_set(tp, WOL_ENABLE);
12111 	else
12112 		tg3_flag_clear(tp, WOL_ENABLE);
12113 
12114 	return 0;
12115 }
12116 
12117 static u32 tg3_get_msglevel(struct net_device *dev)
12118 {
12119 	struct tg3 *tp = netdev_priv(dev);
12120 	return tp->msg_enable;
12121 }
12122 
12123 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12124 {
12125 	struct tg3 *tp = netdev_priv(dev);
12126 	tp->msg_enable = value;
12127 }
12128 
12129 static int tg3_nway_reset(struct net_device *dev)
12130 {
12131 	struct tg3 *tp = netdev_priv(dev);
12132 	int r;
12133 
12134 	if (!netif_running(dev))
12135 		return -EAGAIN;
12136 
12137 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12138 		return -EINVAL;
12139 
12140 	tg3_warn_mgmt_link_flap(tp);
12141 
12142 	if (tg3_flag(tp, USE_PHYLIB)) {
12143 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12144 			return -EAGAIN;
12145 		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12146 	} else {
12147 		u32 bmcr;
12148 
12149 		spin_lock_bh(&tp->lock);
12150 		r = -EINVAL;
12151 		tg3_readphy(tp, MII_BMCR, &bmcr);
12152 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12153 		    ((bmcr & BMCR_ANENABLE) ||
12154 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12155 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12156 						   BMCR_ANENABLE);
12157 			r = 0;
12158 		}
12159 		spin_unlock_bh(&tp->lock);
12160 	}
12161 
12162 	return r;
12163 }
12164 
12165 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12166 {
12167 	struct tg3 *tp = netdev_priv(dev);
12168 
12169 	ering->rx_max_pending = tp->rx_std_ring_mask;
12170 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12171 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12172 	else
12173 		ering->rx_jumbo_max_pending = 0;
12174 
12175 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12176 
12177 	ering->rx_pending = tp->rx_pending;
12178 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12179 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12180 	else
12181 		ering->rx_jumbo_pending = 0;
12182 
12183 	ering->tx_pending = tp->napi[0].tx_pending;
12184 }
12185 
12186 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12187 {
12188 	struct tg3 *tp = netdev_priv(dev);
12189 	int i, irq_sync = 0, err = 0;
12190 
12191 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12192 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12193 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12194 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12195 	    (tg3_flag(tp, TSO_BUG) &&
12196 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12197 		return -EINVAL;
12198 
12199 	if (netif_running(dev)) {
12200 		tg3_phy_stop(tp);
12201 		tg3_netif_stop(tp);
12202 		irq_sync = 1;
12203 	}
12204 
12205 	tg3_full_lock(tp, irq_sync);
12206 
12207 	tp->rx_pending = ering->rx_pending;
12208 
12209 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12210 	    tp->rx_pending > 63)
12211 		tp->rx_pending = 63;
12212 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12213 
12214 	for (i = 0; i < tp->irq_max; i++)
12215 		tp->napi[i].tx_pending = ering->tx_pending;
12216 
12217 	if (netif_running(dev)) {
12218 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12219 		err = tg3_restart_hw(tp, false);
12220 		if (!err)
12221 			tg3_netif_start(tp);
12222 	}
12223 
12224 	tg3_full_unlock(tp);
12225 
12226 	if (irq_sync && !err)
12227 		tg3_phy_start(tp);
12228 
12229 	return err;
12230 }
12231 
12232 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12233 {
12234 	struct tg3 *tp = netdev_priv(dev);
12235 
12236 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12237 
12238 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12239 		epause->rx_pause = 1;
12240 	else
12241 		epause->rx_pause = 0;
12242 
12243 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12244 		epause->tx_pause = 1;
12245 	else
12246 		epause->tx_pause = 0;
12247 }
12248 
12249 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12250 {
12251 	struct tg3 *tp = netdev_priv(dev);
12252 	int err = 0;
12253 
12254 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12255 		tg3_warn_mgmt_link_flap(tp);
12256 
12257 	if (tg3_flag(tp, USE_PHYLIB)) {
12258 		u32 newadv;
12259 		struct phy_device *phydev;
12260 
12261 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12262 
12263 		if (!(phydev->supported & SUPPORTED_Pause) ||
12264 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12265 		     (epause->rx_pause != epause->tx_pause)))
12266 			return -EINVAL;
12267 
12268 		tp->link_config.flowctrl = 0;
12269 		if (epause->rx_pause) {
12270 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12271 
12272 			if (epause->tx_pause) {
12273 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12274 				newadv = ADVERTISED_Pause;
12275 			} else
12276 				newadv = ADVERTISED_Pause |
12277 					 ADVERTISED_Asym_Pause;
12278 		} else if (epause->tx_pause) {
12279 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12280 			newadv = ADVERTISED_Asym_Pause;
12281 		} else
12282 			newadv = 0;
12283 
12284 		if (epause->autoneg)
12285 			tg3_flag_set(tp, PAUSE_AUTONEG);
12286 		else
12287 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12288 
12289 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12290 			u32 oldadv = phydev->advertising &
12291 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12292 			if (oldadv != newadv) {
12293 				phydev->advertising &=
12294 					~(ADVERTISED_Pause |
12295 					  ADVERTISED_Asym_Pause);
12296 				phydev->advertising |= newadv;
12297 				if (phydev->autoneg) {
12298 					/*
12299 					 * Always renegotiate the link to
12300 					 * inform our link partner of our
12301 					 * flow control settings, even if the
12302 					 * flow control is forced.  Let
12303 					 * tg3_adjust_link() do the final
12304 					 * flow control setup.
12305 					 */
12306 					return phy_start_aneg(phydev);
12307 				}
12308 			}
12309 
12310 			if (!epause->autoneg)
12311 				tg3_setup_flow_control(tp, 0, 0);
12312 		} else {
12313 			tp->link_config.advertising &=
12314 					~(ADVERTISED_Pause |
12315 					  ADVERTISED_Asym_Pause);
12316 			tp->link_config.advertising |= newadv;
12317 		}
12318 	} else {
12319 		int irq_sync = 0;
12320 
12321 		if (netif_running(dev)) {
12322 			tg3_netif_stop(tp);
12323 			irq_sync = 1;
12324 		}
12325 
12326 		tg3_full_lock(tp, irq_sync);
12327 
12328 		if (epause->autoneg)
12329 			tg3_flag_set(tp, PAUSE_AUTONEG);
12330 		else
12331 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12332 		if (epause->rx_pause)
12333 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12334 		else
12335 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12336 		if (epause->tx_pause)
12337 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12338 		else
12339 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12340 
12341 		if (netif_running(dev)) {
12342 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12343 			err = tg3_restart_hw(tp, false);
12344 			if (!err)
12345 				tg3_netif_start(tp);
12346 		}
12347 
12348 		tg3_full_unlock(tp);
12349 	}
12350 
12351 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12352 
12353 	return err;
12354 }
12355 
12356 static int tg3_get_sset_count(struct net_device *dev, int sset)
12357 {
12358 	switch (sset) {
12359 	case ETH_SS_TEST:
12360 		return TG3_NUM_TEST;
12361 	case ETH_SS_STATS:
12362 		return TG3_NUM_STATS;
12363 	default:
12364 		return -EOPNOTSUPP;
12365 	}
12366 }
12367 
12368 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12369 			 u32 *rules __always_unused)
12370 {
12371 	struct tg3 *tp = netdev_priv(dev);
12372 
12373 	if (!tg3_flag(tp, SUPPORT_MSIX))
12374 		return -EOPNOTSUPP;
12375 
12376 	switch (info->cmd) {
12377 	case ETHTOOL_GRXRINGS:
12378 		if (netif_running(tp->dev))
12379 			info->data = tp->rxq_cnt;
12380 		else {
12381 			info->data = num_online_cpus();
12382 			if (info->data > TG3_RSS_MAX_NUM_QS)
12383 				info->data = TG3_RSS_MAX_NUM_QS;
12384 		}
12385 
12386 		/* The first interrupt vector only
12387 		 * handles link interrupts.
12388 		 */
12389 		info->data -= 1;
12390 		return 0;
12391 
12392 	default:
12393 		return -EOPNOTSUPP;
12394 	}
12395 }
12396 
12397 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12398 {
12399 	u32 size = 0;
12400 	struct tg3 *tp = netdev_priv(dev);
12401 
12402 	if (tg3_flag(tp, SUPPORT_MSIX))
12403 		size = TG3_RSS_INDIR_TBL_SIZE;
12404 
12405 	return size;
12406 }
12407 
12408 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12409 {
12410 	struct tg3 *tp = netdev_priv(dev);
12411 	int i;
12412 
12413 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12414 		indir[i] = tp->rss_ind_tbl[i];
12415 
12416 	return 0;
12417 }
12418 
12419 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12420 {
12421 	struct tg3 *tp = netdev_priv(dev);
12422 	size_t i;
12423 
12424 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12425 		tp->rss_ind_tbl[i] = indir[i];
12426 
12427 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12428 		return 0;
12429 
12430 	/* It is legal to write the indirection
12431 	 * table while the device is running.
12432 	 */
12433 	tg3_full_lock(tp, 0);
12434 	tg3_rss_write_indir_tbl(tp);
12435 	tg3_full_unlock(tp);
12436 
12437 	return 0;
12438 }
12439 
12440 static void tg3_get_channels(struct net_device *dev,
12441 			     struct ethtool_channels *channel)
12442 {
12443 	struct tg3 *tp = netdev_priv(dev);
12444 	u32 deflt_qs = netif_get_num_default_rss_queues();
12445 
12446 	channel->max_rx = tp->rxq_max;
12447 	channel->max_tx = tp->txq_max;
12448 
12449 	if (netif_running(dev)) {
12450 		channel->rx_count = tp->rxq_cnt;
12451 		channel->tx_count = tp->txq_cnt;
12452 	} else {
12453 		if (tp->rxq_req)
12454 			channel->rx_count = tp->rxq_req;
12455 		else
12456 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12457 
12458 		if (tp->txq_req)
12459 			channel->tx_count = tp->txq_req;
12460 		else
12461 			channel->tx_count = min(deflt_qs, tp->txq_max);
12462 	}
12463 }
12464 
12465 static int tg3_set_channels(struct net_device *dev,
12466 			    struct ethtool_channels *channel)
12467 {
12468 	struct tg3 *tp = netdev_priv(dev);
12469 
12470 	if (!tg3_flag(tp, SUPPORT_MSIX))
12471 		return -EOPNOTSUPP;
12472 
12473 	if (channel->rx_count > tp->rxq_max ||
12474 	    channel->tx_count > tp->txq_max)
12475 		return -EINVAL;
12476 
12477 	tp->rxq_req = channel->rx_count;
12478 	tp->txq_req = channel->tx_count;
12479 
12480 	if (!netif_running(dev))
12481 		return 0;
12482 
12483 	tg3_stop(tp);
12484 
12485 	tg3_carrier_off(tp);
12486 
12487 	tg3_start(tp, true, false, false);
12488 
12489 	return 0;
12490 }
12491 
12492 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12493 {
12494 	switch (stringset) {
12495 	case ETH_SS_STATS:
12496 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12497 		break;
12498 	case ETH_SS_TEST:
12499 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12500 		break;
12501 	default:
12502 		WARN_ON(1);	/* we need a WARN() */
12503 		break;
12504 	}
12505 }
12506 
12507 static int tg3_set_phys_id(struct net_device *dev,
12508 			    enum ethtool_phys_id_state state)
12509 {
12510 	struct tg3 *tp = netdev_priv(dev);
12511 
12512 	if (!netif_running(tp->dev))
12513 		return -EAGAIN;
12514 
12515 	switch (state) {
12516 	case ETHTOOL_ID_ACTIVE:
12517 		return 1;	/* cycle on/off once per second */
12518 
12519 	case ETHTOOL_ID_ON:
12520 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12521 		     LED_CTRL_1000MBPS_ON |
12522 		     LED_CTRL_100MBPS_ON |
12523 		     LED_CTRL_10MBPS_ON |
12524 		     LED_CTRL_TRAFFIC_OVERRIDE |
12525 		     LED_CTRL_TRAFFIC_BLINK |
12526 		     LED_CTRL_TRAFFIC_LED);
12527 		break;
12528 
12529 	case ETHTOOL_ID_OFF:
12530 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12531 		     LED_CTRL_TRAFFIC_OVERRIDE);
12532 		break;
12533 
12534 	case ETHTOOL_ID_INACTIVE:
12535 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12536 		break;
12537 	}
12538 
12539 	return 0;
12540 }
12541 
12542 static void tg3_get_ethtool_stats(struct net_device *dev,
12543 				   struct ethtool_stats *estats, u64 *tmp_stats)
12544 {
12545 	struct tg3 *tp = netdev_priv(dev);
12546 
12547 	if (tp->hw_stats)
12548 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12549 	else
12550 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12551 }
12552 
12553 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12554 {
12555 	int i;
12556 	__be32 *buf;
12557 	u32 offset = 0, len = 0;
12558 	u32 magic, val;
12559 
12560 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12561 		return NULL;
12562 
12563 	if (magic == TG3_EEPROM_MAGIC) {
12564 		for (offset = TG3_NVM_DIR_START;
12565 		     offset < TG3_NVM_DIR_END;
12566 		     offset += TG3_NVM_DIRENT_SIZE) {
12567 			if (tg3_nvram_read(tp, offset, &val))
12568 				return NULL;
12569 
12570 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12571 			    TG3_NVM_DIRTYPE_EXTVPD)
12572 				break;
12573 		}
12574 
12575 		if (offset != TG3_NVM_DIR_END) {
12576 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12577 			if (tg3_nvram_read(tp, offset + 4, &offset))
12578 				return NULL;
12579 
12580 			offset = tg3_nvram_logical_addr(tp, offset);
12581 		}
12582 	}
12583 
12584 	if (!offset || !len) {
12585 		offset = TG3_NVM_VPD_OFF;
12586 		len = TG3_NVM_VPD_LEN;
12587 	}
12588 
12589 	buf = kmalloc(len, GFP_KERNEL);
12590 	if (buf == NULL)
12591 		return NULL;
12592 
12593 	if (magic == TG3_EEPROM_MAGIC) {
12594 		for (i = 0; i < len; i += 4) {
12595 			/* The data is in little-endian format in NVRAM.
12596 			 * Use the big-endian read routines to preserve
12597 			 * the byte order as it exists in NVRAM.
12598 			 */
12599 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12600 				goto error;
12601 		}
12602 	} else {
12603 		u8 *ptr;
12604 		ssize_t cnt;
12605 		unsigned int pos = 0;
12606 
12607 		ptr = (u8 *)&buf[0];
12608 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12609 			cnt = pci_read_vpd(tp->pdev, pos,
12610 					   len - pos, ptr);
12611 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12612 				cnt = 0;
12613 			else if (cnt < 0)
12614 				goto error;
12615 		}
12616 		if (pos != len)
12617 			goto error;
12618 	}
12619 
12620 	*vpdlen = len;
12621 
12622 	return buf;
12623 
12624 error:
12625 	kfree(buf);
12626 	return NULL;
12627 }
12628 
12629 #define NVRAM_TEST_SIZE 0x100
12630 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12631 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12632 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12633 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12634 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12635 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12636 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12637 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12638 
12639 static int tg3_test_nvram(struct tg3 *tp)
12640 {
12641 	u32 csum, magic, len;
12642 	__be32 *buf;
12643 	int i, j, k, err = 0, size;
12644 
12645 	if (tg3_flag(tp, NO_NVRAM))
12646 		return 0;
12647 
12648 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12649 		return -EIO;
12650 
12651 	if (magic == TG3_EEPROM_MAGIC)
12652 		size = NVRAM_TEST_SIZE;
12653 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12654 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12655 		    TG3_EEPROM_SB_FORMAT_1) {
12656 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12657 			case TG3_EEPROM_SB_REVISION_0:
12658 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12659 				break;
12660 			case TG3_EEPROM_SB_REVISION_2:
12661 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12662 				break;
12663 			case TG3_EEPROM_SB_REVISION_3:
12664 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12665 				break;
12666 			case TG3_EEPROM_SB_REVISION_4:
12667 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12668 				break;
12669 			case TG3_EEPROM_SB_REVISION_5:
12670 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12671 				break;
12672 			case TG3_EEPROM_SB_REVISION_6:
12673 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12674 				break;
12675 			default:
12676 				return -EIO;
12677 			}
12678 		} else
12679 			return 0;
12680 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12681 		size = NVRAM_SELFBOOT_HW_SIZE;
12682 	else
12683 		return -EIO;
12684 
12685 	buf = kmalloc(size, GFP_KERNEL);
12686 	if (buf == NULL)
12687 		return -ENOMEM;
12688 
12689 	err = -EIO;
12690 	for (i = 0, j = 0; i < size; i += 4, j++) {
12691 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12692 		if (err)
12693 			break;
12694 	}
12695 	if (i < size)
12696 		goto out;
12697 
12698 	/* Selfboot format */
12699 	magic = be32_to_cpu(buf[0]);
12700 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12701 	    TG3_EEPROM_MAGIC_FW) {
12702 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12703 
12704 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12705 		    TG3_EEPROM_SB_REVISION_2) {
12706 			/* For rev 2, the csum doesn't include the MBA. */
12707 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12708 				csum8 += buf8[i];
12709 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12710 				csum8 += buf8[i];
12711 		} else {
12712 			for (i = 0; i < size; i++)
12713 				csum8 += buf8[i];
12714 		}
12715 
12716 		if (csum8 == 0) {
12717 			err = 0;
12718 			goto out;
12719 		}
12720 
12721 		err = -EIO;
12722 		goto out;
12723 	}
12724 
12725 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12726 	    TG3_EEPROM_MAGIC_HW) {
12727 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12728 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12729 		u8 *buf8 = (u8 *) buf;
12730 
12731 		/* Separate the parity bits and the data bytes.  */
12732 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12733 			if ((i == 0) || (i == 8)) {
12734 				int l;
12735 				u8 msk;
12736 
12737 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12738 					parity[k++] = buf8[i] & msk;
12739 				i++;
12740 			} else if (i == 16) {
12741 				int l;
12742 				u8 msk;
12743 
12744 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12745 					parity[k++] = buf8[i] & msk;
12746 				i++;
12747 
12748 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12749 					parity[k++] = buf8[i] & msk;
12750 				i++;
12751 			}
12752 			data[j++] = buf8[i];
12753 		}
12754 
12755 		err = -EIO;
12756 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12757 			u8 hw8 = hweight8(data[i]);
12758 
12759 			if ((hw8 & 0x1) && parity[i])
12760 				goto out;
12761 			else if (!(hw8 & 0x1) && !parity[i])
12762 				goto out;
12763 		}
12764 		err = 0;
12765 		goto out;
12766 	}
12767 
12768 	err = -EIO;
12769 
12770 	/* Bootstrap checksum at offset 0x10 */
12771 	csum = calc_crc((unsigned char *) buf, 0x10);
12772 	if (csum != le32_to_cpu(buf[0x10/4]))
12773 		goto out;
12774 
12775 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12776 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12777 	if (csum != le32_to_cpu(buf[0xfc/4]))
12778 		goto out;
12779 
12780 	kfree(buf);
12781 
12782 	buf = tg3_vpd_readblock(tp, &len);
12783 	if (!buf)
12784 		return -ENOMEM;
12785 
12786 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12787 	if (i > 0) {
12788 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12789 		if (j < 0)
12790 			goto out;
12791 
12792 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12793 			goto out;
12794 
12795 		i += PCI_VPD_LRDT_TAG_SIZE;
12796 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12797 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12798 		if (j > 0) {
12799 			u8 csum8 = 0;
12800 
12801 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12802 
12803 			for (i = 0; i <= j; i++)
12804 				csum8 += ((u8 *)buf)[i];
12805 
12806 			if (csum8)
12807 				goto out;
12808 		}
12809 	}
12810 
12811 	err = 0;
12812 
12813 out:
12814 	kfree(buf);
12815 	return err;
12816 }
12817 
12818 #define TG3_SERDES_TIMEOUT_SEC	2
12819 #define TG3_COPPER_TIMEOUT_SEC	6
12820 
12821 static int tg3_test_link(struct tg3 *tp)
12822 {
12823 	int i, max;
12824 
12825 	if (!netif_running(tp->dev))
12826 		return -ENODEV;
12827 
12828 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12829 		max = TG3_SERDES_TIMEOUT_SEC;
12830 	else
12831 		max = TG3_COPPER_TIMEOUT_SEC;
12832 
12833 	for (i = 0; i < max; i++) {
12834 		if (tp->link_up)
12835 			return 0;
12836 
12837 		if (msleep_interruptible(1000))
12838 			break;
12839 	}
12840 
12841 	return -EIO;
12842 }
12843 
12844 /* Only test the commonly used registers */
12845 static int tg3_test_registers(struct tg3 *tp)
12846 {
12847 	int i, is_5705, is_5750;
12848 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12849 	static struct {
12850 		u16 offset;
12851 		u16 flags;
12852 #define TG3_FL_5705	0x1
12853 #define TG3_FL_NOT_5705	0x2
12854 #define TG3_FL_NOT_5788	0x4
12855 #define TG3_FL_NOT_5750	0x8
12856 		u32 read_mask;
12857 		u32 write_mask;
12858 	} reg_tbl[] = {
12859 		/* MAC Control Registers */
12860 		{ MAC_MODE, TG3_FL_NOT_5705,
12861 			0x00000000, 0x00ef6f8c },
12862 		{ MAC_MODE, TG3_FL_5705,
12863 			0x00000000, 0x01ef6b8c },
12864 		{ MAC_STATUS, TG3_FL_NOT_5705,
12865 			0x03800107, 0x00000000 },
12866 		{ MAC_STATUS, TG3_FL_5705,
12867 			0x03800100, 0x00000000 },
12868 		{ MAC_ADDR_0_HIGH, 0x0000,
12869 			0x00000000, 0x0000ffff },
12870 		{ MAC_ADDR_0_LOW, 0x0000,
12871 			0x00000000, 0xffffffff },
12872 		{ MAC_RX_MTU_SIZE, 0x0000,
12873 			0x00000000, 0x0000ffff },
12874 		{ MAC_TX_MODE, 0x0000,
12875 			0x00000000, 0x00000070 },
12876 		{ MAC_TX_LENGTHS, 0x0000,
12877 			0x00000000, 0x00003fff },
12878 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12879 			0x00000000, 0x000007fc },
12880 		{ MAC_RX_MODE, TG3_FL_5705,
12881 			0x00000000, 0x000007dc },
12882 		{ MAC_HASH_REG_0, 0x0000,
12883 			0x00000000, 0xffffffff },
12884 		{ MAC_HASH_REG_1, 0x0000,
12885 			0x00000000, 0xffffffff },
12886 		{ MAC_HASH_REG_2, 0x0000,
12887 			0x00000000, 0xffffffff },
12888 		{ MAC_HASH_REG_3, 0x0000,
12889 			0x00000000, 0xffffffff },
12890 
12891 		/* Receive Data and Receive BD Initiator Control Registers. */
12892 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12893 			0x00000000, 0xffffffff },
12894 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12895 			0x00000000, 0xffffffff },
12896 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12897 			0x00000000, 0x00000003 },
12898 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12899 			0x00000000, 0xffffffff },
12900 		{ RCVDBDI_STD_BD+0, 0x0000,
12901 			0x00000000, 0xffffffff },
12902 		{ RCVDBDI_STD_BD+4, 0x0000,
12903 			0x00000000, 0xffffffff },
12904 		{ RCVDBDI_STD_BD+8, 0x0000,
12905 			0x00000000, 0xffff0002 },
12906 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12907 			0x00000000, 0xffffffff },
12908 
12909 		/* Receive BD Initiator Control Registers. */
12910 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12911 			0x00000000, 0xffffffff },
12912 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12913 			0x00000000, 0x000003ff },
12914 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12915 			0x00000000, 0xffffffff },
12916 
12917 		/* Host Coalescing Control Registers. */
12918 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12919 			0x00000000, 0x00000004 },
12920 		{ HOSTCC_MODE, TG3_FL_5705,
12921 			0x00000000, 0x000000f6 },
12922 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12923 			0x00000000, 0xffffffff },
12924 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12925 			0x00000000, 0x000003ff },
12926 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12927 			0x00000000, 0xffffffff },
12928 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12929 			0x00000000, 0x000003ff },
12930 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12931 			0x00000000, 0xffffffff },
12932 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12933 			0x00000000, 0x000000ff },
12934 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12935 			0x00000000, 0xffffffff },
12936 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12937 			0x00000000, 0x000000ff },
12938 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12939 			0x00000000, 0xffffffff },
12940 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12941 			0x00000000, 0xffffffff },
12942 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12943 			0x00000000, 0xffffffff },
12944 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12945 			0x00000000, 0x000000ff },
12946 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12947 			0x00000000, 0xffffffff },
12948 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12949 			0x00000000, 0x000000ff },
12950 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12951 			0x00000000, 0xffffffff },
12952 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12953 			0x00000000, 0xffffffff },
12954 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12955 			0x00000000, 0xffffffff },
12956 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12957 			0x00000000, 0xffffffff },
12958 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12959 			0x00000000, 0xffffffff },
12960 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12961 			0xffffffff, 0x00000000 },
12962 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12963 			0xffffffff, 0x00000000 },
12964 
12965 		/* Buffer Manager Control Registers. */
12966 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12967 			0x00000000, 0x007fff80 },
12968 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12969 			0x00000000, 0x007fffff },
12970 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12971 			0x00000000, 0x0000003f },
12972 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12973 			0x00000000, 0x000001ff },
12974 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12975 			0x00000000, 0x000001ff },
12976 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12977 			0xffffffff, 0x00000000 },
12978 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12979 			0xffffffff, 0x00000000 },
12980 
12981 		/* Mailbox Registers */
12982 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12983 			0x00000000, 0x000001ff },
12984 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12985 			0x00000000, 0x000001ff },
12986 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12987 			0x00000000, 0x000007ff },
12988 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12989 			0x00000000, 0x000001ff },
12990 
12991 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12992 	};
12993 
12994 	is_5705 = is_5750 = 0;
12995 	if (tg3_flag(tp, 5705_PLUS)) {
12996 		is_5705 = 1;
12997 		if (tg3_flag(tp, 5750_PLUS))
12998 			is_5750 = 1;
12999 	}
13000 
13001 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13002 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13003 			continue;
13004 
13005 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13006 			continue;
13007 
13008 		if (tg3_flag(tp, IS_5788) &&
13009 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13010 			continue;
13011 
13012 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13013 			continue;
13014 
13015 		offset = (u32) reg_tbl[i].offset;
13016 		read_mask = reg_tbl[i].read_mask;
13017 		write_mask = reg_tbl[i].write_mask;
13018 
13019 		/* Save the original register content */
13020 		save_val = tr32(offset);
13021 
13022 		/* Determine the read-only value. */
13023 		read_val = save_val & read_mask;
13024 
13025 		/* Write zero to the register, then make sure the read-only bits
13026 		 * are not changed and the read/write bits are all zeros.
13027 		 */
13028 		tw32(offset, 0);
13029 
13030 		val = tr32(offset);
13031 
13032 		/* Test the read-only and read/write bits. */
13033 		if (((val & read_mask) != read_val) || (val & write_mask))
13034 			goto out;
13035 
13036 		/* Write ones to all the bits defined by RdMask and WrMask, then
13037 		 * make sure the read-only bits are not changed and the
13038 		 * read/write bits are all ones.
13039 		 */
13040 		tw32(offset, read_mask | write_mask);
13041 
13042 		val = tr32(offset);
13043 
13044 		/* Test the read-only bits. */
13045 		if ((val & read_mask) != read_val)
13046 			goto out;
13047 
13048 		/* Test the read/write bits. */
13049 		if ((val & write_mask) != write_mask)
13050 			goto out;
13051 
13052 		tw32(offset, save_val);
13053 	}
13054 
13055 	return 0;
13056 
13057 out:
13058 	if (netif_msg_hw(tp))
13059 		netdev_err(tp->dev,
13060 			   "Register test failed at offset %x\n", offset);
13061 	tw32(offset, save_val);
13062 	return -EIO;
13063 }
13064 
13065 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13066 {
13067 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13068 	int i;
13069 	u32 j;
13070 
13071 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13072 		for (j = 0; j < len; j += 4) {
13073 			u32 val;
13074 
13075 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13076 			tg3_read_mem(tp, offset + j, &val);
13077 			if (val != test_pattern[i])
13078 				return -EIO;
13079 		}
13080 	}
13081 	return 0;
13082 }
13083 
13084 static int tg3_test_memory(struct tg3 *tp)
13085 {
13086 	static struct mem_entry {
13087 		u32 offset;
13088 		u32 len;
13089 	} mem_tbl_570x[] = {
13090 		{ 0x00000000, 0x00b50},
13091 		{ 0x00002000, 0x1c000},
13092 		{ 0xffffffff, 0x00000}
13093 	}, mem_tbl_5705[] = {
13094 		{ 0x00000100, 0x0000c},
13095 		{ 0x00000200, 0x00008},
13096 		{ 0x00004000, 0x00800},
13097 		{ 0x00006000, 0x01000},
13098 		{ 0x00008000, 0x02000},
13099 		{ 0x00010000, 0x0e000},
13100 		{ 0xffffffff, 0x00000}
13101 	}, mem_tbl_5755[] = {
13102 		{ 0x00000200, 0x00008},
13103 		{ 0x00004000, 0x00800},
13104 		{ 0x00006000, 0x00800},
13105 		{ 0x00008000, 0x02000},
13106 		{ 0x00010000, 0x0c000},
13107 		{ 0xffffffff, 0x00000}
13108 	}, mem_tbl_5906[] = {
13109 		{ 0x00000200, 0x00008},
13110 		{ 0x00004000, 0x00400},
13111 		{ 0x00006000, 0x00400},
13112 		{ 0x00008000, 0x01000},
13113 		{ 0x00010000, 0x01000},
13114 		{ 0xffffffff, 0x00000}
13115 	}, mem_tbl_5717[] = {
13116 		{ 0x00000200, 0x00008},
13117 		{ 0x00010000, 0x0a000},
13118 		{ 0x00020000, 0x13c00},
13119 		{ 0xffffffff, 0x00000}
13120 	}, mem_tbl_57765[] = {
13121 		{ 0x00000200, 0x00008},
13122 		{ 0x00004000, 0x00800},
13123 		{ 0x00006000, 0x09800},
13124 		{ 0x00010000, 0x0a000},
13125 		{ 0xffffffff, 0x00000}
13126 	};
13127 	struct mem_entry *mem_tbl;
13128 	int err = 0;
13129 	int i;
13130 
13131 	if (tg3_flag(tp, 5717_PLUS))
13132 		mem_tbl = mem_tbl_5717;
13133 	else if (tg3_flag(tp, 57765_CLASS) ||
13134 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13135 		mem_tbl = mem_tbl_57765;
13136 	else if (tg3_flag(tp, 5755_PLUS))
13137 		mem_tbl = mem_tbl_5755;
13138 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13139 		mem_tbl = mem_tbl_5906;
13140 	else if (tg3_flag(tp, 5705_PLUS))
13141 		mem_tbl = mem_tbl_5705;
13142 	else
13143 		mem_tbl = mem_tbl_570x;
13144 
13145 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13146 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13147 		if (err)
13148 			break;
13149 	}
13150 
13151 	return err;
13152 }
13153 
13154 #define TG3_TSO_MSS		500
13155 
13156 #define TG3_TSO_IP_HDR_LEN	20
13157 #define TG3_TSO_TCP_HDR_LEN	20
13158 #define TG3_TSO_TCP_OPT_LEN	12
13159 
13160 static const u8 tg3_tso_header[] = {
13161 0x08, 0x00,
13162 0x45, 0x00, 0x00, 0x00,
13163 0x00, 0x00, 0x40, 0x00,
13164 0x40, 0x06, 0x00, 0x00,
13165 0x0a, 0x00, 0x00, 0x01,
13166 0x0a, 0x00, 0x00, 0x02,
13167 0x0d, 0x00, 0xe0, 0x00,
13168 0x00, 0x00, 0x01, 0x00,
13169 0x00, 0x00, 0x02, 0x00,
13170 0x80, 0x10, 0x10, 0x00,
13171 0x14, 0x09, 0x00, 0x00,
13172 0x01, 0x01, 0x08, 0x0a,
13173 0x11, 0x11, 0x11, 0x11,
13174 0x11, 0x11, 0x11, 0x11,
13175 };
13176 
13177 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13178 {
13179 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13180 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13181 	u32 budget;
13182 	struct sk_buff *skb;
13183 	u8 *tx_data, *rx_data;
13184 	dma_addr_t map;
13185 	int num_pkts, tx_len, rx_len, i, err;
13186 	struct tg3_rx_buffer_desc *desc;
13187 	struct tg3_napi *tnapi, *rnapi;
13188 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13189 
13190 	tnapi = &tp->napi[0];
13191 	rnapi = &tp->napi[0];
13192 	if (tp->irq_cnt > 1) {
13193 		if (tg3_flag(tp, ENABLE_RSS))
13194 			rnapi = &tp->napi[1];
13195 		if (tg3_flag(tp, ENABLE_TSS))
13196 			tnapi = &tp->napi[1];
13197 	}
13198 	coal_now = tnapi->coal_now | rnapi->coal_now;
13199 
13200 	err = -EIO;
13201 
13202 	tx_len = pktsz;
13203 	skb = netdev_alloc_skb(tp->dev, tx_len);
13204 	if (!skb)
13205 		return -ENOMEM;
13206 
13207 	tx_data = skb_put(skb, tx_len);
13208 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13209 	memset(tx_data + ETH_ALEN, 0x0, 8);
13210 
13211 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13212 
13213 	if (tso_loopback) {
13214 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13215 
13216 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13217 			      TG3_TSO_TCP_OPT_LEN;
13218 
13219 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13220 		       sizeof(tg3_tso_header));
13221 		mss = TG3_TSO_MSS;
13222 
13223 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13224 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13225 
13226 		/* Set the total length field in the IP header */
13227 		iph->tot_len = htons((u16)(mss + hdr_len));
13228 
13229 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13230 			      TXD_FLAG_CPU_POST_DMA);
13231 
13232 		if (tg3_flag(tp, HW_TSO_1) ||
13233 		    tg3_flag(tp, HW_TSO_2) ||
13234 		    tg3_flag(tp, HW_TSO_3)) {
13235 			struct tcphdr *th;
13236 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13237 			th = (struct tcphdr *)&tx_data[val];
13238 			th->check = 0;
13239 		} else
13240 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13241 
13242 		if (tg3_flag(tp, HW_TSO_3)) {
13243 			mss |= (hdr_len & 0xc) << 12;
13244 			if (hdr_len & 0x10)
13245 				base_flags |= 0x00000010;
13246 			base_flags |= (hdr_len & 0x3e0) << 5;
13247 		} else if (tg3_flag(tp, HW_TSO_2))
13248 			mss |= hdr_len << 9;
13249 		else if (tg3_flag(tp, HW_TSO_1) ||
13250 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13251 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13252 		} else {
13253 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13254 		}
13255 
13256 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13257 	} else {
13258 		num_pkts = 1;
13259 		data_off = ETH_HLEN;
13260 
13261 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13262 		    tx_len > VLAN_ETH_FRAME_LEN)
13263 			base_flags |= TXD_FLAG_JMB_PKT;
13264 	}
13265 
13266 	for (i = data_off; i < tx_len; i++)
13267 		tx_data[i] = (u8) (i & 0xff);
13268 
13269 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13270 	if (pci_dma_mapping_error(tp->pdev, map)) {
13271 		dev_kfree_skb(skb);
13272 		return -EIO;
13273 	}
13274 
13275 	val = tnapi->tx_prod;
13276 	tnapi->tx_buffers[val].skb = skb;
13277 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13278 
13279 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13280 	       rnapi->coal_now);
13281 
13282 	udelay(10);
13283 
13284 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13285 
13286 	budget = tg3_tx_avail(tnapi);
13287 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13288 			    base_flags | TXD_FLAG_END, mss, 0)) {
13289 		tnapi->tx_buffers[val].skb = NULL;
13290 		dev_kfree_skb(skb);
13291 		return -EIO;
13292 	}
13293 
13294 	tnapi->tx_prod++;
13295 
13296 	/* Sync BD data before updating mailbox */
13297 	wmb();
13298 
13299 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13300 	tr32_mailbox(tnapi->prodmbox);
13301 
13302 	udelay(10);
13303 
13304 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13305 	for (i = 0; i < 35; i++) {
13306 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13307 		       coal_now);
13308 
13309 		udelay(10);
13310 
13311 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13312 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13313 		if ((tx_idx == tnapi->tx_prod) &&
13314 		    (rx_idx == (rx_start_idx + num_pkts)))
13315 			break;
13316 	}
13317 
13318 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13319 	dev_kfree_skb(skb);
13320 
13321 	if (tx_idx != tnapi->tx_prod)
13322 		goto out;
13323 
13324 	if (rx_idx != rx_start_idx + num_pkts)
13325 		goto out;
13326 
13327 	val = data_off;
13328 	while (rx_idx != rx_start_idx) {
13329 		desc = &rnapi->rx_rcb[rx_start_idx++];
13330 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13331 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13332 
13333 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13334 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13335 			goto out;
13336 
13337 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13338 			 - ETH_FCS_LEN;
13339 
13340 		if (!tso_loopback) {
13341 			if (rx_len != tx_len)
13342 				goto out;
13343 
13344 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13345 				if (opaque_key != RXD_OPAQUE_RING_STD)
13346 					goto out;
13347 			} else {
13348 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13349 					goto out;
13350 			}
13351 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13352 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13353 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13354 			goto out;
13355 		}
13356 
13357 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13358 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13359 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13360 					     mapping);
13361 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13362 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13363 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13364 					     mapping);
13365 		} else
13366 			goto out;
13367 
13368 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13369 					    PCI_DMA_FROMDEVICE);
13370 
13371 		rx_data += TG3_RX_OFFSET(tp);
13372 		for (i = data_off; i < rx_len; i++, val++) {
13373 			if (*(rx_data + i) != (u8) (val & 0xff))
13374 				goto out;
13375 		}
13376 	}
13377 
13378 	err = 0;
13379 
13380 	/* tg3_free_rings will unmap and free the rx_data */
13381 out:
13382 	return err;
13383 }
13384 
13385 #define TG3_STD_LOOPBACK_FAILED		1
13386 #define TG3_JMB_LOOPBACK_FAILED		2
13387 #define TG3_TSO_LOOPBACK_FAILED		4
13388 #define TG3_LOOPBACK_FAILED \
13389 	(TG3_STD_LOOPBACK_FAILED | \
13390 	 TG3_JMB_LOOPBACK_FAILED | \
13391 	 TG3_TSO_LOOPBACK_FAILED)
13392 
13393 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13394 {
13395 	int err = -EIO;
13396 	u32 eee_cap;
13397 	u32 jmb_pkt_sz = 9000;
13398 
13399 	if (tp->dma_limit)
13400 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13401 
13402 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13403 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13404 
13405 	if (!netif_running(tp->dev)) {
13406 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13407 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13408 		if (do_extlpbk)
13409 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13410 		goto done;
13411 	}
13412 
13413 	err = tg3_reset_hw(tp, true);
13414 	if (err) {
13415 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13416 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13417 		if (do_extlpbk)
13418 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13419 		goto done;
13420 	}
13421 
13422 	if (tg3_flag(tp, ENABLE_RSS)) {
13423 		int i;
13424 
13425 		/* Reroute all rx packets to the 1st queue */
13426 		for (i = MAC_RSS_INDIR_TBL_0;
13427 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13428 			tw32(i, 0x0);
13429 	}
13430 
13431 	/* HW errata - mac loopback fails in some cases on 5780.
13432 	 * Normal traffic and PHY loopback are not affected by
13433 	 * errata.  Also, the MAC loopback test is deprecated for
13434 	 * all newer ASIC revisions.
13435 	 */
13436 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13437 	    !tg3_flag(tp, CPMU_PRESENT)) {
13438 		tg3_mac_loopback(tp, true);
13439 
13440 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13441 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13442 
13443 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13444 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13445 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13446 
13447 		tg3_mac_loopback(tp, false);
13448 	}
13449 
13450 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13451 	    !tg3_flag(tp, USE_PHYLIB)) {
13452 		int i;
13453 
13454 		tg3_phy_lpbk_set(tp, 0, false);
13455 
13456 		/* Wait for link */
13457 		for (i = 0; i < 100; i++) {
13458 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13459 				break;
13460 			mdelay(1);
13461 		}
13462 
13463 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13464 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13465 		if (tg3_flag(tp, TSO_CAPABLE) &&
13466 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13467 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13468 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13469 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13470 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13471 
13472 		if (do_extlpbk) {
13473 			tg3_phy_lpbk_set(tp, 0, true);
13474 
13475 			/* All link indications report up, but the hardware
13476 			 * isn't really ready for about 20 msec.  Double it
13477 			 * to be sure.
13478 			 */
13479 			mdelay(40);
13480 
13481 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13482 				data[TG3_EXT_LOOPB_TEST] |=
13483 							TG3_STD_LOOPBACK_FAILED;
13484 			if (tg3_flag(tp, TSO_CAPABLE) &&
13485 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13486 				data[TG3_EXT_LOOPB_TEST] |=
13487 							TG3_TSO_LOOPBACK_FAILED;
13488 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13489 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13490 				data[TG3_EXT_LOOPB_TEST] |=
13491 							TG3_JMB_LOOPBACK_FAILED;
13492 		}
13493 
13494 		/* Re-enable gphy autopowerdown. */
13495 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13496 			tg3_phy_toggle_apd(tp, true);
13497 	}
13498 
13499 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13500 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13501 
13502 done:
13503 	tp->phy_flags |= eee_cap;
13504 
13505 	return err;
13506 }
13507 
13508 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13509 			  u64 *data)
13510 {
13511 	struct tg3 *tp = netdev_priv(dev);
13512 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13513 
13514 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13515 		if (tg3_power_up(tp)) {
13516 			etest->flags |= ETH_TEST_FL_FAILED;
13517 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13518 			return;
13519 		}
13520 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13521 	}
13522 
13523 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13524 
13525 	if (tg3_test_nvram(tp) != 0) {
13526 		etest->flags |= ETH_TEST_FL_FAILED;
13527 		data[TG3_NVRAM_TEST] = 1;
13528 	}
13529 	if (!doextlpbk && tg3_test_link(tp)) {
13530 		etest->flags |= ETH_TEST_FL_FAILED;
13531 		data[TG3_LINK_TEST] = 1;
13532 	}
13533 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13534 		int err, err2 = 0, irq_sync = 0;
13535 
13536 		if (netif_running(dev)) {
13537 			tg3_phy_stop(tp);
13538 			tg3_netif_stop(tp);
13539 			irq_sync = 1;
13540 		}
13541 
13542 		tg3_full_lock(tp, irq_sync);
13543 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13544 		err = tg3_nvram_lock(tp);
13545 		tg3_halt_cpu(tp, RX_CPU_BASE);
13546 		if (!tg3_flag(tp, 5705_PLUS))
13547 			tg3_halt_cpu(tp, TX_CPU_BASE);
13548 		if (!err)
13549 			tg3_nvram_unlock(tp);
13550 
13551 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13552 			tg3_phy_reset(tp);
13553 
13554 		if (tg3_test_registers(tp) != 0) {
13555 			etest->flags |= ETH_TEST_FL_FAILED;
13556 			data[TG3_REGISTER_TEST] = 1;
13557 		}
13558 
13559 		if (tg3_test_memory(tp) != 0) {
13560 			etest->flags |= ETH_TEST_FL_FAILED;
13561 			data[TG3_MEMORY_TEST] = 1;
13562 		}
13563 
13564 		if (doextlpbk)
13565 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13566 
13567 		if (tg3_test_loopback(tp, data, doextlpbk))
13568 			etest->flags |= ETH_TEST_FL_FAILED;
13569 
13570 		tg3_full_unlock(tp);
13571 
13572 		if (tg3_test_interrupt(tp) != 0) {
13573 			etest->flags |= ETH_TEST_FL_FAILED;
13574 			data[TG3_INTERRUPT_TEST] = 1;
13575 		}
13576 
13577 		tg3_full_lock(tp, 0);
13578 
13579 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13580 		if (netif_running(dev)) {
13581 			tg3_flag_set(tp, INIT_COMPLETE);
13582 			err2 = tg3_restart_hw(tp, true);
13583 			if (!err2)
13584 				tg3_netif_start(tp);
13585 		}
13586 
13587 		tg3_full_unlock(tp);
13588 
13589 		if (irq_sync && !err2)
13590 			tg3_phy_start(tp);
13591 	}
13592 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13593 		tg3_power_down_prepare(tp);
13594 
13595 }
13596 
13597 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13598 			      struct ifreq *ifr, int cmd)
13599 {
13600 	struct tg3 *tp = netdev_priv(dev);
13601 	struct hwtstamp_config stmpconf;
13602 
13603 	if (!tg3_flag(tp, PTP_CAPABLE))
13604 		return -EINVAL;
13605 
13606 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13607 		return -EFAULT;
13608 
13609 	if (stmpconf.flags)
13610 		return -EINVAL;
13611 
13612 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13613 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13614 		return -ERANGE;
13615 
13616 	switch (stmpconf.rx_filter) {
13617 	case HWTSTAMP_FILTER_NONE:
13618 		tp->rxptpctl = 0;
13619 		break;
13620 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13621 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13622 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13623 		break;
13624 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13625 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13626 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13627 		break;
13628 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13629 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13630 			       TG3_RX_PTP_CTL_DELAY_REQ;
13631 		break;
13632 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13633 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13634 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13635 		break;
13636 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13637 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13638 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13639 		break;
13640 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13641 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13642 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13643 		break;
13644 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13645 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13646 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13647 		break;
13648 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13649 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13650 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13651 		break;
13652 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13653 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13654 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13655 		break;
13656 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13657 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13658 			       TG3_RX_PTP_CTL_DELAY_REQ;
13659 		break;
13660 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13661 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13662 			       TG3_RX_PTP_CTL_DELAY_REQ;
13663 		break;
13664 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13665 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13666 			       TG3_RX_PTP_CTL_DELAY_REQ;
13667 		break;
13668 	default:
13669 		return -ERANGE;
13670 	}
13671 
13672 	if (netif_running(dev) && tp->rxptpctl)
13673 		tw32(TG3_RX_PTP_CTL,
13674 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13675 
13676 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13677 		tg3_flag_set(tp, TX_TSTAMP_EN);
13678 	else
13679 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13680 
13681 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13682 		-EFAULT : 0;
13683 }
13684 
13685 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13686 {
13687 	struct mii_ioctl_data *data = if_mii(ifr);
13688 	struct tg3 *tp = netdev_priv(dev);
13689 	int err;
13690 
13691 	if (tg3_flag(tp, USE_PHYLIB)) {
13692 		struct phy_device *phydev;
13693 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13694 			return -EAGAIN;
13695 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13696 		return phy_mii_ioctl(phydev, ifr, cmd);
13697 	}
13698 
13699 	switch (cmd) {
13700 	case SIOCGMIIPHY:
13701 		data->phy_id = tp->phy_addr;
13702 
13703 		/* fallthru */
13704 	case SIOCGMIIREG: {
13705 		u32 mii_regval;
13706 
13707 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13708 			break;			/* We have no PHY */
13709 
13710 		if (!netif_running(dev))
13711 			return -EAGAIN;
13712 
13713 		spin_lock_bh(&tp->lock);
13714 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13715 				    data->reg_num & 0x1f, &mii_regval);
13716 		spin_unlock_bh(&tp->lock);
13717 
13718 		data->val_out = mii_regval;
13719 
13720 		return err;
13721 	}
13722 
13723 	case SIOCSMIIREG:
13724 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13725 			break;			/* We have no PHY */
13726 
13727 		if (!netif_running(dev))
13728 			return -EAGAIN;
13729 
13730 		spin_lock_bh(&tp->lock);
13731 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13732 				     data->reg_num & 0x1f, data->val_in);
13733 		spin_unlock_bh(&tp->lock);
13734 
13735 		return err;
13736 
13737 	case SIOCSHWTSTAMP:
13738 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13739 
13740 	default:
13741 		/* do nothing */
13742 		break;
13743 	}
13744 	return -EOPNOTSUPP;
13745 }
13746 
13747 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13748 {
13749 	struct tg3 *tp = netdev_priv(dev);
13750 
13751 	memcpy(ec, &tp->coal, sizeof(*ec));
13752 	return 0;
13753 }
13754 
13755 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13756 {
13757 	struct tg3 *tp = netdev_priv(dev);
13758 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13759 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13760 
13761 	if (!tg3_flag(tp, 5705_PLUS)) {
13762 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13763 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13764 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13765 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13766 	}
13767 
13768 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13769 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13770 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13771 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13772 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13773 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13774 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13775 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13776 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13777 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13778 		return -EINVAL;
13779 
13780 	/* No rx interrupts will be generated if both are zero */
13781 	if ((ec->rx_coalesce_usecs == 0) &&
13782 	    (ec->rx_max_coalesced_frames == 0))
13783 		return -EINVAL;
13784 
13785 	/* No tx interrupts will be generated if both are zero */
13786 	if ((ec->tx_coalesce_usecs == 0) &&
13787 	    (ec->tx_max_coalesced_frames == 0))
13788 		return -EINVAL;
13789 
13790 	/* Only copy relevant parameters, ignore all others. */
13791 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13792 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13793 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13794 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13795 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13796 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13797 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13798 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13799 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13800 
13801 	if (netif_running(dev)) {
13802 		tg3_full_lock(tp, 0);
13803 		__tg3_set_coalesce(tp, &tp->coal);
13804 		tg3_full_unlock(tp);
13805 	}
13806 	return 0;
13807 }
13808 
13809 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13810 {
13811 	struct tg3 *tp = netdev_priv(dev);
13812 
13813 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13814 		netdev_warn(tp->dev, "Board does not support EEE!\n");
13815 		return -EOPNOTSUPP;
13816 	}
13817 
13818 	if (edata->advertised != tp->eee.advertised) {
13819 		netdev_warn(tp->dev,
13820 			    "Direct manipulation of EEE advertisement is not supported\n");
13821 		return -EINVAL;
13822 	}
13823 
13824 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13825 		netdev_warn(tp->dev,
13826 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
13827 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13828 		return -EINVAL;
13829 	}
13830 
13831 	tp->eee = *edata;
13832 
13833 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13834 	tg3_warn_mgmt_link_flap(tp);
13835 
13836 	if (netif_running(tp->dev)) {
13837 		tg3_full_lock(tp, 0);
13838 		tg3_setup_eee(tp);
13839 		tg3_phy_reset(tp);
13840 		tg3_full_unlock(tp);
13841 	}
13842 
13843 	return 0;
13844 }
13845 
13846 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13847 {
13848 	struct tg3 *tp = netdev_priv(dev);
13849 
13850 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13851 		netdev_warn(tp->dev,
13852 			    "Board does not support EEE!\n");
13853 		return -EOPNOTSUPP;
13854 	}
13855 
13856 	*edata = tp->eee;
13857 	return 0;
13858 }
13859 
13860 static const struct ethtool_ops tg3_ethtool_ops = {
13861 	.get_settings		= tg3_get_settings,
13862 	.set_settings		= tg3_set_settings,
13863 	.get_drvinfo		= tg3_get_drvinfo,
13864 	.get_regs_len		= tg3_get_regs_len,
13865 	.get_regs		= tg3_get_regs,
13866 	.get_wol		= tg3_get_wol,
13867 	.set_wol		= tg3_set_wol,
13868 	.get_msglevel		= tg3_get_msglevel,
13869 	.set_msglevel		= tg3_set_msglevel,
13870 	.nway_reset		= tg3_nway_reset,
13871 	.get_link		= ethtool_op_get_link,
13872 	.get_eeprom_len		= tg3_get_eeprom_len,
13873 	.get_eeprom		= tg3_get_eeprom,
13874 	.set_eeprom		= tg3_set_eeprom,
13875 	.get_ringparam		= tg3_get_ringparam,
13876 	.set_ringparam		= tg3_set_ringparam,
13877 	.get_pauseparam		= tg3_get_pauseparam,
13878 	.set_pauseparam		= tg3_set_pauseparam,
13879 	.self_test		= tg3_self_test,
13880 	.get_strings		= tg3_get_strings,
13881 	.set_phys_id		= tg3_set_phys_id,
13882 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13883 	.get_coalesce		= tg3_get_coalesce,
13884 	.set_coalesce		= tg3_set_coalesce,
13885 	.get_sset_count		= tg3_get_sset_count,
13886 	.get_rxnfc		= tg3_get_rxnfc,
13887 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13888 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13889 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13890 	.get_channels		= tg3_get_channels,
13891 	.set_channels		= tg3_set_channels,
13892 	.get_ts_info		= tg3_get_ts_info,
13893 	.get_eee		= tg3_get_eee,
13894 	.set_eee		= tg3_set_eee,
13895 };
13896 
13897 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13898 						struct rtnl_link_stats64 *stats)
13899 {
13900 	struct tg3 *tp = netdev_priv(dev);
13901 
13902 	spin_lock_bh(&tp->lock);
13903 	if (!tp->hw_stats) {
13904 		spin_unlock_bh(&tp->lock);
13905 		return &tp->net_stats_prev;
13906 	}
13907 
13908 	tg3_get_nstats(tp, stats);
13909 	spin_unlock_bh(&tp->lock);
13910 
13911 	return stats;
13912 }
13913 
13914 static void tg3_set_rx_mode(struct net_device *dev)
13915 {
13916 	struct tg3 *tp = netdev_priv(dev);
13917 
13918 	if (!netif_running(dev))
13919 		return;
13920 
13921 	tg3_full_lock(tp, 0);
13922 	__tg3_set_rx_mode(dev);
13923 	tg3_full_unlock(tp);
13924 }
13925 
13926 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13927 			       int new_mtu)
13928 {
13929 	dev->mtu = new_mtu;
13930 
13931 	if (new_mtu > ETH_DATA_LEN) {
13932 		if (tg3_flag(tp, 5780_CLASS)) {
13933 			netdev_update_features(dev);
13934 			tg3_flag_clear(tp, TSO_CAPABLE);
13935 		} else {
13936 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13937 		}
13938 	} else {
13939 		if (tg3_flag(tp, 5780_CLASS)) {
13940 			tg3_flag_set(tp, TSO_CAPABLE);
13941 			netdev_update_features(dev);
13942 		}
13943 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13944 	}
13945 }
13946 
13947 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13948 {
13949 	struct tg3 *tp = netdev_priv(dev);
13950 	int err;
13951 	bool reset_phy = false;
13952 
13953 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13954 		return -EINVAL;
13955 
13956 	if (!netif_running(dev)) {
13957 		/* We'll just catch it later when the
13958 		 * device is up'd.
13959 		 */
13960 		tg3_set_mtu(dev, tp, new_mtu);
13961 		return 0;
13962 	}
13963 
13964 	tg3_phy_stop(tp);
13965 
13966 	tg3_netif_stop(tp);
13967 
13968 	tg3_full_lock(tp, 1);
13969 
13970 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13971 
13972 	tg3_set_mtu(dev, tp, new_mtu);
13973 
13974 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13975 	 * breaks all requests to 256 bytes.
13976 	 */
13977 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13978 		reset_phy = true;
13979 
13980 	err = tg3_restart_hw(tp, reset_phy);
13981 
13982 	if (!err)
13983 		tg3_netif_start(tp);
13984 
13985 	tg3_full_unlock(tp);
13986 
13987 	if (!err)
13988 		tg3_phy_start(tp);
13989 
13990 	return err;
13991 }
13992 
13993 static const struct net_device_ops tg3_netdev_ops = {
13994 	.ndo_open		= tg3_open,
13995 	.ndo_stop		= tg3_close,
13996 	.ndo_start_xmit		= tg3_start_xmit,
13997 	.ndo_get_stats64	= tg3_get_stats64,
13998 	.ndo_validate_addr	= eth_validate_addr,
13999 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14000 	.ndo_set_mac_address	= tg3_set_mac_addr,
14001 	.ndo_do_ioctl		= tg3_ioctl,
14002 	.ndo_tx_timeout		= tg3_tx_timeout,
14003 	.ndo_change_mtu		= tg3_change_mtu,
14004 	.ndo_fix_features	= tg3_fix_features,
14005 	.ndo_set_features	= tg3_set_features,
14006 #ifdef CONFIG_NET_POLL_CONTROLLER
14007 	.ndo_poll_controller	= tg3_poll_controller,
14008 #endif
14009 };
14010 
14011 static void tg3_get_eeprom_size(struct tg3 *tp)
14012 {
14013 	u32 cursize, val, magic;
14014 
14015 	tp->nvram_size = EEPROM_CHIP_SIZE;
14016 
14017 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14018 		return;
14019 
14020 	if ((magic != TG3_EEPROM_MAGIC) &&
14021 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14022 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14023 		return;
14024 
14025 	/*
14026 	 * Size the chip by reading offsets at increasing powers of two.
14027 	 * When we encounter our validation signature, we know the addressing
14028 	 * has wrapped around, and thus have our chip size.
14029 	 */
14030 	cursize = 0x10;
14031 
14032 	while (cursize < tp->nvram_size) {
14033 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14034 			return;
14035 
14036 		if (val == magic)
14037 			break;
14038 
14039 		cursize <<= 1;
14040 	}
14041 
14042 	tp->nvram_size = cursize;
14043 }
14044 
14045 static void tg3_get_nvram_size(struct tg3 *tp)
14046 {
14047 	u32 val;
14048 
14049 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14050 		return;
14051 
14052 	/* Selfboot format */
14053 	if (val != TG3_EEPROM_MAGIC) {
14054 		tg3_get_eeprom_size(tp);
14055 		return;
14056 	}
14057 
14058 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14059 		if (val != 0) {
14060 			/* This is confusing.  We want to operate on the
14061 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14062 			 * call will read from NVRAM and byteswap the data
14063 			 * according to the byteswapping settings for all
14064 			 * other register accesses.  This ensures the data we
14065 			 * want will always reside in the lower 16-bits.
14066 			 * However, the data in NVRAM is in LE format, which
14067 			 * means the data from the NVRAM read will always be
14068 			 * opposite the endianness of the CPU.  The 16-bit
14069 			 * byteswap then brings the data to CPU endianness.
14070 			 */
14071 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14072 			return;
14073 		}
14074 	}
14075 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14076 }
14077 
14078 static void tg3_get_nvram_info(struct tg3 *tp)
14079 {
14080 	u32 nvcfg1;
14081 
14082 	nvcfg1 = tr32(NVRAM_CFG1);
14083 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14084 		tg3_flag_set(tp, FLASH);
14085 	} else {
14086 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14087 		tw32(NVRAM_CFG1, nvcfg1);
14088 	}
14089 
14090 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14091 	    tg3_flag(tp, 5780_CLASS)) {
14092 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14093 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14094 			tp->nvram_jedecnum = JEDEC_ATMEL;
14095 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14096 			tg3_flag_set(tp, NVRAM_BUFFERED);
14097 			break;
14098 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14099 			tp->nvram_jedecnum = JEDEC_ATMEL;
14100 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14101 			break;
14102 		case FLASH_VENDOR_ATMEL_EEPROM:
14103 			tp->nvram_jedecnum = JEDEC_ATMEL;
14104 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14105 			tg3_flag_set(tp, NVRAM_BUFFERED);
14106 			break;
14107 		case FLASH_VENDOR_ST:
14108 			tp->nvram_jedecnum = JEDEC_ST;
14109 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14110 			tg3_flag_set(tp, NVRAM_BUFFERED);
14111 			break;
14112 		case FLASH_VENDOR_SAIFUN:
14113 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14114 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14115 			break;
14116 		case FLASH_VENDOR_SST_SMALL:
14117 		case FLASH_VENDOR_SST_LARGE:
14118 			tp->nvram_jedecnum = JEDEC_SST;
14119 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14120 			break;
14121 		}
14122 	} else {
14123 		tp->nvram_jedecnum = JEDEC_ATMEL;
14124 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14125 		tg3_flag_set(tp, NVRAM_BUFFERED);
14126 	}
14127 }
14128 
14129 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14130 {
14131 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14132 	case FLASH_5752PAGE_SIZE_256:
14133 		tp->nvram_pagesize = 256;
14134 		break;
14135 	case FLASH_5752PAGE_SIZE_512:
14136 		tp->nvram_pagesize = 512;
14137 		break;
14138 	case FLASH_5752PAGE_SIZE_1K:
14139 		tp->nvram_pagesize = 1024;
14140 		break;
14141 	case FLASH_5752PAGE_SIZE_2K:
14142 		tp->nvram_pagesize = 2048;
14143 		break;
14144 	case FLASH_5752PAGE_SIZE_4K:
14145 		tp->nvram_pagesize = 4096;
14146 		break;
14147 	case FLASH_5752PAGE_SIZE_264:
14148 		tp->nvram_pagesize = 264;
14149 		break;
14150 	case FLASH_5752PAGE_SIZE_528:
14151 		tp->nvram_pagesize = 528;
14152 		break;
14153 	}
14154 }
14155 
14156 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14157 {
14158 	u32 nvcfg1;
14159 
14160 	nvcfg1 = tr32(NVRAM_CFG1);
14161 
14162 	/* NVRAM protection for TPM */
14163 	if (nvcfg1 & (1 << 27))
14164 		tg3_flag_set(tp, PROTECTED_NVRAM);
14165 
14166 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14167 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14168 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14169 		tp->nvram_jedecnum = JEDEC_ATMEL;
14170 		tg3_flag_set(tp, NVRAM_BUFFERED);
14171 		break;
14172 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14173 		tp->nvram_jedecnum = JEDEC_ATMEL;
14174 		tg3_flag_set(tp, NVRAM_BUFFERED);
14175 		tg3_flag_set(tp, FLASH);
14176 		break;
14177 	case FLASH_5752VENDOR_ST_M45PE10:
14178 	case FLASH_5752VENDOR_ST_M45PE20:
14179 	case FLASH_5752VENDOR_ST_M45PE40:
14180 		tp->nvram_jedecnum = JEDEC_ST;
14181 		tg3_flag_set(tp, NVRAM_BUFFERED);
14182 		tg3_flag_set(tp, FLASH);
14183 		break;
14184 	}
14185 
14186 	if (tg3_flag(tp, FLASH)) {
14187 		tg3_nvram_get_pagesize(tp, nvcfg1);
14188 	} else {
14189 		/* For eeprom, set pagesize to maximum eeprom size */
14190 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14191 
14192 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14193 		tw32(NVRAM_CFG1, nvcfg1);
14194 	}
14195 }
14196 
14197 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14198 {
14199 	u32 nvcfg1, protect = 0;
14200 
14201 	nvcfg1 = tr32(NVRAM_CFG1);
14202 
14203 	/* NVRAM protection for TPM */
14204 	if (nvcfg1 & (1 << 27)) {
14205 		tg3_flag_set(tp, PROTECTED_NVRAM);
14206 		protect = 1;
14207 	}
14208 
14209 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14210 	switch (nvcfg1) {
14211 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14212 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14213 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14214 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14215 		tp->nvram_jedecnum = JEDEC_ATMEL;
14216 		tg3_flag_set(tp, NVRAM_BUFFERED);
14217 		tg3_flag_set(tp, FLASH);
14218 		tp->nvram_pagesize = 264;
14219 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14220 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14221 			tp->nvram_size = (protect ? 0x3e200 :
14222 					  TG3_NVRAM_SIZE_512KB);
14223 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14224 			tp->nvram_size = (protect ? 0x1f200 :
14225 					  TG3_NVRAM_SIZE_256KB);
14226 		else
14227 			tp->nvram_size = (protect ? 0x1f200 :
14228 					  TG3_NVRAM_SIZE_128KB);
14229 		break;
14230 	case FLASH_5752VENDOR_ST_M45PE10:
14231 	case FLASH_5752VENDOR_ST_M45PE20:
14232 	case FLASH_5752VENDOR_ST_M45PE40:
14233 		tp->nvram_jedecnum = JEDEC_ST;
14234 		tg3_flag_set(tp, NVRAM_BUFFERED);
14235 		tg3_flag_set(tp, FLASH);
14236 		tp->nvram_pagesize = 256;
14237 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14238 			tp->nvram_size = (protect ?
14239 					  TG3_NVRAM_SIZE_64KB :
14240 					  TG3_NVRAM_SIZE_128KB);
14241 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14242 			tp->nvram_size = (protect ?
14243 					  TG3_NVRAM_SIZE_64KB :
14244 					  TG3_NVRAM_SIZE_256KB);
14245 		else
14246 			tp->nvram_size = (protect ?
14247 					  TG3_NVRAM_SIZE_128KB :
14248 					  TG3_NVRAM_SIZE_512KB);
14249 		break;
14250 	}
14251 }
14252 
14253 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14254 {
14255 	u32 nvcfg1;
14256 
14257 	nvcfg1 = tr32(NVRAM_CFG1);
14258 
14259 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14260 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14261 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14262 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14263 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14264 		tp->nvram_jedecnum = JEDEC_ATMEL;
14265 		tg3_flag_set(tp, NVRAM_BUFFERED);
14266 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14267 
14268 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14269 		tw32(NVRAM_CFG1, nvcfg1);
14270 		break;
14271 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14272 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14273 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14274 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14275 		tp->nvram_jedecnum = JEDEC_ATMEL;
14276 		tg3_flag_set(tp, NVRAM_BUFFERED);
14277 		tg3_flag_set(tp, FLASH);
14278 		tp->nvram_pagesize = 264;
14279 		break;
14280 	case FLASH_5752VENDOR_ST_M45PE10:
14281 	case FLASH_5752VENDOR_ST_M45PE20:
14282 	case FLASH_5752VENDOR_ST_M45PE40:
14283 		tp->nvram_jedecnum = JEDEC_ST;
14284 		tg3_flag_set(tp, NVRAM_BUFFERED);
14285 		tg3_flag_set(tp, FLASH);
14286 		tp->nvram_pagesize = 256;
14287 		break;
14288 	}
14289 }
14290 
14291 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14292 {
14293 	u32 nvcfg1, protect = 0;
14294 
14295 	nvcfg1 = tr32(NVRAM_CFG1);
14296 
14297 	/* NVRAM protection for TPM */
14298 	if (nvcfg1 & (1 << 27)) {
14299 		tg3_flag_set(tp, PROTECTED_NVRAM);
14300 		protect = 1;
14301 	}
14302 
14303 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14304 	switch (nvcfg1) {
14305 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14306 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14307 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14308 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14309 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14310 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14311 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14312 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14313 		tp->nvram_jedecnum = JEDEC_ATMEL;
14314 		tg3_flag_set(tp, NVRAM_BUFFERED);
14315 		tg3_flag_set(tp, FLASH);
14316 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14317 		tp->nvram_pagesize = 256;
14318 		break;
14319 	case FLASH_5761VENDOR_ST_A_M45PE20:
14320 	case FLASH_5761VENDOR_ST_A_M45PE40:
14321 	case FLASH_5761VENDOR_ST_A_M45PE80:
14322 	case FLASH_5761VENDOR_ST_A_M45PE16:
14323 	case FLASH_5761VENDOR_ST_M_M45PE20:
14324 	case FLASH_5761VENDOR_ST_M_M45PE40:
14325 	case FLASH_5761VENDOR_ST_M_M45PE80:
14326 	case FLASH_5761VENDOR_ST_M_M45PE16:
14327 		tp->nvram_jedecnum = JEDEC_ST;
14328 		tg3_flag_set(tp, NVRAM_BUFFERED);
14329 		tg3_flag_set(tp, FLASH);
14330 		tp->nvram_pagesize = 256;
14331 		break;
14332 	}
14333 
14334 	if (protect) {
14335 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14336 	} else {
14337 		switch (nvcfg1) {
14338 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14339 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14340 		case FLASH_5761VENDOR_ST_A_M45PE16:
14341 		case FLASH_5761VENDOR_ST_M_M45PE16:
14342 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14343 			break;
14344 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14345 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14346 		case FLASH_5761VENDOR_ST_A_M45PE80:
14347 		case FLASH_5761VENDOR_ST_M_M45PE80:
14348 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14349 			break;
14350 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14351 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14352 		case FLASH_5761VENDOR_ST_A_M45PE40:
14353 		case FLASH_5761VENDOR_ST_M_M45PE40:
14354 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14355 			break;
14356 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14357 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14358 		case FLASH_5761VENDOR_ST_A_M45PE20:
14359 		case FLASH_5761VENDOR_ST_M_M45PE20:
14360 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14361 			break;
14362 		}
14363 	}
14364 }
14365 
14366 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14367 {
14368 	tp->nvram_jedecnum = JEDEC_ATMEL;
14369 	tg3_flag_set(tp, NVRAM_BUFFERED);
14370 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14371 }
14372 
14373 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14374 {
14375 	u32 nvcfg1;
14376 
14377 	nvcfg1 = tr32(NVRAM_CFG1);
14378 
14379 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14380 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14381 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14382 		tp->nvram_jedecnum = JEDEC_ATMEL;
14383 		tg3_flag_set(tp, NVRAM_BUFFERED);
14384 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14385 
14386 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14387 		tw32(NVRAM_CFG1, nvcfg1);
14388 		return;
14389 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14390 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14391 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14392 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14393 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14394 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14395 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14396 		tp->nvram_jedecnum = JEDEC_ATMEL;
14397 		tg3_flag_set(tp, NVRAM_BUFFERED);
14398 		tg3_flag_set(tp, FLASH);
14399 
14400 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14401 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14402 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14403 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14404 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14405 			break;
14406 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14407 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14408 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14409 			break;
14410 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14411 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14412 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14413 			break;
14414 		}
14415 		break;
14416 	case FLASH_5752VENDOR_ST_M45PE10:
14417 	case FLASH_5752VENDOR_ST_M45PE20:
14418 	case FLASH_5752VENDOR_ST_M45PE40:
14419 		tp->nvram_jedecnum = JEDEC_ST;
14420 		tg3_flag_set(tp, NVRAM_BUFFERED);
14421 		tg3_flag_set(tp, FLASH);
14422 
14423 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14424 		case FLASH_5752VENDOR_ST_M45PE10:
14425 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14426 			break;
14427 		case FLASH_5752VENDOR_ST_M45PE20:
14428 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14429 			break;
14430 		case FLASH_5752VENDOR_ST_M45PE40:
14431 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14432 			break;
14433 		}
14434 		break;
14435 	default:
14436 		tg3_flag_set(tp, NO_NVRAM);
14437 		return;
14438 	}
14439 
14440 	tg3_nvram_get_pagesize(tp, nvcfg1);
14441 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14442 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14443 }
14444 
14445 
14446 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14447 {
14448 	u32 nvcfg1;
14449 
14450 	nvcfg1 = tr32(NVRAM_CFG1);
14451 
14452 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14453 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14454 	case FLASH_5717VENDOR_MICRO_EEPROM:
14455 		tp->nvram_jedecnum = JEDEC_ATMEL;
14456 		tg3_flag_set(tp, NVRAM_BUFFERED);
14457 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14458 
14459 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14460 		tw32(NVRAM_CFG1, nvcfg1);
14461 		return;
14462 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14463 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14464 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14465 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14466 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14467 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14468 	case FLASH_5717VENDOR_ATMEL_45USPT:
14469 		tp->nvram_jedecnum = JEDEC_ATMEL;
14470 		tg3_flag_set(tp, NVRAM_BUFFERED);
14471 		tg3_flag_set(tp, FLASH);
14472 
14473 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14474 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14475 			/* Detect size with tg3_nvram_get_size() */
14476 			break;
14477 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14478 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14479 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14480 			break;
14481 		default:
14482 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14483 			break;
14484 		}
14485 		break;
14486 	case FLASH_5717VENDOR_ST_M_M25PE10:
14487 	case FLASH_5717VENDOR_ST_A_M25PE10:
14488 	case FLASH_5717VENDOR_ST_M_M45PE10:
14489 	case FLASH_5717VENDOR_ST_A_M45PE10:
14490 	case FLASH_5717VENDOR_ST_M_M25PE20:
14491 	case FLASH_5717VENDOR_ST_A_M25PE20:
14492 	case FLASH_5717VENDOR_ST_M_M45PE20:
14493 	case FLASH_5717VENDOR_ST_A_M45PE20:
14494 	case FLASH_5717VENDOR_ST_25USPT:
14495 	case FLASH_5717VENDOR_ST_45USPT:
14496 		tp->nvram_jedecnum = JEDEC_ST;
14497 		tg3_flag_set(tp, NVRAM_BUFFERED);
14498 		tg3_flag_set(tp, FLASH);
14499 
14500 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14501 		case FLASH_5717VENDOR_ST_M_M25PE20:
14502 		case FLASH_5717VENDOR_ST_M_M45PE20:
14503 			/* Detect size with tg3_nvram_get_size() */
14504 			break;
14505 		case FLASH_5717VENDOR_ST_A_M25PE20:
14506 		case FLASH_5717VENDOR_ST_A_M45PE20:
14507 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14508 			break;
14509 		default:
14510 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14511 			break;
14512 		}
14513 		break;
14514 	default:
14515 		tg3_flag_set(tp, NO_NVRAM);
14516 		return;
14517 	}
14518 
14519 	tg3_nvram_get_pagesize(tp, nvcfg1);
14520 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14521 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14522 }
14523 
14524 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14525 {
14526 	u32 nvcfg1, nvmpinstrp;
14527 
14528 	nvcfg1 = tr32(NVRAM_CFG1);
14529 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14530 
14531 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14532 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14533 			tg3_flag_set(tp, NO_NVRAM);
14534 			return;
14535 		}
14536 
14537 		switch (nvmpinstrp) {
14538 		case FLASH_5762_EEPROM_HD:
14539 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14540 			break;
14541 		case FLASH_5762_EEPROM_LD:
14542 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14543 			break;
14544 		case FLASH_5720VENDOR_M_ST_M45PE20:
14545 			/* This pinstrap supports multiple sizes, so force it
14546 			 * to read the actual size from location 0xf0.
14547 			 */
14548 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14549 			break;
14550 		}
14551 	}
14552 
14553 	switch (nvmpinstrp) {
14554 	case FLASH_5720_EEPROM_HD:
14555 	case FLASH_5720_EEPROM_LD:
14556 		tp->nvram_jedecnum = JEDEC_ATMEL;
14557 		tg3_flag_set(tp, NVRAM_BUFFERED);
14558 
14559 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14560 		tw32(NVRAM_CFG1, nvcfg1);
14561 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14562 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14563 		else
14564 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14565 		return;
14566 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14567 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14568 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14569 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14570 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14571 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14572 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14573 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14574 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14575 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14576 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14577 	case FLASH_5720VENDOR_ATMEL_45USPT:
14578 		tp->nvram_jedecnum = JEDEC_ATMEL;
14579 		tg3_flag_set(tp, NVRAM_BUFFERED);
14580 		tg3_flag_set(tp, FLASH);
14581 
14582 		switch (nvmpinstrp) {
14583 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14584 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14585 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14586 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14587 			break;
14588 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14589 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14590 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14591 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14592 			break;
14593 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14594 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14595 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14596 			break;
14597 		default:
14598 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14599 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14600 			break;
14601 		}
14602 		break;
14603 	case FLASH_5720VENDOR_M_ST_M25PE10:
14604 	case FLASH_5720VENDOR_M_ST_M45PE10:
14605 	case FLASH_5720VENDOR_A_ST_M25PE10:
14606 	case FLASH_5720VENDOR_A_ST_M45PE10:
14607 	case FLASH_5720VENDOR_M_ST_M25PE20:
14608 	case FLASH_5720VENDOR_M_ST_M45PE20:
14609 	case FLASH_5720VENDOR_A_ST_M25PE20:
14610 	case FLASH_5720VENDOR_A_ST_M45PE20:
14611 	case FLASH_5720VENDOR_M_ST_M25PE40:
14612 	case FLASH_5720VENDOR_M_ST_M45PE40:
14613 	case FLASH_5720VENDOR_A_ST_M25PE40:
14614 	case FLASH_5720VENDOR_A_ST_M45PE40:
14615 	case FLASH_5720VENDOR_M_ST_M25PE80:
14616 	case FLASH_5720VENDOR_M_ST_M45PE80:
14617 	case FLASH_5720VENDOR_A_ST_M25PE80:
14618 	case FLASH_5720VENDOR_A_ST_M45PE80:
14619 	case FLASH_5720VENDOR_ST_25USPT:
14620 	case FLASH_5720VENDOR_ST_45USPT:
14621 		tp->nvram_jedecnum = JEDEC_ST;
14622 		tg3_flag_set(tp, NVRAM_BUFFERED);
14623 		tg3_flag_set(tp, FLASH);
14624 
14625 		switch (nvmpinstrp) {
14626 		case FLASH_5720VENDOR_M_ST_M25PE20:
14627 		case FLASH_5720VENDOR_M_ST_M45PE20:
14628 		case FLASH_5720VENDOR_A_ST_M25PE20:
14629 		case FLASH_5720VENDOR_A_ST_M45PE20:
14630 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14631 			break;
14632 		case FLASH_5720VENDOR_M_ST_M25PE40:
14633 		case FLASH_5720VENDOR_M_ST_M45PE40:
14634 		case FLASH_5720VENDOR_A_ST_M25PE40:
14635 		case FLASH_5720VENDOR_A_ST_M45PE40:
14636 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14637 			break;
14638 		case FLASH_5720VENDOR_M_ST_M25PE80:
14639 		case FLASH_5720VENDOR_M_ST_M45PE80:
14640 		case FLASH_5720VENDOR_A_ST_M25PE80:
14641 		case FLASH_5720VENDOR_A_ST_M45PE80:
14642 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14643 			break;
14644 		default:
14645 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14646 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14647 			break;
14648 		}
14649 		break;
14650 	default:
14651 		tg3_flag_set(tp, NO_NVRAM);
14652 		return;
14653 	}
14654 
14655 	tg3_nvram_get_pagesize(tp, nvcfg1);
14656 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14657 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14658 
14659 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14660 		u32 val;
14661 
14662 		if (tg3_nvram_read(tp, 0, &val))
14663 			return;
14664 
14665 		if (val != TG3_EEPROM_MAGIC &&
14666 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14667 			tg3_flag_set(tp, NO_NVRAM);
14668 	}
14669 }
14670 
14671 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14672 static void tg3_nvram_init(struct tg3 *tp)
14673 {
14674 	if (tg3_flag(tp, IS_SSB_CORE)) {
14675 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14676 		tg3_flag_clear(tp, NVRAM);
14677 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14678 		tg3_flag_set(tp, NO_NVRAM);
14679 		return;
14680 	}
14681 
14682 	tw32_f(GRC_EEPROM_ADDR,
14683 	     (EEPROM_ADDR_FSM_RESET |
14684 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14685 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14686 
14687 	msleep(1);
14688 
14689 	/* Enable seeprom accesses. */
14690 	tw32_f(GRC_LOCAL_CTRL,
14691 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14692 	udelay(100);
14693 
14694 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14695 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14696 		tg3_flag_set(tp, NVRAM);
14697 
14698 		if (tg3_nvram_lock(tp)) {
14699 			netdev_warn(tp->dev,
14700 				    "Cannot get nvram lock, %s failed\n",
14701 				    __func__);
14702 			return;
14703 		}
14704 		tg3_enable_nvram_access(tp);
14705 
14706 		tp->nvram_size = 0;
14707 
14708 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14709 			tg3_get_5752_nvram_info(tp);
14710 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14711 			tg3_get_5755_nvram_info(tp);
14712 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14713 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14714 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14715 			tg3_get_5787_nvram_info(tp);
14716 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14717 			tg3_get_5761_nvram_info(tp);
14718 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14719 			tg3_get_5906_nvram_info(tp);
14720 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14721 			 tg3_flag(tp, 57765_CLASS))
14722 			tg3_get_57780_nvram_info(tp);
14723 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14724 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14725 			tg3_get_5717_nvram_info(tp);
14726 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14727 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14728 			tg3_get_5720_nvram_info(tp);
14729 		else
14730 			tg3_get_nvram_info(tp);
14731 
14732 		if (tp->nvram_size == 0)
14733 			tg3_get_nvram_size(tp);
14734 
14735 		tg3_disable_nvram_access(tp);
14736 		tg3_nvram_unlock(tp);
14737 
14738 	} else {
14739 		tg3_flag_clear(tp, NVRAM);
14740 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14741 
14742 		tg3_get_eeprom_size(tp);
14743 	}
14744 }
14745 
14746 struct subsys_tbl_ent {
14747 	u16 subsys_vendor, subsys_devid;
14748 	u32 phy_id;
14749 };
14750 
14751 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14752 	/* Broadcom boards. */
14753 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14754 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14755 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14756 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14757 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14758 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14759 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14760 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14761 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14762 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14763 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14764 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14765 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14766 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14767 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14768 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14769 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14770 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14771 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14772 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14773 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14774 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14775 
14776 	/* 3com boards. */
14777 	{ TG3PCI_SUBVENDOR_ID_3COM,
14778 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14779 	{ TG3PCI_SUBVENDOR_ID_3COM,
14780 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14781 	{ TG3PCI_SUBVENDOR_ID_3COM,
14782 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14783 	{ TG3PCI_SUBVENDOR_ID_3COM,
14784 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14785 	{ TG3PCI_SUBVENDOR_ID_3COM,
14786 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14787 
14788 	/* DELL boards. */
14789 	{ TG3PCI_SUBVENDOR_ID_DELL,
14790 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14791 	{ TG3PCI_SUBVENDOR_ID_DELL,
14792 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14793 	{ TG3PCI_SUBVENDOR_ID_DELL,
14794 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14795 	{ TG3PCI_SUBVENDOR_ID_DELL,
14796 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14797 
14798 	/* Compaq boards. */
14799 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14800 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14801 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14802 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14803 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14804 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14805 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14806 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14807 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14808 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14809 
14810 	/* IBM boards. */
14811 	{ TG3PCI_SUBVENDOR_ID_IBM,
14812 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14813 };
14814 
14815 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14816 {
14817 	int i;
14818 
14819 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14820 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14821 		     tp->pdev->subsystem_vendor) &&
14822 		    (subsys_id_to_phy_id[i].subsys_devid ==
14823 		     tp->pdev->subsystem_device))
14824 			return &subsys_id_to_phy_id[i];
14825 	}
14826 	return NULL;
14827 }
14828 
14829 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14830 {
14831 	u32 val;
14832 
14833 	tp->phy_id = TG3_PHY_ID_INVALID;
14834 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14835 
14836 	/* Assume an onboard device and WOL capable by default.  */
14837 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14838 	tg3_flag_set(tp, WOL_CAP);
14839 
14840 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14841 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14842 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14843 			tg3_flag_set(tp, IS_NIC);
14844 		}
14845 		val = tr32(VCPU_CFGSHDW);
14846 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14847 			tg3_flag_set(tp, ASPM_WORKAROUND);
14848 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14849 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14850 			tg3_flag_set(tp, WOL_ENABLE);
14851 			device_set_wakeup_enable(&tp->pdev->dev, true);
14852 		}
14853 		goto done;
14854 	}
14855 
14856 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14857 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14858 		u32 nic_cfg, led_cfg;
14859 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14860 		int eeprom_phy_serdes = 0;
14861 
14862 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14863 		tp->nic_sram_data_cfg = nic_cfg;
14864 
14865 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14866 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14867 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14868 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14869 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14870 		    (ver > 0) && (ver < 0x100))
14871 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14872 
14873 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14874 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14875 
14876 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14877 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14878 			eeprom_phy_serdes = 1;
14879 
14880 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14881 		if (nic_phy_id != 0) {
14882 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14883 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14884 
14885 			eeprom_phy_id  = (id1 >> 16) << 10;
14886 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14887 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14888 		} else
14889 			eeprom_phy_id = 0;
14890 
14891 		tp->phy_id = eeprom_phy_id;
14892 		if (eeprom_phy_serdes) {
14893 			if (!tg3_flag(tp, 5705_PLUS))
14894 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14895 			else
14896 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14897 		}
14898 
14899 		if (tg3_flag(tp, 5750_PLUS))
14900 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14901 				    SHASTA_EXT_LED_MODE_MASK);
14902 		else
14903 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14904 
14905 		switch (led_cfg) {
14906 		default:
14907 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14908 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14909 			break;
14910 
14911 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14912 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14913 			break;
14914 
14915 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14916 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14917 
14918 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14919 			 * read on some older 5700/5701 bootcode.
14920 			 */
14921 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14922 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14923 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14924 
14925 			break;
14926 
14927 		case SHASTA_EXT_LED_SHARED:
14928 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14929 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14930 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14931 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14932 						 LED_CTRL_MODE_PHY_2);
14933 
14934 			if (tg3_flag(tp, 5717_PLUS) ||
14935 			    tg3_asic_rev(tp) == ASIC_REV_5762)
14936 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
14937 						LED_CTRL_BLINK_RATE_MASK;
14938 
14939 			break;
14940 
14941 		case SHASTA_EXT_LED_MAC:
14942 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14943 			break;
14944 
14945 		case SHASTA_EXT_LED_COMBO:
14946 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14947 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14948 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14949 						 LED_CTRL_MODE_PHY_2);
14950 			break;
14951 
14952 		}
14953 
14954 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14955 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14956 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14957 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14958 
14959 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14960 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14961 
14962 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14963 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14964 			if ((tp->pdev->subsystem_vendor ==
14965 			     PCI_VENDOR_ID_ARIMA) &&
14966 			    (tp->pdev->subsystem_device == 0x205a ||
14967 			     tp->pdev->subsystem_device == 0x2063))
14968 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14969 		} else {
14970 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14971 			tg3_flag_set(tp, IS_NIC);
14972 		}
14973 
14974 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14975 			tg3_flag_set(tp, ENABLE_ASF);
14976 			if (tg3_flag(tp, 5750_PLUS))
14977 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14978 		}
14979 
14980 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14981 		    tg3_flag(tp, 5750_PLUS))
14982 			tg3_flag_set(tp, ENABLE_APE);
14983 
14984 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14985 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14986 			tg3_flag_clear(tp, WOL_CAP);
14987 
14988 		if (tg3_flag(tp, WOL_CAP) &&
14989 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14990 			tg3_flag_set(tp, WOL_ENABLE);
14991 			device_set_wakeup_enable(&tp->pdev->dev, true);
14992 		}
14993 
14994 		if (cfg2 & (1 << 17))
14995 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14996 
14997 		/* serdes signal pre-emphasis in register 0x590 set by */
14998 		/* bootcode if bit 18 is set */
14999 		if (cfg2 & (1 << 18))
15000 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15001 
15002 		if ((tg3_flag(tp, 57765_PLUS) ||
15003 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15004 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15005 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15006 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15007 
15008 		if (tg3_flag(tp, PCI_EXPRESS)) {
15009 			u32 cfg3;
15010 
15011 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15012 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15013 			    !tg3_flag(tp, 57765_PLUS) &&
15014 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15015 				tg3_flag_set(tp, ASPM_WORKAROUND);
15016 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15017 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15018 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15019 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15020 		}
15021 
15022 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15023 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15024 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15025 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15026 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15027 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15028 	}
15029 done:
15030 	if (tg3_flag(tp, WOL_CAP))
15031 		device_set_wakeup_enable(&tp->pdev->dev,
15032 					 tg3_flag(tp, WOL_ENABLE));
15033 	else
15034 		device_set_wakeup_capable(&tp->pdev->dev, false);
15035 }
15036 
15037 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15038 {
15039 	int i, err;
15040 	u32 val2, off = offset * 8;
15041 
15042 	err = tg3_nvram_lock(tp);
15043 	if (err)
15044 		return err;
15045 
15046 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15047 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15048 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15049 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15050 	udelay(10);
15051 
15052 	for (i = 0; i < 100; i++) {
15053 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15054 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15055 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15056 			break;
15057 		}
15058 		udelay(10);
15059 	}
15060 
15061 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15062 
15063 	tg3_nvram_unlock(tp);
15064 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15065 		return 0;
15066 
15067 	return -EBUSY;
15068 }
15069 
15070 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15071 {
15072 	int i;
15073 	u32 val;
15074 
15075 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15076 	tw32(OTP_CTRL, cmd);
15077 
15078 	/* Wait for up to 1 ms for command to execute. */
15079 	for (i = 0; i < 100; i++) {
15080 		val = tr32(OTP_STATUS);
15081 		if (val & OTP_STATUS_CMD_DONE)
15082 			break;
15083 		udelay(10);
15084 	}
15085 
15086 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15087 }
15088 
15089 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15090  * configuration is a 32-bit value that straddles the alignment boundary.
15091  * We do two 32-bit reads and then shift and merge the results.
15092  */
15093 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15094 {
15095 	u32 bhalf_otp, thalf_otp;
15096 
15097 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15098 
15099 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15100 		return 0;
15101 
15102 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15103 
15104 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15105 		return 0;
15106 
15107 	thalf_otp = tr32(OTP_READ_DATA);
15108 
15109 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15110 
15111 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15112 		return 0;
15113 
15114 	bhalf_otp = tr32(OTP_READ_DATA);
15115 
15116 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15117 }
15118 
15119 static void tg3_phy_init_link_config(struct tg3 *tp)
15120 {
15121 	u32 adv = ADVERTISED_Autoneg;
15122 
15123 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15124 		adv |= ADVERTISED_1000baseT_Half |
15125 		       ADVERTISED_1000baseT_Full;
15126 
15127 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15128 		adv |= ADVERTISED_100baseT_Half |
15129 		       ADVERTISED_100baseT_Full |
15130 		       ADVERTISED_10baseT_Half |
15131 		       ADVERTISED_10baseT_Full |
15132 		       ADVERTISED_TP;
15133 	else
15134 		adv |= ADVERTISED_FIBRE;
15135 
15136 	tp->link_config.advertising = adv;
15137 	tp->link_config.speed = SPEED_UNKNOWN;
15138 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15139 	tp->link_config.autoneg = AUTONEG_ENABLE;
15140 	tp->link_config.active_speed = SPEED_UNKNOWN;
15141 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15142 
15143 	tp->old_link = -1;
15144 }
15145 
15146 static int tg3_phy_probe(struct tg3 *tp)
15147 {
15148 	u32 hw_phy_id_1, hw_phy_id_2;
15149 	u32 hw_phy_id, hw_phy_id_masked;
15150 	int err;
15151 
15152 	/* flow control autonegotiation is default behavior */
15153 	tg3_flag_set(tp, PAUSE_AUTONEG);
15154 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15155 
15156 	if (tg3_flag(tp, ENABLE_APE)) {
15157 		switch (tp->pci_fn) {
15158 		case 0:
15159 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15160 			break;
15161 		case 1:
15162 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15163 			break;
15164 		case 2:
15165 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15166 			break;
15167 		case 3:
15168 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15169 			break;
15170 		}
15171 	}
15172 
15173 	if (!tg3_flag(tp, ENABLE_ASF) &&
15174 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15175 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15176 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15177 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15178 
15179 	if (tg3_flag(tp, USE_PHYLIB))
15180 		return tg3_phy_init(tp);
15181 
15182 	/* Reading the PHY ID register can conflict with ASF
15183 	 * firmware access to the PHY hardware.
15184 	 */
15185 	err = 0;
15186 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15187 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15188 	} else {
15189 		/* Now read the physical PHY_ID from the chip and verify
15190 		 * that it is sane.  If it doesn't look good, we fall back
15191 		 * to either the hard-coded table based PHY_ID and failing
15192 		 * that the value found in the eeprom area.
15193 		 */
15194 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15195 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15196 
15197 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15198 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15199 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15200 
15201 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15202 	}
15203 
15204 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15205 		tp->phy_id = hw_phy_id;
15206 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15207 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15208 		else
15209 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15210 	} else {
15211 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15212 			/* Do nothing, phy ID already set up in
15213 			 * tg3_get_eeprom_hw_cfg().
15214 			 */
15215 		} else {
15216 			struct subsys_tbl_ent *p;
15217 
15218 			/* No eeprom signature?  Try the hardcoded
15219 			 * subsys device table.
15220 			 */
15221 			p = tg3_lookup_by_subsys(tp);
15222 			if (p) {
15223 				tp->phy_id = p->phy_id;
15224 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15225 				/* For now we saw the IDs 0xbc050cd0,
15226 				 * 0xbc050f80 and 0xbc050c30 on devices
15227 				 * connected to an BCM4785 and there are
15228 				 * probably more. Just assume that the phy is
15229 				 * supported when it is connected to a SSB core
15230 				 * for now.
15231 				 */
15232 				return -ENODEV;
15233 			}
15234 
15235 			if (!tp->phy_id ||
15236 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15237 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15238 		}
15239 	}
15240 
15241 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15242 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15243 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15244 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15245 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15246 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15247 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15248 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15249 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15250 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15251 
15252 		tp->eee.supported = SUPPORTED_100baseT_Full |
15253 				    SUPPORTED_1000baseT_Full;
15254 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15255 				     ADVERTISED_1000baseT_Full;
15256 		tp->eee.eee_enabled = 1;
15257 		tp->eee.tx_lpi_enabled = 1;
15258 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15259 	}
15260 
15261 	tg3_phy_init_link_config(tp);
15262 
15263 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15264 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15265 	    !tg3_flag(tp, ENABLE_APE) &&
15266 	    !tg3_flag(tp, ENABLE_ASF)) {
15267 		u32 bmsr, dummy;
15268 
15269 		tg3_readphy(tp, MII_BMSR, &bmsr);
15270 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15271 		    (bmsr & BMSR_LSTATUS))
15272 			goto skip_phy_reset;
15273 
15274 		err = tg3_phy_reset(tp);
15275 		if (err)
15276 			return err;
15277 
15278 		tg3_phy_set_wirespeed(tp);
15279 
15280 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15281 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15282 					    tp->link_config.flowctrl);
15283 
15284 			tg3_writephy(tp, MII_BMCR,
15285 				     BMCR_ANENABLE | BMCR_ANRESTART);
15286 		}
15287 	}
15288 
15289 skip_phy_reset:
15290 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15291 		err = tg3_init_5401phy_dsp(tp);
15292 		if (err)
15293 			return err;
15294 
15295 		err = tg3_init_5401phy_dsp(tp);
15296 	}
15297 
15298 	return err;
15299 }
15300 
15301 static void tg3_read_vpd(struct tg3 *tp)
15302 {
15303 	u8 *vpd_data;
15304 	unsigned int block_end, rosize, len;
15305 	u32 vpdlen;
15306 	int j, i = 0;
15307 
15308 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15309 	if (!vpd_data)
15310 		goto out_no_vpd;
15311 
15312 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15313 	if (i < 0)
15314 		goto out_not_found;
15315 
15316 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15317 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15318 	i += PCI_VPD_LRDT_TAG_SIZE;
15319 
15320 	if (block_end > vpdlen)
15321 		goto out_not_found;
15322 
15323 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15324 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15325 	if (j > 0) {
15326 		len = pci_vpd_info_field_size(&vpd_data[j]);
15327 
15328 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15329 		if (j + len > block_end || len != 4 ||
15330 		    memcmp(&vpd_data[j], "1028", 4))
15331 			goto partno;
15332 
15333 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15334 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15335 		if (j < 0)
15336 			goto partno;
15337 
15338 		len = pci_vpd_info_field_size(&vpd_data[j]);
15339 
15340 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15341 		if (j + len > block_end)
15342 			goto partno;
15343 
15344 		if (len >= sizeof(tp->fw_ver))
15345 			len = sizeof(tp->fw_ver) - 1;
15346 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15347 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15348 			 &vpd_data[j]);
15349 	}
15350 
15351 partno:
15352 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15353 				      PCI_VPD_RO_KEYWORD_PARTNO);
15354 	if (i < 0)
15355 		goto out_not_found;
15356 
15357 	len = pci_vpd_info_field_size(&vpd_data[i]);
15358 
15359 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15360 	if (len > TG3_BPN_SIZE ||
15361 	    (len + i) > vpdlen)
15362 		goto out_not_found;
15363 
15364 	memcpy(tp->board_part_number, &vpd_data[i], len);
15365 
15366 out_not_found:
15367 	kfree(vpd_data);
15368 	if (tp->board_part_number[0])
15369 		return;
15370 
15371 out_no_vpd:
15372 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15373 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15374 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15375 			strcpy(tp->board_part_number, "BCM5717");
15376 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15377 			strcpy(tp->board_part_number, "BCM5718");
15378 		else
15379 			goto nomatch;
15380 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15381 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15382 			strcpy(tp->board_part_number, "BCM57780");
15383 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15384 			strcpy(tp->board_part_number, "BCM57760");
15385 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15386 			strcpy(tp->board_part_number, "BCM57790");
15387 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15388 			strcpy(tp->board_part_number, "BCM57788");
15389 		else
15390 			goto nomatch;
15391 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15392 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15393 			strcpy(tp->board_part_number, "BCM57761");
15394 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15395 			strcpy(tp->board_part_number, "BCM57765");
15396 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15397 			strcpy(tp->board_part_number, "BCM57781");
15398 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15399 			strcpy(tp->board_part_number, "BCM57785");
15400 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15401 			strcpy(tp->board_part_number, "BCM57791");
15402 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15403 			strcpy(tp->board_part_number, "BCM57795");
15404 		else
15405 			goto nomatch;
15406 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15407 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15408 			strcpy(tp->board_part_number, "BCM57762");
15409 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15410 			strcpy(tp->board_part_number, "BCM57766");
15411 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15412 			strcpy(tp->board_part_number, "BCM57782");
15413 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15414 			strcpy(tp->board_part_number, "BCM57786");
15415 		else
15416 			goto nomatch;
15417 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15418 		strcpy(tp->board_part_number, "BCM95906");
15419 	} else {
15420 nomatch:
15421 		strcpy(tp->board_part_number, "none");
15422 	}
15423 }
15424 
15425 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15426 {
15427 	u32 val;
15428 
15429 	if (tg3_nvram_read(tp, offset, &val) ||
15430 	    (val & 0xfc000000) != 0x0c000000 ||
15431 	    tg3_nvram_read(tp, offset + 4, &val) ||
15432 	    val != 0)
15433 		return 0;
15434 
15435 	return 1;
15436 }
15437 
15438 static void tg3_read_bc_ver(struct tg3 *tp)
15439 {
15440 	u32 val, offset, start, ver_offset;
15441 	int i, dst_off;
15442 	bool newver = false;
15443 
15444 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15445 	    tg3_nvram_read(tp, 0x4, &start))
15446 		return;
15447 
15448 	offset = tg3_nvram_logical_addr(tp, offset);
15449 
15450 	if (tg3_nvram_read(tp, offset, &val))
15451 		return;
15452 
15453 	if ((val & 0xfc000000) == 0x0c000000) {
15454 		if (tg3_nvram_read(tp, offset + 4, &val))
15455 			return;
15456 
15457 		if (val == 0)
15458 			newver = true;
15459 	}
15460 
15461 	dst_off = strlen(tp->fw_ver);
15462 
15463 	if (newver) {
15464 		if (TG3_VER_SIZE - dst_off < 16 ||
15465 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15466 			return;
15467 
15468 		offset = offset + ver_offset - start;
15469 		for (i = 0; i < 16; i += 4) {
15470 			__be32 v;
15471 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15472 				return;
15473 
15474 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15475 		}
15476 	} else {
15477 		u32 major, minor;
15478 
15479 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15480 			return;
15481 
15482 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15483 			TG3_NVM_BCVER_MAJSFT;
15484 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15485 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15486 			 "v%d.%02d", major, minor);
15487 	}
15488 }
15489 
15490 static void tg3_read_hwsb_ver(struct tg3 *tp)
15491 {
15492 	u32 val, major, minor;
15493 
15494 	/* Use native endian representation */
15495 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15496 		return;
15497 
15498 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15499 		TG3_NVM_HWSB_CFG1_MAJSFT;
15500 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15501 		TG3_NVM_HWSB_CFG1_MINSFT;
15502 
15503 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15504 }
15505 
15506 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15507 {
15508 	u32 offset, major, minor, build;
15509 
15510 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15511 
15512 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15513 		return;
15514 
15515 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15516 	case TG3_EEPROM_SB_REVISION_0:
15517 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15518 		break;
15519 	case TG3_EEPROM_SB_REVISION_2:
15520 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15521 		break;
15522 	case TG3_EEPROM_SB_REVISION_3:
15523 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15524 		break;
15525 	case TG3_EEPROM_SB_REVISION_4:
15526 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15527 		break;
15528 	case TG3_EEPROM_SB_REVISION_5:
15529 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15530 		break;
15531 	case TG3_EEPROM_SB_REVISION_6:
15532 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15533 		break;
15534 	default:
15535 		return;
15536 	}
15537 
15538 	if (tg3_nvram_read(tp, offset, &val))
15539 		return;
15540 
15541 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15542 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15543 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15544 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15545 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15546 
15547 	if (minor > 99 || build > 26)
15548 		return;
15549 
15550 	offset = strlen(tp->fw_ver);
15551 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15552 		 " v%d.%02d", major, minor);
15553 
15554 	if (build > 0) {
15555 		offset = strlen(tp->fw_ver);
15556 		if (offset < TG3_VER_SIZE - 1)
15557 			tp->fw_ver[offset] = 'a' + build - 1;
15558 	}
15559 }
15560 
15561 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15562 {
15563 	u32 val, offset, start;
15564 	int i, vlen;
15565 
15566 	for (offset = TG3_NVM_DIR_START;
15567 	     offset < TG3_NVM_DIR_END;
15568 	     offset += TG3_NVM_DIRENT_SIZE) {
15569 		if (tg3_nvram_read(tp, offset, &val))
15570 			return;
15571 
15572 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15573 			break;
15574 	}
15575 
15576 	if (offset == TG3_NVM_DIR_END)
15577 		return;
15578 
15579 	if (!tg3_flag(tp, 5705_PLUS))
15580 		start = 0x08000000;
15581 	else if (tg3_nvram_read(tp, offset - 4, &start))
15582 		return;
15583 
15584 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15585 	    !tg3_fw_img_is_valid(tp, offset) ||
15586 	    tg3_nvram_read(tp, offset + 8, &val))
15587 		return;
15588 
15589 	offset += val - start;
15590 
15591 	vlen = strlen(tp->fw_ver);
15592 
15593 	tp->fw_ver[vlen++] = ',';
15594 	tp->fw_ver[vlen++] = ' ';
15595 
15596 	for (i = 0; i < 4; i++) {
15597 		__be32 v;
15598 		if (tg3_nvram_read_be32(tp, offset, &v))
15599 			return;
15600 
15601 		offset += sizeof(v);
15602 
15603 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15604 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15605 			break;
15606 		}
15607 
15608 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15609 		vlen += sizeof(v);
15610 	}
15611 }
15612 
15613 static void tg3_probe_ncsi(struct tg3 *tp)
15614 {
15615 	u32 apedata;
15616 
15617 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15618 	if (apedata != APE_SEG_SIG_MAGIC)
15619 		return;
15620 
15621 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15622 	if (!(apedata & APE_FW_STATUS_READY))
15623 		return;
15624 
15625 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15626 		tg3_flag_set(tp, APE_HAS_NCSI);
15627 }
15628 
15629 static void tg3_read_dash_ver(struct tg3 *tp)
15630 {
15631 	int vlen;
15632 	u32 apedata;
15633 	char *fwtype;
15634 
15635 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15636 
15637 	if (tg3_flag(tp, APE_HAS_NCSI))
15638 		fwtype = "NCSI";
15639 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15640 		fwtype = "SMASH";
15641 	else
15642 		fwtype = "DASH";
15643 
15644 	vlen = strlen(tp->fw_ver);
15645 
15646 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15647 		 fwtype,
15648 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15649 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15650 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15651 		 (apedata & APE_FW_VERSION_BLDMSK));
15652 }
15653 
15654 static void tg3_read_otp_ver(struct tg3 *tp)
15655 {
15656 	u32 val, val2;
15657 
15658 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15659 		return;
15660 
15661 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15662 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15663 	    TG3_OTP_MAGIC0_VALID(val)) {
15664 		u64 val64 = (u64) val << 32 | val2;
15665 		u32 ver = 0;
15666 		int i, vlen;
15667 
15668 		for (i = 0; i < 7; i++) {
15669 			if ((val64 & 0xff) == 0)
15670 				break;
15671 			ver = val64 & 0xff;
15672 			val64 >>= 8;
15673 		}
15674 		vlen = strlen(tp->fw_ver);
15675 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15676 	}
15677 }
15678 
15679 static void tg3_read_fw_ver(struct tg3 *tp)
15680 {
15681 	u32 val;
15682 	bool vpd_vers = false;
15683 
15684 	if (tp->fw_ver[0] != 0)
15685 		vpd_vers = true;
15686 
15687 	if (tg3_flag(tp, NO_NVRAM)) {
15688 		strcat(tp->fw_ver, "sb");
15689 		tg3_read_otp_ver(tp);
15690 		return;
15691 	}
15692 
15693 	if (tg3_nvram_read(tp, 0, &val))
15694 		return;
15695 
15696 	if (val == TG3_EEPROM_MAGIC)
15697 		tg3_read_bc_ver(tp);
15698 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15699 		tg3_read_sb_ver(tp, val);
15700 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15701 		tg3_read_hwsb_ver(tp);
15702 
15703 	if (tg3_flag(tp, ENABLE_ASF)) {
15704 		if (tg3_flag(tp, ENABLE_APE)) {
15705 			tg3_probe_ncsi(tp);
15706 			if (!vpd_vers)
15707 				tg3_read_dash_ver(tp);
15708 		} else if (!vpd_vers) {
15709 			tg3_read_mgmtfw_ver(tp);
15710 		}
15711 	}
15712 
15713 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15714 }
15715 
15716 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15717 {
15718 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15719 		return TG3_RX_RET_MAX_SIZE_5717;
15720 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15721 		return TG3_RX_RET_MAX_SIZE_5700;
15722 	else
15723 		return TG3_RX_RET_MAX_SIZE_5705;
15724 }
15725 
15726 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15727 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15728 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15729 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15730 	{ },
15731 };
15732 
15733 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15734 {
15735 	struct pci_dev *peer;
15736 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15737 
15738 	for (func = 0; func < 8; func++) {
15739 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15740 		if (peer && peer != tp->pdev)
15741 			break;
15742 		pci_dev_put(peer);
15743 	}
15744 	/* 5704 can be configured in single-port mode, set peer to
15745 	 * tp->pdev in that case.
15746 	 */
15747 	if (!peer) {
15748 		peer = tp->pdev;
15749 		return peer;
15750 	}
15751 
15752 	/*
15753 	 * We don't need to keep the refcount elevated; there's no way
15754 	 * to remove one half of this device without removing the other
15755 	 */
15756 	pci_dev_put(peer);
15757 
15758 	return peer;
15759 }
15760 
15761 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15762 {
15763 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15764 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15765 		u32 reg;
15766 
15767 		/* All devices that use the alternate
15768 		 * ASIC REV location have a CPMU.
15769 		 */
15770 		tg3_flag_set(tp, CPMU_PRESENT);
15771 
15772 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15773 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15774 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15775 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15776 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15777 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15778 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15779 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15780 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15781 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15782 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15783 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15784 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15785 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15786 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15787 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15788 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15789 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15790 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15791 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15792 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15793 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15794 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15795 		else
15796 			reg = TG3PCI_PRODID_ASICREV;
15797 
15798 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15799 	}
15800 
15801 	/* Wrong chip ID in 5752 A0. This code can be removed later
15802 	 * as A0 is not in production.
15803 	 */
15804 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15805 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15806 
15807 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15808 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15809 
15810 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15811 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15812 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15813 		tg3_flag_set(tp, 5717_PLUS);
15814 
15815 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15816 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15817 		tg3_flag_set(tp, 57765_CLASS);
15818 
15819 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15820 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15821 		tg3_flag_set(tp, 57765_PLUS);
15822 
15823 	/* Intentionally exclude ASIC_REV_5906 */
15824 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15825 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15826 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15827 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15828 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15829 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15830 	    tg3_flag(tp, 57765_PLUS))
15831 		tg3_flag_set(tp, 5755_PLUS);
15832 
15833 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15834 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15835 		tg3_flag_set(tp, 5780_CLASS);
15836 
15837 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15838 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15839 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15840 	    tg3_flag(tp, 5755_PLUS) ||
15841 	    tg3_flag(tp, 5780_CLASS))
15842 		tg3_flag_set(tp, 5750_PLUS);
15843 
15844 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15845 	    tg3_flag(tp, 5750_PLUS))
15846 		tg3_flag_set(tp, 5705_PLUS);
15847 }
15848 
15849 static bool tg3_10_100_only_device(struct tg3 *tp,
15850 				   const struct pci_device_id *ent)
15851 {
15852 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15853 
15854 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15855 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15856 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15857 		return true;
15858 
15859 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15860 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15861 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15862 				return true;
15863 		} else {
15864 			return true;
15865 		}
15866 	}
15867 
15868 	return false;
15869 }
15870 
15871 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15872 {
15873 	u32 misc_ctrl_reg;
15874 	u32 pci_state_reg, grc_misc_cfg;
15875 	u32 val;
15876 	u16 pci_cmd;
15877 	int err;
15878 
15879 	/* Force memory write invalidate off.  If we leave it on,
15880 	 * then on 5700_BX chips we have to enable a workaround.
15881 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15882 	 * to match the cacheline size.  The Broadcom driver have this
15883 	 * workaround but turns MWI off all the times so never uses
15884 	 * it.  This seems to suggest that the workaround is insufficient.
15885 	 */
15886 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15887 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15888 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15889 
15890 	/* Important! -- Make sure register accesses are byteswapped
15891 	 * correctly.  Also, for those chips that require it, make
15892 	 * sure that indirect register accesses are enabled before
15893 	 * the first operation.
15894 	 */
15895 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15896 			      &misc_ctrl_reg);
15897 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15898 			       MISC_HOST_CTRL_CHIPREV);
15899 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15900 			       tp->misc_host_ctrl);
15901 
15902 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15903 
15904 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15905 	 * we need to disable memory and use config. cycles
15906 	 * only to access all registers. The 5702/03 chips
15907 	 * can mistakenly decode the special cycles from the
15908 	 * ICH chipsets as memory write cycles, causing corruption
15909 	 * of register and memory space. Only certain ICH bridges
15910 	 * will drive special cycles with non-zero data during the
15911 	 * address phase which can fall within the 5703's address
15912 	 * range. This is not an ICH bug as the PCI spec allows
15913 	 * non-zero address during special cycles. However, only
15914 	 * these ICH bridges are known to drive non-zero addresses
15915 	 * during special cycles.
15916 	 *
15917 	 * Since special cycles do not cross PCI bridges, we only
15918 	 * enable this workaround if the 5703 is on the secondary
15919 	 * bus of these ICH bridges.
15920 	 */
15921 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15922 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15923 		static struct tg3_dev_id {
15924 			u32	vendor;
15925 			u32	device;
15926 			u32	rev;
15927 		} ich_chipsets[] = {
15928 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15929 			  PCI_ANY_ID },
15930 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15931 			  PCI_ANY_ID },
15932 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15933 			  0xa },
15934 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15935 			  PCI_ANY_ID },
15936 			{ },
15937 		};
15938 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15939 		struct pci_dev *bridge = NULL;
15940 
15941 		while (pci_id->vendor != 0) {
15942 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15943 						bridge);
15944 			if (!bridge) {
15945 				pci_id++;
15946 				continue;
15947 			}
15948 			if (pci_id->rev != PCI_ANY_ID) {
15949 				if (bridge->revision > pci_id->rev)
15950 					continue;
15951 			}
15952 			if (bridge->subordinate &&
15953 			    (bridge->subordinate->number ==
15954 			     tp->pdev->bus->number)) {
15955 				tg3_flag_set(tp, ICH_WORKAROUND);
15956 				pci_dev_put(bridge);
15957 				break;
15958 			}
15959 		}
15960 	}
15961 
15962 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15963 		static struct tg3_dev_id {
15964 			u32	vendor;
15965 			u32	device;
15966 		} bridge_chipsets[] = {
15967 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15968 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15969 			{ },
15970 		};
15971 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15972 		struct pci_dev *bridge = NULL;
15973 
15974 		while (pci_id->vendor != 0) {
15975 			bridge = pci_get_device(pci_id->vendor,
15976 						pci_id->device,
15977 						bridge);
15978 			if (!bridge) {
15979 				pci_id++;
15980 				continue;
15981 			}
15982 			if (bridge->subordinate &&
15983 			    (bridge->subordinate->number <=
15984 			     tp->pdev->bus->number) &&
15985 			    (bridge->subordinate->busn_res.end >=
15986 			     tp->pdev->bus->number)) {
15987 				tg3_flag_set(tp, 5701_DMA_BUG);
15988 				pci_dev_put(bridge);
15989 				break;
15990 			}
15991 		}
15992 	}
15993 
15994 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
15995 	 * DMA addresses > 40-bit. This bridge may have other additional
15996 	 * 57xx devices behind it in some 4-port NIC designs for example.
15997 	 * Any tg3 device found behind the bridge will also need the 40-bit
15998 	 * DMA workaround.
15999 	 */
16000 	if (tg3_flag(tp, 5780_CLASS)) {
16001 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16002 		tp->msi_cap = tp->pdev->msi_cap;
16003 	} else {
16004 		struct pci_dev *bridge = NULL;
16005 
16006 		do {
16007 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16008 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16009 						bridge);
16010 			if (bridge && bridge->subordinate &&
16011 			    (bridge->subordinate->number <=
16012 			     tp->pdev->bus->number) &&
16013 			    (bridge->subordinate->busn_res.end >=
16014 			     tp->pdev->bus->number)) {
16015 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16016 				pci_dev_put(bridge);
16017 				break;
16018 			}
16019 		} while (bridge);
16020 	}
16021 
16022 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16023 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16024 		tp->pdev_peer = tg3_find_peer(tp);
16025 
16026 	/* Determine TSO capabilities */
16027 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16028 		; /* Do nothing. HW bug. */
16029 	else if (tg3_flag(tp, 57765_PLUS))
16030 		tg3_flag_set(tp, HW_TSO_3);
16031 	else if (tg3_flag(tp, 5755_PLUS) ||
16032 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16033 		tg3_flag_set(tp, HW_TSO_2);
16034 	else if (tg3_flag(tp, 5750_PLUS)) {
16035 		tg3_flag_set(tp, HW_TSO_1);
16036 		tg3_flag_set(tp, TSO_BUG);
16037 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16038 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16039 			tg3_flag_clear(tp, TSO_BUG);
16040 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16041 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16042 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16043 		tg3_flag_set(tp, FW_TSO);
16044 		tg3_flag_set(tp, TSO_BUG);
16045 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16046 			tp->fw_needed = FIRMWARE_TG3TSO5;
16047 		else
16048 			tp->fw_needed = FIRMWARE_TG3TSO;
16049 	}
16050 
16051 	/* Selectively allow TSO based on operating conditions */
16052 	if (tg3_flag(tp, HW_TSO_1) ||
16053 	    tg3_flag(tp, HW_TSO_2) ||
16054 	    tg3_flag(tp, HW_TSO_3) ||
16055 	    tg3_flag(tp, FW_TSO)) {
16056 		/* For firmware TSO, assume ASF is disabled.
16057 		 * We'll disable TSO later if we discover ASF
16058 		 * is enabled in tg3_get_eeprom_hw_cfg().
16059 		 */
16060 		tg3_flag_set(tp, TSO_CAPABLE);
16061 	} else {
16062 		tg3_flag_clear(tp, TSO_CAPABLE);
16063 		tg3_flag_clear(tp, TSO_BUG);
16064 		tp->fw_needed = NULL;
16065 	}
16066 
16067 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16068 		tp->fw_needed = FIRMWARE_TG3;
16069 
16070 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16071 		tp->fw_needed = FIRMWARE_TG357766;
16072 
16073 	tp->irq_max = 1;
16074 
16075 	if (tg3_flag(tp, 5750_PLUS)) {
16076 		tg3_flag_set(tp, SUPPORT_MSI);
16077 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16078 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16079 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16080 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16081 		     tp->pdev_peer == tp->pdev))
16082 			tg3_flag_clear(tp, SUPPORT_MSI);
16083 
16084 		if (tg3_flag(tp, 5755_PLUS) ||
16085 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16086 			tg3_flag_set(tp, 1SHOT_MSI);
16087 		}
16088 
16089 		if (tg3_flag(tp, 57765_PLUS)) {
16090 			tg3_flag_set(tp, SUPPORT_MSIX);
16091 			tp->irq_max = TG3_IRQ_MAX_VECS;
16092 		}
16093 	}
16094 
16095 	tp->txq_max = 1;
16096 	tp->rxq_max = 1;
16097 	if (tp->irq_max > 1) {
16098 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16099 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16100 
16101 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16102 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16103 			tp->txq_max = tp->irq_max - 1;
16104 	}
16105 
16106 	if (tg3_flag(tp, 5755_PLUS) ||
16107 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16108 		tg3_flag_set(tp, SHORT_DMA_BUG);
16109 
16110 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16111 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16112 
16113 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16114 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16115 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16116 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16117 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16118 
16119 	if (tg3_flag(tp, 57765_PLUS) &&
16120 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16121 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16122 
16123 	if (!tg3_flag(tp, 5705_PLUS) ||
16124 	    tg3_flag(tp, 5780_CLASS) ||
16125 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16126 		tg3_flag_set(tp, JUMBO_CAPABLE);
16127 
16128 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16129 			      &pci_state_reg);
16130 
16131 	if (pci_is_pcie(tp->pdev)) {
16132 		u16 lnkctl;
16133 
16134 		tg3_flag_set(tp, PCI_EXPRESS);
16135 
16136 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16137 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16138 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16139 				tg3_flag_clear(tp, HW_TSO_2);
16140 				tg3_flag_clear(tp, TSO_CAPABLE);
16141 			}
16142 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16143 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16144 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16145 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16146 				tg3_flag_set(tp, CLKREQ_BUG);
16147 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16148 			tg3_flag_set(tp, L1PLLPD_EN);
16149 		}
16150 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16151 		/* BCM5785 devices are effectively PCIe devices, and should
16152 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16153 		 * section.
16154 		 */
16155 		tg3_flag_set(tp, PCI_EXPRESS);
16156 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16157 		   tg3_flag(tp, 5780_CLASS)) {
16158 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16159 		if (!tp->pcix_cap) {
16160 			dev_err(&tp->pdev->dev,
16161 				"Cannot find PCI-X capability, aborting\n");
16162 			return -EIO;
16163 		}
16164 
16165 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16166 			tg3_flag_set(tp, PCIX_MODE);
16167 	}
16168 
16169 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16170 	 * reordering to the mailbox registers done by the host
16171 	 * controller can cause major troubles.  We read back from
16172 	 * every mailbox register write to force the writes to be
16173 	 * posted to the chip in order.
16174 	 */
16175 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16176 	    !tg3_flag(tp, PCI_EXPRESS))
16177 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16178 
16179 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16180 			     &tp->pci_cacheline_sz);
16181 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16182 			     &tp->pci_lat_timer);
16183 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16184 	    tp->pci_lat_timer < 64) {
16185 		tp->pci_lat_timer = 64;
16186 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16187 				      tp->pci_lat_timer);
16188 	}
16189 
16190 	/* Important! -- It is critical that the PCI-X hw workaround
16191 	 * situation is decided before the first MMIO register access.
16192 	 */
16193 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16194 		/* 5700 BX chips need to have their TX producer index
16195 		 * mailboxes written twice to workaround a bug.
16196 		 */
16197 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16198 
16199 		/* If we are in PCI-X mode, enable register write workaround.
16200 		 *
16201 		 * The workaround is to use indirect register accesses
16202 		 * for all chip writes not to mailbox registers.
16203 		 */
16204 		if (tg3_flag(tp, PCIX_MODE)) {
16205 			u32 pm_reg;
16206 
16207 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16208 
16209 			/* The chip can have it's power management PCI config
16210 			 * space registers clobbered due to this bug.
16211 			 * So explicitly force the chip into D0 here.
16212 			 */
16213 			pci_read_config_dword(tp->pdev,
16214 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16215 					      &pm_reg);
16216 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16217 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16218 			pci_write_config_dword(tp->pdev,
16219 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16220 					       pm_reg);
16221 
16222 			/* Also, force SERR#/PERR# in PCI command. */
16223 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16224 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16225 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16226 		}
16227 	}
16228 
16229 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16230 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16231 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16232 		tg3_flag_set(tp, PCI_32BIT);
16233 
16234 	/* Chip-specific fixup from Broadcom driver */
16235 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16236 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16237 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16238 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16239 	}
16240 
16241 	/* Default fast path register access methods */
16242 	tp->read32 = tg3_read32;
16243 	tp->write32 = tg3_write32;
16244 	tp->read32_mbox = tg3_read32;
16245 	tp->write32_mbox = tg3_write32;
16246 	tp->write32_tx_mbox = tg3_write32;
16247 	tp->write32_rx_mbox = tg3_write32;
16248 
16249 	/* Various workaround register access methods */
16250 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16251 		tp->write32 = tg3_write_indirect_reg32;
16252 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16253 		 (tg3_flag(tp, PCI_EXPRESS) &&
16254 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16255 		/*
16256 		 * Back to back register writes can cause problems on these
16257 		 * chips, the workaround is to read back all reg writes
16258 		 * except those to mailbox regs.
16259 		 *
16260 		 * See tg3_write_indirect_reg32().
16261 		 */
16262 		tp->write32 = tg3_write_flush_reg32;
16263 	}
16264 
16265 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16266 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16267 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16268 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16269 	}
16270 
16271 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16272 		tp->read32 = tg3_read_indirect_reg32;
16273 		tp->write32 = tg3_write_indirect_reg32;
16274 		tp->read32_mbox = tg3_read_indirect_mbox;
16275 		tp->write32_mbox = tg3_write_indirect_mbox;
16276 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16277 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16278 
16279 		iounmap(tp->regs);
16280 		tp->regs = NULL;
16281 
16282 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16283 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16284 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16285 	}
16286 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16287 		tp->read32_mbox = tg3_read32_mbox_5906;
16288 		tp->write32_mbox = tg3_write32_mbox_5906;
16289 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16290 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16291 	}
16292 
16293 	if (tp->write32 == tg3_write_indirect_reg32 ||
16294 	    (tg3_flag(tp, PCIX_MODE) &&
16295 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16296 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16297 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16298 
16299 	/* The memory arbiter has to be enabled in order for SRAM accesses
16300 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16301 	 * sure it is enabled, but other entities such as system netboot
16302 	 * code might disable it.
16303 	 */
16304 	val = tr32(MEMARB_MODE);
16305 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16306 
16307 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16308 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16309 	    tg3_flag(tp, 5780_CLASS)) {
16310 		if (tg3_flag(tp, PCIX_MODE)) {
16311 			pci_read_config_dword(tp->pdev,
16312 					      tp->pcix_cap + PCI_X_STATUS,
16313 					      &val);
16314 			tp->pci_fn = val & 0x7;
16315 		}
16316 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16317 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16318 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16319 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16320 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16321 			val = tr32(TG3_CPMU_STATUS);
16322 
16323 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16324 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16325 		else
16326 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16327 				     TG3_CPMU_STATUS_FSHFT_5719;
16328 	}
16329 
16330 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16331 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16332 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16333 	}
16334 
16335 	/* Get eeprom hw config before calling tg3_set_power_state().
16336 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16337 	 * determined before calling tg3_set_power_state() so that
16338 	 * we know whether or not to switch out of Vaux power.
16339 	 * When the flag is set, it means that GPIO1 is used for eeprom
16340 	 * write protect and also implies that it is a LOM where GPIOs
16341 	 * are not used to switch power.
16342 	 */
16343 	tg3_get_eeprom_hw_cfg(tp);
16344 
16345 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16346 		tg3_flag_clear(tp, TSO_CAPABLE);
16347 		tg3_flag_clear(tp, TSO_BUG);
16348 		tp->fw_needed = NULL;
16349 	}
16350 
16351 	if (tg3_flag(tp, ENABLE_APE)) {
16352 		/* Allow reads and writes to the
16353 		 * APE register and memory space.
16354 		 */
16355 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16356 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16357 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16358 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16359 				       pci_state_reg);
16360 
16361 		tg3_ape_lock_init(tp);
16362 	}
16363 
16364 	/* Set up tp->grc_local_ctrl before calling
16365 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16366 	 * will bring 5700's external PHY out of reset.
16367 	 * It is also used as eeprom write protect on LOMs.
16368 	 */
16369 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16370 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16371 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16372 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16373 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16374 	/* Unused GPIO3 must be driven as output on 5752 because there
16375 	 * are no pull-up resistors on unused GPIO pins.
16376 	 */
16377 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16378 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16379 
16380 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16381 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16382 	    tg3_flag(tp, 57765_CLASS))
16383 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16384 
16385 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16386 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16387 		/* Turn off the debug UART. */
16388 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16389 		if (tg3_flag(tp, IS_NIC))
16390 			/* Keep VMain power. */
16391 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16392 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16393 	}
16394 
16395 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16396 		tp->grc_local_ctrl |=
16397 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16398 
16399 	/* Switch out of Vaux if it is a NIC */
16400 	tg3_pwrsrc_switch_to_vmain(tp);
16401 
16402 	/* Derive initial jumbo mode from MTU assigned in
16403 	 * ether_setup() via the alloc_etherdev() call
16404 	 */
16405 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16406 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16407 
16408 	/* Determine WakeOnLan speed to use. */
16409 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16410 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16411 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16412 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16413 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16414 	} else {
16415 		tg3_flag_set(tp, WOL_SPEED_100MB);
16416 	}
16417 
16418 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16419 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16420 
16421 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16422 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16423 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16424 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16425 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16426 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16427 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16428 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16429 
16430 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16431 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16432 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16433 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16434 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16435 
16436 	if (tg3_flag(tp, 5705_PLUS) &&
16437 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16438 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16439 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16440 	    !tg3_flag(tp, 57765_PLUS)) {
16441 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16442 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16443 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16444 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16445 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16446 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16447 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16448 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16449 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16450 		} else
16451 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16452 	}
16453 
16454 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16455 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16456 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16457 		if (tp->phy_otp == 0)
16458 			tp->phy_otp = TG3_OTP_DEFAULT;
16459 	}
16460 
16461 	if (tg3_flag(tp, CPMU_PRESENT))
16462 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16463 	else
16464 		tp->mi_mode = MAC_MI_MODE_BASE;
16465 
16466 	tp->coalesce_mode = 0;
16467 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16468 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16469 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16470 
16471 	/* Set these bits to enable statistics workaround. */
16472 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16473 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16474 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16475 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16476 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16477 	}
16478 
16479 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16480 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16481 		tg3_flag_set(tp, USE_PHYLIB);
16482 
16483 	err = tg3_mdio_init(tp);
16484 	if (err)
16485 		return err;
16486 
16487 	/* Initialize data/descriptor byte/word swapping. */
16488 	val = tr32(GRC_MODE);
16489 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16490 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16491 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16492 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16493 			GRC_MODE_B2HRX_ENABLE |
16494 			GRC_MODE_HTX2B_ENABLE |
16495 			GRC_MODE_HOST_STACKUP);
16496 	else
16497 		val &= GRC_MODE_HOST_STACKUP;
16498 
16499 	tw32(GRC_MODE, val | tp->grc_mode);
16500 
16501 	tg3_switch_clocks(tp);
16502 
16503 	/* Clear this out for sanity. */
16504 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16505 
16506 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16507 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16508 
16509 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16510 			      &pci_state_reg);
16511 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16512 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16513 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16514 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16515 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16516 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16517 			void __iomem *sram_base;
16518 
16519 			/* Write some dummy words into the SRAM status block
16520 			 * area, see if it reads back correctly.  If the return
16521 			 * value is bad, force enable the PCIX workaround.
16522 			 */
16523 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16524 
16525 			writel(0x00000000, sram_base);
16526 			writel(0x00000000, sram_base + 4);
16527 			writel(0xffffffff, sram_base + 4);
16528 			if (readl(sram_base) != 0x00000000)
16529 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16530 		}
16531 	}
16532 
16533 	udelay(50);
16534 	tg3_nvram_init(tp);
16535 
16536 	/* If the device has an NVRAM, no need to load patch firmware */
16537 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16538 	    !tg3_flag(tp, NO_NVRAM))
16539 		tp->fw_needed = NULL;
16540 
16541 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16542 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16543 
16544 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16545 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16546 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16547 		tg3_flag_set(tp, IS_5788);
16548 
16549 	if (!tg3_flag(tp, IS_5788) &&
16550 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16551 		tg3_flag_set(tp, TAGGED_STATUS);
16552 	if (tg3_flag(tp, TAGGED_STATUS)) {
16553 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16554 				      HOSTCC_MODE_CLRTICK_TXBD);
16555 
16556 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16557 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16558 				       tp->misc_host_ctrl);
16559 	}
16560 
16561 	/* Preserve the APE MAC_MODE bits */
16562 	if (tg3_flag(tp, ENABLE_APE))
16563 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16564 	else
16565 		tp->mac_mode = 0;
16566 
16567 	if (tg3_10_100_only_device(tp, ent))
16568 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16569 
16570 	err = tg3_phy_probe(tp);
16571 	if (err) {
16572 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16573 		/* ... but do not return immediately ... */
16574 		tg3_mdio_fini(tp);
16575 	}
16576 
16577 	tg3_read_vpd(tp);
16578 	tg3_read_fw_ver(tp);
16579 
16580 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16581 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16582 	} else {
16583 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16584 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16585 		else
16586 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16587 	}
16588 
16589 	/* 5700 {AX,BX} chips have a broken status block link
16590 	 * change bit implementation, so we must use the
16591 	 * status register in those cases.
16592 	 */
16593 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16594 		tg3_flag_set(tp, USE_LINKCHG_REG);
16595 	else
16596 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16597 
16598 	/* The led_ctrl is set during tg3_phy_probe, here we might
16599 	 * have to force the link status polling mechanism based
16600 	 * upon subsystem IDs.
16601 	 */
16602 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16603 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16604 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16605 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16606 		tg3_flag_set(tp, USE_LINKCHG_REG);
16607 	}
16608 
16609 	/* For all SERDES we poll the MAC status register. */
16610 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16611 		tg3_flag_set(tp, POLL_SERDES);
16612 	else
16613 		tg3_flag_clear(tp, POLL_SERDES);
16614 
16615 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16616 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16617 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16618 	    tg3_flag(tp, PCIX_MODE)) {
16619 		tp->rx_offset = NET_SKB_PAD;
16620 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16621 		tp->rx_copy_thresh = ~(u16)0;
16622 #endif
16623 	}
16624 
16625 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16626 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16627 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16628 
16629 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16630 
16631 	/* Increment the rx prod index on the rx std ring by at most
16632 	 * 8 for these chips to workaround hw errata.
16633 	 */
16634 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16635 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16636 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16637 		tp->rx_std_max_post = 8;
16638 
16639 	if (tg3_flag(tp, ASPM_WORKAROUND))
16640 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16641 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16642 
16643 	return err;
16644 }
16645 
16646 #ifdef CONFIG_SPARC
16647 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16648 {
16649 	struct net_device *dev = tp->dev;
16650 	struct pci_dev *pdev = tp->pdev;
16651 	struct device_node *dp = pci_device_to_OF_node(pdev);
16652 	const unsigned char *addr;
16653 	int len;
16654 
16655 	addr = of_get_property(dp, "local-mac-address", &len);
16656 	if (addr && len == ETH_ALEN) {
16657 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16658 		return 0;
16659 	}
16660 	return -ENODEV;
16661 }
16662 
16663 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16664 {
16665 	struct net_device *dev = tp->dev;
16666 
16667 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16668 	return 0;
16669 }
16670 #endif
16671 
16672 static int tg3_get_device_address(struct tg3 *tp)
16673 {
16674 	struct net_device *dev = tp->dev;
16675 	u32 hi, lo, mac_offset;
16676 	int addr_ok = 0;
16677 	int err;
16678 
16679 #ifdef CONFIG_SPARC
16680 	if (!tg3_get_macaddr_sparc(tp))
16681 		return 0;
16682 #endif
16683 
16684 	if (tg3_flag(tp, IS_SSB_CORE)) {
16685 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16686 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16687 			return 0;
16688 	}
16689 
16690 	mac_offset = 0x7c;
16691 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16692 	    tg3_flag(tp, 5780_CLASS)) {
16693 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16694 			mac_offset = 0xcc;
16695 		if (tg3_nvram_lock(tp))
16696 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16697 		else
16698 			tg3_nvram_unlock(tp);
16699 	} else if (tg3_flag(tp, 5717_PLUS)) {
16700 		if (tp->pci_fn & 1)
16701 			mac_offset = 0xcc;
16702 		if (tp->pci_fn > 1)
16703 			mac_offset += 0x18c;
16704 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16705 		mac_offset = 0x10;
16706 
16707 	/* First try to get it from MAC address mailbox. */
16708 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16709 	if ((hi >> 16) == 0x484b) {
16710 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16711 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16712 
16713 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16714 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16715 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16716 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16717 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16718 
16719 		/* Some old bootcode may report a 0 MAC address in SRAM */
16720 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16721 	}
16722 	if (!addr_ok) {
16723 		/* Next, try NVRAM. */
16724 		if (!tg3_flag(tp, NO_NVRAM) &&
16725 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16726 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16727 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16728 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16729 		}
16730 		/* Finally just fetch it out of the MAC control regs. */
16731 		else {
16732 			hi = tr32(MAC_ADDR_0_HIGH);
16733 			lo = tr32(MAC_ADDR_0_LOW);
16734 
16735 			dev->dev_addr[5] = lo & 0xff;
16736 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16737 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16738 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16739 			dev->dev_addr[1] = hi & 0xff;
16740 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16741 		}
16742 	}
16743 
16744 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16745 #ifdef CONFIG_SPARC
16746 		if (!tg3_get_default_macaddr_sparc(tp))
16747 			return 0;
16748 #endif
16749 		return -EINVAL;
16750 	}
16751 	return 0;
16752 }
16753 
16754 #define BOUNDARY_SINGLE_CACHELINE	1
16755 #define BOUNDARY_MULTI_CACHELINE	2
16756 
16757 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16758 {
16759 	int cacheline_size;
16760 	u8 byte;
16761 	int goal;
16762 
16763 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16764 	if (byte == 0)
16765 		cacheline_size = 1024;
16766 	else
16767 		cacheline_size = (int) byte * 4;
16768 
16769 	/* On 5703 and later chips, the boundary bits have no
16770 	 * effect.
16771 	 */
16772 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16773 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16774 	    !tg3_flag(tp, PCI_EXPRESS))
16775 		goto out;
16776 
16777 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16778 	goal = BOUNDARY_MULTI_CACHELINE;
16779 #else
16780 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16781 	goal = BOUNDARY_SINGLE_CACHELINE;
16782 #else
16783 	goal = 0;
16784 #endif
16785 #endif
16786 
16787 	if (tg3_flag(tp, 57765_PLUS)) {
16788 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16789 		goto out;
16790 	}
16791 
16792 	if (!goal)
16793 		goto out;
16794 
16795 	/* PCI controllers on most RISC systems tend to disconnect
16796 	 * when a device tries to burst across a cache-line boundary.
16797 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16798 	 *
16799 	 * Unfortunately, for PCI-E there are only limited
16800 	 * write-side controls for this, and thus for reads
16801 	 * we will still get the disconnects.  We'll also waste
16802 	 * these PCI cycles for both read and write for chips
16803 	 * other than 5700 and 5701 which do not implement the
16804 	 * boundary bits.
16805 	 */
16806 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16807 		switch (cacheline_size) {
16808 		case 16:
16809 		case 32:
16810 		case 64:
16811 		case 128:
16812 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16813 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16814 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16815 			} else {
16816 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16817 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16818 			}
16819 			break;
16820 
16821 		case 256:
16822 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16823 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16824 			break;
16825 
16826 		default:
16827 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16828 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16829 			break;
16830 		}
16831 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16832 		switch (cacheline_size) {
16833 		case 16:
16834 		case 32:
16835 		case 64:
16836 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16837 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16838 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16839 				break;
16840 			}
16841 			/* fallthrough */
16842 		case 128:
16843 		default:
16844 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16845 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16846 			break;
16847 		}
16848 	} else {
16849 		switch (cacheline_size) {
16850 		case 16:
16851 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16852 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16853 					DMA_RWCTRL_WRITE_BNDRY_16);
16854 				break;
16855 			}
16856 			/* fallthrough */
16857 		case 32:
16858 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16859 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16860 					DMA_RWCTRL_WRITE_BNDRY_32);
16861 				break;
16862 			}
16863 			/* fallthrough */
16864 		case 64:
16865 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16866 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16867 					DMA_RWCTRL_WRITE_BNDRY_64);
16868 				break;
16869 			}
16870 			/* fallthrough */
16871 		case 128:
16872 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16873 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16874 					DMA_RWCTRL_WRITE_BNDRY_128);
16875 				break;
16876 			}
16877 			/* fallthrough */
16878 		case 256:
16879 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16880 				DMA_RWCTRL_WRITE_BNDRY_256);
16881 			break;
16882 		case 512:
16883 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16884 				DMA_RWCTRL_WRITE_BNDRY_512);
16885 			break;
16886 		case 1024:
16887 		default:
16888 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16889 				DMA_RWCTRL_WRITE_BNDRY_1024);
16890 			break;
16891 		}
16892 	}
16893 
16894 out:
16895 	return val;
16896 }
16897 
16898 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16899 			   int size, bool to_device)
16900 {
16901 	struct tg3_internal_buffer_desc test_desc;
16902 	u32 sram_dma_descs;
16903 	int i, ret;
16904 
16905 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16906 
16907 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16908 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16909 	tw32(RDMAC_STATUS, 0);
16910 	tw32(WDMAC_STATUS, 0);
16911 
16912 	tw32(BUFMGR_MODE, 0);
16913 	tw32(FTQ_RESET, 0);
16914 
16915 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16916 	test_desc.addr_lo = buf_dma & 0xffffffff;
16917 	test_desc.nic_mbuf = 0x00002100;
16918 	test_desc.len = size;
16919 
16920 	/*
16921 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16922 	 * the *second* time the tg3 driver was getting loaded after an
16923 	 * initial scan.
16924 	 *
16925 	 * Broadcom tells me:
16926 	 *   ...the DMA engine is connected to the GRC block and a DMA
16927 	 *   reset may affect the GRC block in some unpredictable way...
16928 	 *   The behavior of resets to individual blocks has not been tested.
16929 	 *
16930 	 * Broadcom noted the GRC reset will also reset all sub-components.
16931 	 */
16932 	if (to_device) {
16933 		test_desc.cqid_sqid = (13 << 8) | 2;
16934 
16935 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16936 		udelay(40);
16937 	} else {
16938 		test_desc.cqid_sqid = (16 << 8) | 7;
16939 
16940 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16941 		udelay(40);
16942 	}
16943 	test_desc.flags = 0x00000005;
16944 
16945 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16946 		u32 val;
16947 
16948 		val = *(((u32 *)&test_desc) + i);
16949 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16950 				       sram_dma_descs + (i * sizeof(u32)));
16951 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16952 	}
16953 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16954 
16955 	if (to_device)
16956 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16957 	else
16958 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16959 
16960 	ret = -ENODEV;
16961 	for (i = 0; i < 40; i++) {
16962 		u32 val;
16963 
16964 		if (to_device)
16965 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16966 		else
16967 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16968 		if ((val & 0xffff) == sram_dma_descs) {
16969 			ret = 0;
16970 			break;
16971 		}
16972 
16973 		udelay(100);
16974 	}
16975 
16976 	return ret;
16977 }
16978 
16979 #define TEST_BUFFER_SIZE	0x2000
16980 
16981 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16982 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16983 	{ },
16984 };
16985 
16986 static int tg3_test_dma(struct tg3 *tp)
16987 {
16988 	dma_addr_t buf_dma;
16989 	u32 *buf, saved_dma_rwctrl;
16990 	int ret = 0;
16991 
16992 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16993 				 &buf_dma, GFP_KERNEL);
16994 	if (!buf) {
16995 		ret = -ENOMEM;
16996 		goto out_nofree;
16997 	}
16998 
16999 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17000 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17001 
17002 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17003 
17004 	if (tg3_flag(tp, 57765_PLUS))
17005 		goto out;
17006 
17007 	if (tg3_flag(tp, PCI_EXPRESS)) {
17008 		/* DMA read watermark not used on PCIE */
17009 		tp->dma_rwctrl |= 0x00180000;
17010 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17011 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17012 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17013 			tp->dma_rwctrl |= 0x003f0000;
17014 		else
17015 			tp->dma_rwctrl |= 0x003f000f;
17016 	} else {
17017 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17018 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17019 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17020 			u32 read_water = 0x7;
17021 
17022 			/* If the 5704 is behind the EPB bridge, we can
17023 			 * do the less restrictive ONE_DMA workaround for
17024 			 * better performance.
17025 			 */
17026 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17027 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17028 				tp->dma_rwctrl |= 0x8000;
17029 			else if (ccval == 0x6 || ccval == 0x7)
17030 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17031 
17032 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17033 				read_water = 4;
17034 			/* Set bit 23 to enable PCIX hw bug fix */
17035 			tp->dma_rwctrl |=
17036 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17037 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17038 				(1 << 23);
17039 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17040 			/* 5780 always in PCIX mode */
17041 			tp->dma_rwctrl |= 0x00144000;
17042 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17043 			/* 5714 always in PCIX mode */
17044 			tp->dma_rwctrl |= 0x00148000;
17045 		} else {
17046 			tp->dma_rwctrl |= 0x001b000f;
17047 		}
17048 	}
17049 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17050 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17051 
17052 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17053 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17054 		tp->dma_rwctrl &= 0xfffffff0;
17055 
17056 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17057 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17058 		/* Remove this if it causes problems for some boards. */
17059 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17060 
17061 		/* On 5700/5701 chips, we need to set this bit.
17062 		 * Otherwise the chip will issue cacheline transactions
17063 		 * to streamable DMA memory with not all the byte
17064 		 * enables turned on.  This is an error on several
17065 		 * RISC PCI controllers, in particular sparc64.
17066 		 *
17067 		 * On 5703/5704 chips, this bit has been reassigned
17068 		 * a different meaning.  In particular, it is used
17069 		 * on those chips to enable a PCI-X workaround.
17070 		 */
17071 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17072 	}
17073 
17074 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17075 
17076 
17077 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17078 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17079 		goto out;
17080 
17081 	/* It is best to perform DMA test with maximum write burst size
17082 	 * to expose the 5700/5701 write DMA bug.
17083 	 */
17084 	saved_dma_rwctrl = tp->dma_rwctrl;
17085 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17086 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17087 
17088 	while (1) {
17089 		u32 *p = buf, i;
17090 
17091 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17092 			p[i] = i;
17093 
17094 		/* Send the buffer to the chip. */
17095 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17096 		if (ret) {
17097 			dev_err(&tp->pdev->dev,
17098 				"%s: Buffer write failed. err = %d\n",
17099 				__func__, ret);
17100 			break;
17101 		}
17102 
17103 		/* Now read it back. */
17104 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17105 		if (ret) {
17106 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17107 				"err = %d\n", __func__, ret);
17108 			break;
17109 		}
17110 
17111 		/* Verify it. */
17112 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17113 			if (p[i] == i)
17114 				continue;
17115 
17116 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17117 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17118 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17119 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17120 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17121 				break;
17122 			} else {
17123 				dev_err(&tp->pdev->dev,
17124 					"%s: Buffer corrupted on read back! "
17125 					"(%d != %d)\n", __func__, p[i], i);
17126 				ret = -ENODEV;
17127 				goto out;
17128 			}
17129 		}
17130 
17131 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17132 			/* Success. */
17133 			ret = 0;
17134 			break;
17135 		}
17136 	}
17137 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17138 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17139 		/* DMA test passed without adjusting DMA boundary,
17140 		 * now look for chipsets that are known to expose the
17141 		 * DMA bug without failing the test.
17142 		 */
17143 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17144 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17145 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17146 		} else {
17147 			/* Safe to use the calculated DMA boundary. */
17148 			tp->dma_rwctrl = saved_dma_rwctrl;
17149 		}
17150 
17151 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17152 	}
17153 
17154 out:
17155 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17156 out_nofree:
17157 	return ret;
17158 }
17159 
17160 static void tg3_init_bufmgr_config(struct tg3 *tp)
17161 {
17162 	if (tg3_flag(tp, 57765_PLUS)) {
17163 		tp->bufmgr_config.mbuf_read_dma_low_water =
17164 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17165 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17166 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17167 		tp->bufmgr_config.mbuf_high_water =
17168 			DEFAULT_MB_HIGH_WATER_57765;
17169 
17170 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17171 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17172 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17173 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17174 		tp->bufmgr_config.mbuf_high_water_jumbo =
17175 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17176 	} else if (tg3_flag(tp, 5705_PLUS)) {
17177 		tp->bufmgr_config.mbuf_read_dma_low_water =
17178 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17179 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17180 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17181 		tp->bufmgr_config.mbuf_high_water =
17182 			DEFAULT_MB_HIGH_WATER_5705;
17183 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17184 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17185 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17186 			tp->bufmgr_config.mbuf_high_water =
17187 				DEFAULT_MB_HIGH_WATER_5906;
17188 		}
17189 
17190 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17191 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17192 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17193 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17194 		tp->bufmgr_config.mbuf_high_water_jumbo =
17195 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17196 	} else {
17197 		tp->bufmgr_config.mbuf_read_dma_low_water =
17198 			DEFAULT_MB_RDMA_LOW_WATER;
17199 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17200 			DEFAULT_MB_MACRX_LOW_WATER;
17201 		tp->bufmgr_config.mbuf_high_water =
17202 			DEFAULT_MB_HIGH_WATER;
17203 
17204 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17205 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17206 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17207 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17208 		tp->bufmgr_config.mbuf_high_water_jumbo =
17209 			DEFAULT_MB_HIGH_WATER_JUMBO;
17210 	}
17211 
17212 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17213 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17214 }
17215 
17216 static char *tg3_phy_string(struct tg3 *tp)
17217 {
17218 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17219 	case TG3_PHY_ID_BCM5400:	return "5400";
17220 	case TG3_PHY_ID_BCM5401:	return "5401";
17221 	case TG3_PHY_ID_BCM5411:	return "5411";
17222 	case TG3_PHY_ID_BCM5701:	return "5701";
17223 	case TG3_PHY_ID_BCM5703:	return "5703";
17224 	case TG3_PHY_ID_BCM5704:	return "5704";
17225 	case TG3_PHY_ID_BCM5705:	return "5705";
17226 	case TG3_PHY_ID_BCM5750:	return "5750";
17227 	case TG3_PHY_ID_BCM5752:	return "5752";
17228 	case TG3_PHY_ID_BCM5714:	return "5714";
17229 	case TG3_PHY_ID_BCM5780:	return "5780";
17230 	case TG3_PHY_ID_BCM5755:	return "5755";
17231 	case TG3_PHY_ID_BCM5787:	return "5787";
17232 	case TG3_PHY_ID_BCM5784:	return "5784";
17233 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17234 	case TG3_PHY_ID_BCM5906:	return "5906";
17235 	case TG3_PHY_ID_BCM5761:	return "5761";
17236 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17237 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17238 	case TG3_PHY_ID_BCM57765:	return "57765";
17239 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17240 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17241 	case TG3_PHY_ID_BCM5762:	return "5762C";
17242 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17243 	case 0:			return "serdes";
17244 	default:		return "unknown";
17245 	}
17246 }
17247 
17248 static char *tg3_bus_string(struct tg3 *tp, char *str)
17249 {
17250 	if (tg3_flag(tp, PCI_EXPRESS)) {
17251 		strcpy(str, "PCI Express");
17252 		return str;
17253 	} else if (tg3_flag(tp, PCIX_MODE)) {
17254 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17255 
17256 		strcpy(str, "PCIX:");
17257 
17258 		if ((clock_ctrl == 7) ||
17259 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17260 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17261 			strcat(str, "133MHz");
17262 		else if (clock_ctrl == 0)
17263 			strcat(str, "33MHz");
17264 		else if (clock_ctrl == 2)
17265 			strcat(str, "50MHz");
17266 		else if (clock_ctrl == 4)
17267 			strcat(str, "66MHz");
17268 		else if (clock_ctrl == 6)
17269 			strcat(str, "100MHz");
17270 	} else {
17271 		strcpy(str, "PCI:");
17272 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17273 			strcat(str, "66MHz");
17274 		else
17275 			strcat(str, "33MHz");
17276 	}
17277 	if (tg3_flag(tp, PCI_32BIT))
17278 		strcat(str, ":32-bit");
17279 	else
17280 		strcat(str, ":64-bit");
17281 	return str;
17282 }
17283 
17284 static void tg3_init_coal(struct tg3 *tp)
17285 {
17286 	struct ethtool_coalesce *ec = &tp->coal;
17287 
17288 	memset(ec, 0, sizeof(*ec));
17289 	ec->cmd = ETHTOOL_GCOALESCE;
17290 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17291 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17292 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17293 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17294 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17295 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17296 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17297 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17298 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17299 
17300 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17301 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17302 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17303 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17304 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17305 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17306 	}
17307 
17308 	if (tg3_flag(tp, 5705_PLUS)) {
17309 		ec->rx_coalesce_usecs_irq = 0;
17310 		ec->tx_coalesce_usecs_irq = 0;
17311 		ec->stats_block_coalesce_usecs = 0;
17312 	}
17313 }
17314 
17315 static int tg3_init_one(struct pci_dev *pdev,
17316 				  const struct pci_device_id *ent)
17317 {
17318 	struct net_device *dev;
17319 	struct tg3 *tp;
17320 	int i, err;
17321 	u32 sndmbx, rcvmbx, intmbx;
17322 	char str[40];
17323 	u64 dma_mask, persist_dma_mask;
17324 	netdev_features_t features = 0;
17325 
17326 	printk_once(KERN_INFO "%s\n", version);
17327 
17328 	err = pci_enable_device(pdev);
17329 	if (err) {
17330 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17331 		return err;
17332 	}
17333 
17334 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17335 	if (err) {
17336 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17337 		goto err_out_disable_pdev;
17338 	}
17339 
17340 	pci_set_master(pdev);
17341 
17342 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17343 	if (!dev) {
17344 		err = -ENOMEM;
17345 		goto err_out_free_res;
17346 	}
17347 
17348 	SET_NETDEV_DEV(dev, &pdev->dev);
17349 
17350 	tp = netdev_priv(dev);
17351 	tp->pdev = pdev;
17352 	tp->dev = dev;
17353 	tp->rx_mode = TG3_DEF_RX_MODE;
17354 	tp->tx_mode = TG3_DEF_TX_MODE;
17355 	tp->irq_sync = 1;
17356 
17357 	if (tg3_debug > 0)
17358 		tp->msg_enable = tg3_debug;
17359 	else
17360 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17361 
17362 	if (pdev_is_ssb_gige_core(pdev)) {
17363 		tg3_flag_set(tp, IS_SSB_CORE);
17364 		if (ssb_gige_must_flush_posted_writes(pdev))
17365 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17366 		if (ssb_gige_one_dma_at_once(pdev))
17367 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17368 		if (ssb_gige_have_roboswitch(pdev)) {
17369 			tg3_flag_set(tp, USE_PHYLIB);
17370 			tg3_flag_set(tp, ROBOSWITCH);
17371 		}
17372 		if (ssb_gige_is_rgmii(pdev))
17373 			tg3_flag_set(tp, RGMII_MODE);
17374 	}
17375 
17376 	/* The word/byte swap controls here control register access byte
17377 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17378 	 * setting below.
17379 	 */
17380 	tp->misc_host_ctrl =
17381 		MISC_HOST_CTRL_MASK_PCI_INT |
17382 		MISC_HOST_CTRL_WORD_SWAP |
17383 		MISC_HOST_CTRL_INDIR_ACCESS |
17384 		MISC_HOST_CTRL_PCISTATE_RW;
17385 
17386 	/* The NONFRM (non-frame) byte/word swap controls take effect
17387 	 * on descriptor entries, anything which isn't packet data.
17388 	 *
17389 	 * The StrongARM chips on the board (one for tx, one for rx)
17390 	 * are running in big-endian mode.
17391 	 */
17392 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17393 			GRC_MODE_WSWAP_NONFRM_DATA);
17394 #ifdef __BIG_ENDIAN
17395 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17396 #endif
17397 	spin_lock_init(&tp->lock);
17398 	spin_lock_init(&tp->indirect_lock);
17399 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17400 
17401 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17402 	if (!tp->regs) {
17403 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17404 		err = -ENOMEM;
17405 		goto err_out_free_dev;
17406 	}
17407 
17408 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17409 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17410 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17411 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17412 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17413 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17414 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17415 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17416 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17417 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17418 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17419 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17420 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17421 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17422 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17423 		tg3_flag_set(tp, ENABLE_APE);
17424 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17425 		if (!tp->aperegs) {
17426 			dev_err(&pdev->dev,
17427 				"Cannot map APE registers, aborting\n");
17428 			err = -ENOMEM;
17429 			goto err_out_iounmap;
17430 		}
17431 	}
17432 
17433 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17434 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17435 
17436 	dev->ethtool_ops = &tg3_ethtool_ops;
17437 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17438 	dev->netdev_ops = &tg3_netdev_ops;
17439 	dev->irq = pdev->irq;
17440 
17441 	err = tg3_get_invariants(tp, ent);
17442 	if (err) {
17443 		dev_err(&pdev->dev,
17444 			"Problem fetching invariants of chip, aborting\n");
17445 		goto err_out_apeunmap;
17446 	}
17447 
17448 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17449 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17450 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17451 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17452 	 * do DMA address check in tg3_start_xmit().
17453 	 */
17454 	if (tg3_flag(tp, IS_5788))
17455 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17456 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17457 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17458 #ifdef CONFIG_HIGHMEM
17459 		dma_mask = DMA_BIT_MASK(64);
17460 #endif
17461 	} else
17462 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17463 
17464 	/* Configure DMA attributes. */
17465 	if (dma_mask > DMA_BIT_MASK(32)) {
17466 		err = pci_set_dma_mask(pdev, dma_mask);
17467 		if (!err) {
17468 			features |= NETIF_F_HIGHDMA;
17469 			err = pci_set_consistent_dma_mask(pdev,
17470 							  persist_dma_mask);
17471 			if (err < 0) {
17472 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17473 					"DMA for consistent allocations\n");
17474 				goto err_out_apeunmap;
17475 			}
17476 		}
17477 	}
17478 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17479 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17480 		if (err) {
17481 			dev_err(&pdev->dev,
17482 				"No usable DMA configuration, aborting\n");
17483 			goto err_out_apeunmap;
17484 		}
17485 	}
17486 
17487 	tg3_init_bufmgr_config(tp);
17488 
17489 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17490 
17491 	/* 5700 B0 chips do not support checksumming correctly due
17492 	 * to hardware bugs.
17493 	 */
17494 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17495 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17496 
17497 		if (tg3_flag(tp, 5755_PLUS))
17498 			features |= NETIF_F_IPV6_CSUM;
17499 	}
17500 
17501 	/* TSO is on by default on chips that support hardware TSO.
17502 	 * Firmware TSO on older chips gives lower performance, so it
17503 	 * is off by default, but can be enabled using ethtool.
17504 	 */
17505 	if ((tg3_flag(tp, HW_TSO_1) ||
17506 	     tg3_flag(tp, HW_TSO_2) ||
17507 	     tg3_flag(tp, HW_TSO_3)) &&
17508 	    (features & NETIF_F_IP_CSUM))
17509 		features |= NETIF_F_TSO;
17510 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17511 		if (features & NETIF_F_IPV6_CSUM)
17512 			features |= NETIF_F_TSO6;
17513 		if (tg3_flag(tp, HW_TSO_3) ||
17514 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17515 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17516 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17517 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17518 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17519 			features |= NETIF_F_TSO_ECN;
17520 	}
17521 
17522 	dev->features |= features;
17523 	dev->vlan_features |= features;
17524 
17525 	/*
17526 	 * Add loopback capability only for a subset of devices that support
17527 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17528 	 * loopback for the remaining devices.
17529 	 */
17530 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17531 	    !tg3_flag(tp, CPMU_PRESENT))
17532 		/* Add the loopback capability */
17533 		features |= NETIF_F_LOOPBACK;
17534 
17535 	dev->hw_features |= features;
17536 
17537 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17538 	    !tg3_flag(tp, TSO_CAPABLE) &&
17539 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17540 		tg3_flag_set(tp, MAX_RXPEND_64);
17541 		tp->rx_pending = 63;
17542 	}
17543 
17544 	err = tg3_get_device_address(tp);
17545 	if (err) {
17546 		dev_err(&pdev->dev,
17547 			"Could not obtain valid ethernet address, aborting\n");
17548 		goto err_out_apeunmap;
17549 	}
17550 
17551 	/*
17552 	 * Reset chip in case UNDI or EFI driver did not shutdown
17553 	 * DMA self test will enable WDMAC and we'll see (spurious)
17554 	 * pending DMA on the PCI bus at that point.
17555 	 */
17556 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17557 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17558 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17559 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17560 	}
17561 
17562 	err = tg3_test_dma(tp);
17563 	if (err) {
17564 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17565 		goto err_out_apeunmap;
17566 	}
17567 
17568 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17569 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17570 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17571 	for (i = 0; i < tp->irq_max; i++) {
17572 		struct tg3_napi *tnapi = &tp->napi[i];
17573 
17574 		tnapi->tp = tp;
17575 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17576 
17577 		tnapi->int_mbox = intmbx;
17578 		if (i <= 4)
17579 			intmbx += 0x8;
17580 		else
17581 			intmbx += 0x4;
17582 
17583 		tnapi->consmbox = rcvmbx;
17584 		tnapi->prodmbox = sndmbx;
17585 
17586 		if (i)
17587 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17588 		else
17589 			tnapi->coal_now = HOSTCC_MODE_NOW;
17590 
17591 		if (!tg3_flag(tp, SUPPORT_MSIX))
17592 			break;
17593 
17594 		/*
17595 		 * If we support MSIX, we'll be using RSS.  If we're using
17596 		 * RSS, the first vector only handles link interrupts and the
17597 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17598 		 * mailbox values for the next iteration.  The values we setup
17599 		 * above are still useful for the single vectored mode.
17600 		 */
17601 		if (!i)
17602 			continue;
17603 
17604 		rcvmbx += 0x8;
17605 
17606 		if (sndmbx & 0x4)
17607 			sndmbx -= 0x4;
17608 		else
17609 			sndmbx += 0xc;
17610 	}
17611 
17612 	tg3_init_coal(tp);
17613 
17614 	pci_set_drvdata(pdev, dev);
17615 
17616 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17617 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17618 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17619 		tg3_flag_set(tp, PTP_CAPABLE);
17620 
17621 	tg3_timer_init(tp);
17622 
17623 	tg3_carrier_off(tp);
17624 
17625 	err = register_netdev(dev);
17626 	if (err) {
17627 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17628 		goto err_out_apeunmap;
17629 	}
17630 
17631 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17632 		    tp->board_part_number,
17633 		    tg3_chip_rev_id(tp),
17634 		    tg3_bus_string(tp, str),
17635 		    dev->dev_addr);
17636 
17637 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17638 		struct phy_device *phydev;
17639 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17640 		netdev_info(dev,
17641 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17642 			    phydev->drv->name, dev_name(&phydev->dev));
17643 	} else {
17644 		char *ethtype;
17645 
17646 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17647 			ethtype = "10/100Base-TX";
17648 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17649 			ethtype = "1000Base-SX";
17650 		else
17651 			ethtype = "10/100/1000Base-T";
17652 
17653 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17654 			    "(WireSpeed[%d], EEE[%d])\n",
17655 			    tg3_phy_string(tp), ethtype,
17656 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17657 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17658 	}
17659 
17660 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17661 		    (dev->features & NETIF_F_RXCSUM) != 0,
17662 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17663 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17664 		    tg3_flag(tp, ENABLE_ASF) != 0,
17665 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17666 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17667 		    tp->dma_rwctrl,
17668 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17669 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17670 
17671 	pci_save_state(pdev);
17672 
17673 	return 0;
17674 
17675 err_out_apeunmap:
17676 	if (tp->aperegs) {
17677 		iounmap(tp->aperegs);
17678 		tp->aperegs = NULL;
17679 	}
17680 
17681 err_out_iounmap:
17682 	if (tp->regs) {
17683 		iounmap(tp->regs);
17684 		tp->regs = NULL;
17685 	}
17686 
17687 err_out_free_dev:
17688 	free_netdev(dev);
17689 
17690 err_out_free_res:
17691 	pci_release_regions(pdev);
17692 
17693 err_out_disable_pdev:
17694 	if (pci_is_enabled(pdev))
17695 		pci_disable_device(pdev);
17696 	return err;
17697 }
17698 
17699 static void tg3_remove_one(struct pci_dev *pdev)
17700 {
17701 	struct net_device *dev = pci_get_drvdata(pdev);
17702 
17703 	if (dev) {
17704 		struct tg3 *tp = netdev_priv(dev);
17705 
17706 		release_firmware(tp->fw);
17707 
17708 		tg3_reset_task_cancel(tp);
17709 
17710 		if (tg3_flag(tp, USE_PHYLIB)) {
17711 			tg3_phy_fini(tp);
17712 			tg3_mdio_fini(tp);
17713 		}
17714 
17715 		unregister_netdev(dev);
17716 		if (tp->aperegs) {
17717 			iounmap(tp->aperegs);
17718 			tp->aperegs = NULL;
17719 		}
17720 		if (tp->regs) {
17721 			iounmap(tp->regs);
17722 			tp->regs = NULL;
17723 		}
17724 		free_netdev(dev);
17725 		pci_release_regions(pdev);
17726 		pci_disable_device(pdev);
17727 	}
17728 }
17729 
17730 #ifdef CONFIG_PM_SLEEP
17731 static int tg3_suspend(struct device *device)
17732 {
17733 	struct pci_dev *pdev = to_pci_dev(device);
17734 	struct net_device *dev = pci_get_drvdata(pdev);
17735 	struct tg3 *tp = netdev_priv(dev);
17736 	int err = 0;
17737 
17738 	rtnl_lock();
17739 
17740 	if (!netif_running(dev))
17741 		goto unlock;
17742 
17743 	tg3_reset_task_cancel(tp);
17744 	tg3_phy_stop(tp);
17745 	tg3_netif_stop(tp);
17746 
17747 	tg3_timer_stop(tp);
17748 
17749 	tg3_full_lock(tp, 1);
17750 	tg3_disable_ints(tp);
17751 	tg3_full_unlock(tp);
17752 
17753 	netif_device_detach(dev);
17754 
17755 	tg3_full_lock(tp, 0);
17756 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17757 	tg3_flag_clear(tp, INIT_COMPLETE);
17758 	tg3_full_unlock(tp);
17759 
17760 	err = tg3_power_down_prepare(tp);
17761 	if (err) {
17762 		int err2;
17763 
17764 		tg3_full_lock(tp, 0);
17765 
17766 		tg3_flag_set(tp, INIT_COMPLETE);
17767 		err2 = tg3_restart_hw(tp, true);
17768 		if (err2)
17769 			goto out;
17770 
17771 		tg3_timer_start(tp);
17772 
17773 		netif_device_attach(dev);
17774 		tg3_netif_start(tp);
17775 
17776 out:
17777 		tg3_full_unlock(tp);
17778 
17779 		if (!err2)
17780 			tg3_phy_start(tp);
17781 	}
17782 
17783 unlock:
17784 	rtnl_unlock();
17785 	return err;
17786 }
17787 
17788 static int tg3_resume(struct device *device)
17789 {
17790 	struct pci_dev *pdev = to_pci_dev(device);
17791 	struct net_device *dev = pci_get_drvdata(pdev);
17792 	struct tg3 *tp = netdev_priv(dev);
17793 	int err = 0;
17794 
17795 	rtnl_lock();
17796 
17797 	if (!netif_running(dev))
17798 		goto unlock;
17799 
17800 	netif_device_attach(dev);
17801 
17802 	tg3_full_lock(tp, 0);
17803 
17804 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17805 
17806 	tg3_flag_set(tp, INIT_COMPLETE);
17807 	err = tg3_restart_hw(tp,
17808 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17809 	if (err)
17810 		goto out;
17811 
17812 	tg3_timer_start(tp);
17813 
17814 	tg3_netif_start(tp);
17815 
17816 out:
17817 	tg3_full_unlock(tp);
17818 
17819 	if (!err)
17820 		tg3_phy_start(tp);
17821 
17822 unlock:
17823 	rtnl_unlock();
17824 	return err;
17825 }
17826 #endif /* CONFIG_PM_SLEEP */
17827 
17828 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17829 
17830 static void tg3_shutdown(struct pci_dev *pdev)
17831 {
17832 	struct net_device *dev = pci_get_drvdata(pdev);
17833 	struct tg3 *tp = netdev_priv(dev);
17834 
17835 	rtnl_lock();
17836 	netif_device_detach(dev);
17837 
17838 	if (netif_running(dev))
17839 		dev_close(dev);
17840 
17841 	if (system_state == SYSTEM_POWER_OFF)
17842 		tg3_power_down(tp);
17843 
17844 	rtnl_unlock();
17845 }
17846 
17847 /**
17848  * tg3_io_error_detected - called when PCI error is detected
17849  * @pdev: Pointer to PCI device
17850  * @state: The current pci connection state
17851  *
17852  * This function is called after a PCI bus error affecting
17853  * this device has been detected.
17854  */
17855 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17856 					      pci_channel_state_t state)
17857 {
17858 	struct net_device *netdev = pci_get_drvdata(pdev);
17859 	struct tg3 *tp = netdev_priv(netdev);
17860 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17861 
17862 	netdev_info(netdev, "PCI I/O error detected\n");
17863 
17864 	rtnl_lock();
17865 
17866 	/* We probably don't have netdev yet */
17867 	if (!netdev || !netif_running(netdev))
17868 		goto done;
17869 
17870 	tg3_phy_stop(tp);
17871 
17872 	tg3_netif_stop(tp);
17873 
17874 	tg3_timer_stop(tp);
17875 
17876 	/* Want to make sure that the reset task doesn't run */
17877 	tg3_reset_task_cancel(tp);
17878 
17879 	netif_device_detach(netdev);
17880 
17881 	/* Clean up software state, even if MMIO is blocked */
17882 	tg3_full_lock(tp, 0);
17883 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17884 	tg3_full_unlock(tp);
17885 
17886 done:
17887 	if (state == pci_channel_io_perm_failure) {
17888 		if (netdev) {
17889 			tg3_napi_enable(tp);
17890 			dev_close(netdev);
17891 		}
17892 		err = PCI_ERS_RESULT_DISCONNECT;
17893 	} else {
17894 		pci_disable_device(pdev);
17895 	}
17896 
17897 	rtnl_unlock();
17898 
17899 	return err;
17900 }
17901 
17902 /**
17903  * tg3_io_slot_reset - called after the pci bus has been reset.
17904  * @pdev: Pointer to PCI device
17905  *
17906  * Restart the card from scratch, as if from a cold-boot.
17907  * At this point, the card has exprienced a hard reset,
17908  * followed by fixups by BIOS, and has its config space
17909  * set up identically to what it was at cold boot.
17910  */
17911 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17912 {
17913 	struct net_device *netdev = pci_get_drvdata(pdev);
17914 	struct tg3 *tp = netdev_priv(netdev);
17915 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17916 	int err;
17917 
17918 	rtnl_lock();
17919 
17920 	if (pci_enable_device(pdev)) {
17921 		dev_err(&pdev->dev,
17922 			"Cannot re-enable PCI device after reset.\n");
17923 		goto done;
17924 	}
17925 
17926 	pci_set_master(pdev);
17927 	pci_restore_state(pdev);
17928 	pci_save_state(pdev);
17929 
17930 	if (!netdev || !netif_running(netdev)) {
17931 		rc = PCI_ERS_RESULT_RECOVERED;
17932 		goto done;
17933 	}
17934 
17935 	err = tg3_power_up(tp);
17936 	if (err)
17937 		goto done;
17938 
17939 	rc = PCI_ERS_RESULT_RECOVERED;
17940 
17941 done:
17942 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17943 		tg3_napi_enable(tp);
17944 		dev_close(netdev);
17945 	}
17946 	rtnl_unlock();
17947 
17948 	return rc;
17949 }
17950 
17951 /**
17952  * tg3_io_resume - called when traffic can start flowing again.
17953  * @pdev: Pointer to PCI device
17954  *
17955  * This callback is called when the error recovery driver tells
17956  * us that its OK to resume normal operation.
17957  */
17958 static void tg3_io_resume(struct pci_dev *pdev)
17959 {
17960 	struct net_device *netdev = pci_get_drvdata(pdev);
17961 	struct tg3 *tp = netdev_priv(netdev);
17962 	int err;
17963 
17964 	rtnl_lock();
17965 
17966 	if (!netif_running(netdev))
17967 		goto done;
17968 
17969 	tg3_full_lock(tp, 0);
17970 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17971 	tg3_flag_set(tp, INIT_COMPLETE);
17972 	err = tg3_restart_hw(tp, true);
17973 	if (err) {
17974 		tg3_full_unlock(tp);
17975 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17976 		goto done;
17977 	}
17978 
17979 	netif_device_attach(netdev);
17980 
17981 	tg3_timer_start(tp);
17982 
17983 	tg3_netif_start(tp);
17984 
17985 	tg3_full_unlock(tp);
17986 
17987 	tg3_phy_start(tp);
17988 
17989 done:
17990 	rtnl_unlock();
17991 }
17992 
17993 static const struct pci_error_handlers tg3_err_handler = {
17994 	.error_detected	= tg3_io_error_detected,
17995 	.slot_reset	= tg3_io_slot_reset,
17996 	.resume		= tg3_io_resume
17997 };
17998 
17999 static struct pci_driver tg3_driver = {
18000 	.name		= DRV_MODULE_NAME,
18001 	.id_table	= tg3_pci_tbl,
18002 	.probe		= tg3_init_one,
18003 	.remove		= tg3_remove_one,
18004 	.err_handler	= &tg3_err_handler,
18005 	.driver.pm	= &tg3_pm_ops,
18006 	.shutdown	= tg3_shutdown,
18007 };
18008 
18009 module_pci_driver(tg3_driver);
18010