/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
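
/* A minimal usage sketch (this particular combination is illustrative,
 * not taken from the code below): the second argument is the TG3_FLAG_
 * suffix of an enum TG3_FLAGS value declared in tg3.h, e.g.:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 */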

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
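
/* NEXT_TX is an instance of the '& (foo - 1)' idiom from the comment
 * above: TG3_TX_RING_SIZE is a power of two (512), so masking with 511
 * wraps the index for free, e.g. NEXT_TX(510) == 511, NEXT_TX(511) == 0.
 */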

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
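
/* The pair of config-space writes above is the chip's indirect register
 * access protocol: TG3PCI_REG_BASE_ADDR selects the target register
 * offset and TG3PCI_REG_DATA carries the value, with indirect_lock
 * serializing the two-step sequence against other users of the window.
 */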

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
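
/* The readl() above flushes the posted MMIO write: PCI reads are never
 * posted, so by the time readl() returns, the preceding writel() is
 * guaranteed to have reached the device.
 */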

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
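
/* Sketch of how the accessors above are used in the rest of this file
 * (the register/value pairings here are illustrative):
 *
 *	tw32(HOSTCC_MODE, tp->coal_now);	     posted write
 *	tw32_f(MAC_MI_MODE, tp->mi_mode);	     write, then flush read
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);     write, flush, 40 usec wait
 *	val = tr32(MAC_MI_COM);			     read
 */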

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
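
/* tg3_write_mem() and tg3_read_mem() reach NIC SRAM through another
 * windowing scheme: TG3PCI_MEM_WIN_BASE_ADDR selects the SRAM offset
 * and TG3PCI_MEM_WIN_DATA moves the data, either via PCI config space
 * (SRAM_USE_CONFIG) or via the memory-mapped mirror of the same window.
 * The base address is always parked at zero afterwards.
 */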

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
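
/* Typical lock/unlock pairing (a sketch; error handling elided):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch state shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */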

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
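
/* Note: on success, tg3_ape_event_lock() returns with TG3_APE_LOCK_MEM
 * still held; callers such as tg3_ape_send_event() below are expected
 * to drop it with tg3_ape_unlock() once their event has been posted.
 */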

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Send the heartbeat only if the hb interval has elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
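
/* With PHY_BUSY_LOOPS polls of 10 usec each, the MI busy-wait loops
 * below allow roughly 50 ms for an MDIO transaction to complete before
 * __tg3_readphy()/__tg3_writephy() give up with -EBUSY.
 */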

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
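
/* The two helpers above implement the usual Clause 45 indirect access
 * sequence over Clause 22 MDIO: select the MMD device in
 * MII_TG3_MMD_CTRL, latch the register address through
 * MII_TG3_MMD_ADDRESS, switch the control register to no-increment
 * data mode, then move the data through MII_TG3_MMD_ADDRESS again.
 */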

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
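
/* MII_TG3_AUX_CTRL is a banked register: reads select a shadow bank
 * through the RDSEL field, while writes to the MISC bank only take
 * effect with MII_TG3_AUXCTL_MISC_WREN set, which is why the write
 * helper above ORs that bit in automatically.
 */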

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
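
/* BMCR_RESET is defined by 802.3 to self-clear once the PHY reset
 * completes, so the poll above simply waits for the bit to drop,
 * allowing up to ~50 ms (5000 iterations of 10 usec) before timing out.
 */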

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
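
/* Layout of the four words gathered above, each packing two MII
 * registers as high:low halves: data[0] = BMCR:BMSR,
 * data[1] = ADVERTISE:LPA, data[2] = CTRL1000:STAT1000 (left zero for
 * MII serdes PHYs), data[3] = PHYADDR in the upper half-word.
 */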
1702 
1703 /* tp->lock is held. */
1704 static void tg3_ump_link_report(struct tg3 *tp)
1705 {
1706 	u32 data[4];
1707 
1708 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1709 		return;
1710 
1711 	tg3_phy_gather_ump_data(tp, data);
1712 
1713 	tg3_wait_for_event_ack(tp);
1714 
1715 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1716 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1717 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1718 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1719 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1720 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1721 
1722 	tg3_generate_fw_event(tp);
1723 }
1724 
1725 /* tp->lock is held. */
1726 static void tg3_stop_fw(struct tg3 *tp)
1727 {
1728 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1729 		/* Wait for RX cpu to ACK the previous event. */
1730 		tg3_wait_for_event_ack(tp);
1731 
1732 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1733 
1734 		tg3_generate_fw_event(tp);
1735 
1736 		/* Wait for RX cpu to ACK this event. */
1737 		tg3_wait_for_event_ack(tp);
1738 	}
1739 }
1740 
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1743 {
1744 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1745 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1746 
1747 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1748 		switch (kind) {
1749 		case RESET_KIND_INIT:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_START);
1752 			break;
1753 
1754 		case RESET_KIND_SHUTDOWN:
1755 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756 				      DRV_STATE_UNLOAD);
1757 			break;
1758 
1759 		case RESET_KIND_SUSPEND:
1760 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761 				      DRV_STATE_SUSPEND);
1762 			break;
1763 
1764 		default:
1765 			break;
1766 		}
1767 	}
1768 }
1769 
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1772 {
1773 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1774 		switch (kind) {
1775 		case RESET_KIND_INIT:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_START_DONE);
1778 			break;
1779 
1780 		case RESET_KIND_SHUTDOWN:
1781 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 				      DRV_STATE_UNLOAD_DONE);
1783 			break;
1784 
1785 		default:
1786 			break;
1787 		}
1788 	}
1789 }
1790 
1791 /* tp->lock is held. */
1792 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1793 {
1794 	if (tg3_flag(tp, ENABLE_ASF)) {
1795 		switch (kind) {
1796 		case RESET_KIND_INIT:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_START);
1799 			break;
1800 
1801 		case RESET_KIND_SHUTDOWN:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_UNLOAD);
1804 			break;
1805 
1806 		case RESET_KIND_SUSPEND:
1807 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1808 				      DRV_STATE_SUSPEND);
1809 			break;
1810 
1811 		default:
1812 			break;
1813 		}
1814 	}
1815 }
1816 
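/* Poll for the bootcode's ~MAGIC1 handshake in the firmware mailbox
 * (VCPU init-done on the 5906).  A chip with no firmware fitted is not
 * an error; it is merely reported once.
 */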
1817 static int tg3_poll_fw(struct tg3 *tp)
1818 {
1819 	int i;
1820 	u32 val;
1821 
1822 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1823 		return 0;
1824 
1825 	if (tg3_flag(tp, IS_SSB_CORE)) {
1826 		/* We don't use firmware. */
1827 		return 0;
1828 	}
1829 
1830 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1831 		/* Wait up to 20ms for init done. */
1832 		for (i = 0; i < 200; i++) {
1833 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1834 				return 0;
1835 			if (pci_channel_offline(tp->pdev))
1836 				return -ENODEV;
1837 
1838 			udelay(100);
1839 		}
1840 		return -ENODEV;
1841 	}
1842 
1843 	/* Wait for firmware initialization to complete. */
1844 	for (i = 0; i < 100000; i++) {
1845 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1846 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1847 			break;
1848 		if (pci_channel_offline(tp->pdev)) {
1849 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1850 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1851 				netdev_info(tp->dev, "No firmware running\n");
1852 			}
1853 
1854 			break;
1855 		}
1856 
1857 		udelay(10);
1858 	}
1859 
1860 	/* Chip might not be fitted with firmware.  Some Sun onboard
1861 	 * parts are configured like that.  So don't signal the timeout
1862 	 * of the above loop as an error, but do report the lack of
1863 	 * running firmware once.
1864 	 */
1865 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1866 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1867 
1868 		netdev_info(tp->dev, "No firmware running\n");
1869 	}
1870 
1871 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1872 		/* The 57765 A0 needs a little more
1873 		 * time to do some important work.
1874 		 */
1875 		mdelay(10);
1876 	}
1877 
1878 	return 0;
1879 }
1880 
1881 static void tg3_link_report(struct tg3 *tp)
1882 {
1883 	if (!netif_carrier_ok(tp->dev)) {
1884 		netif_info(tp, link, tp->dev, "Link is down\n");
1885 		tg3_ump_link_report(tp);
1886 	} else if (netif_msg_link(tp)) {
1887 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1888 			    (tp->link_config.active_speed == SPEED_1000 ?
1889 			     1000 :
1890 			     (tp->link_config.active_speed == SPEED_100 ?
1891 			      100 : 10)),
1892 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1893 			     "full" : "half"));
1894 
1895 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1896 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1897 			    "on" : "off",
1898 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1899 			    "on" : "off");
1900 
1901 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1902 			netdev_info(tp->dev, "EEE is %s\n",
1903 				    tp->setlpicnt ? "enabled" : "disabled");
1904 
1905 		tg3_ump_link_report(tp);
1906 	}
1907 
1908 	tp->link_up = netif_carrier_ok(tp->dev);
1909 }
1910 
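/* The helpers below translate between the FLOW_CTRL_{TX,RX} bits and
 * the symmetric/asymmetric PAUSE advertisement bits used by IEEE 802.3
 * autonegotiation, for both the 1000BASE-T and 1000BASE-X register
 * layouts.
 */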
1911 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1912 {
1913 	u32 flowctrl = 0;
1914 
1915 	if (adv & ADVERTISE_PAUSE_CAP) {
1916 		flowctrl |= FLOW_CTRL_RX;
1917 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1918 			flowctrl |= FLOW_CTRL_TX;
1919 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1920 		flowctrl |= FLOW_CTRL_TX;
1921 
1922 	return flowctrl;
1923 }
1924 
1925 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1926 {
1927 	u16 miireg;
1928 
1929 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1930 		miireg = ADVERTISE_1000XPAUSE;
1931 	else if (flow_ctrl & FLOW_CTRL_TX)
1932 		miireg = ADVERTISE_1000XPSE_ASYM;
1933 	else if (flow_ctrl & FLOW_CTRL_RX)
1934 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1935 	else
1936 		miireg = 0;
1937 
1938 	return miireg;
1939 }
1940 
1941 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1942 {
1943 	u32 flowctrl = 0;
1944 
1945 	if (adv & ADVERTISE_1000XPAUSE) {
1946 		flowctrl |= FLOW_CTRL_RX;
1947 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1948 			flowctrl |= FLOW_CTRL_TX;
1949 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1950 		flowctrl |= FLOW_CTRL_TX;
1951 
1952 	return flowctrl;
1953 }
1954 
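/* Resolve the negotiated pause configuration from the local and remote
 * 1000BASE-X advertisements (IEEE 802.3 Annex 28B).  For example, local
 * PAUSE|ASYM against a remote advertising only ASYM resolves to
 * FLOW_CTRL_RX here: we honor received pause frames but send none.
 */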
1955 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1956 {
1957 	u8 cap = 0;
1958 
1959 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1960 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1961 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1962 		if (lcladv & ADVERTISE_1000XPAUSE)
1963 			cap = FLOW_CTRL_RX;
1964 		if (rmtadv & ADVERTISE_1000XPAUSE)
1965 			cap = FLOW_CTRL_TX;
1966 	}
1967 
1968 	return cap;
1969 }
1970 
1971 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1972 {
1973 	u8 autoneg;
1974 	u8 flowctrl = 0;
1975 	u32 old_rx_mode = tp->rx_mode;
1976 	u32 old_tx_mode = tp->tx_mode;
1977 
1978 	if (tg3_flag(tp, USE_PHYLIB))
1979 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1980 	else
1981 		autoneg = tp->link_config.autoneg;
1982 
1983 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1984 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1985 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1986 		else
1987 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1988 	} else
1989 		flowctrl = tp->link_config.flowctrl;
1990 
1991 	tp->link_config.active_flowctrl = flowctrl;
1992 
1993 	if (flowctrl & FLOW_CTRL_RX)
1994 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1995 	else
1996 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1997 
1998 	if (old_rx_mode != tp->rx_mode)
1999 		tw32_f(MAC_RX_MODE, tp->rx_mode);
2000 
2001 	if (flowctrl & FLOW_CTRL_TX)
2002 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2003 	else
2004 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2005 
2006 	if (old_tx_mode != tp->tx_mode)
2007 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2008 }
2009 
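/* phylib link-change callback: fold the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, IPG/slot-time) under
 * tp->lock, and emit a link report when anything changed.
 */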
2010 static void tg3_adjust_link(struct net_device *dev)
2011 {
2012 	u8 oldflowctrl, linkmesg = 0;
2013 	u32 mac_mode, lcl_adv, rmt_adv;
2014 	struct tg3 *tp = netdev_priv(dev);
2015 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2016 
2017 	spin_lock_bh(&tp->lock);
2018 
2019 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2020 				    MAC_MODE_HALF_DUPLEX);
2021 
2022 	oldflowctrl = tp->link_config.active_flowctrl;
2023 
2024 	if (phydev->link) {
2025 		lcl_adv = 0;
2026 		rmt_adv = 0;
2027 
2028 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 		else if (phydev->speed == SPEED_1000 ||
2031 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2032 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2033 		else
2034 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2035 
2036 		if (phydev->duplex == DUPLEX_HALF)
2037 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2038 		else {
2039 			lcl_adv = mii_advertise_flowctrl(
2040 				  tp->link_config.flowctrl);
2041 
2042 			if (phydev->pause)
2043 				rmt_adv = LPA_PAUSE_CAP;
2044 			if (phydev->asym_pause)
2045 				rmt_adv |= LPA_PAUSE_ASYM;
2046 		}
2047 
2048 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2049 	} else
2050 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2051 
2052 	if (mac_mode != tp->mac_mode) {
2053 		tp->mac_mode = mac_mode;
2054 		tw32_f(MAC_MODE, tp->mac_mode);
2055 		udelay(40);
2056 	}
2057 
2058 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2059 		if (phydev->speed == SPEED_10)
2060 			tw32(MAC_MI_STAT,
2061 			     MAC_MI_STAT_10MBPS_MODE |
2062 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2063 		else
2064 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2065 	}
2066 
2067 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 	else
2073 		tw32(MAC_TX_LENGTHS,
2074 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2075 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2076 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2077 
2078 	if (phydev->link != tp->old_link ||
2079 	    phydev->speed != tp->link_config.active_speed ||
2080 	    phydev->duplex != tp->link_config.active_duplex ||
2081 	    oldflowctrl != tp->link_config.active_flowctrl)
2082 		linkmesg = 1;
2083 
2084 	tp->old_link = phydev->link;
2085 	tp->link_config.active_speed = phydev->speed;
2086 	tp->link_config.active_duplex = phydev->duplex;
2087 
2088 	spin_unlock_bh(&tp->lock);
2089 
2090 	if (linkmesg)
2091 		tg3_link_report(tp);
2092 }
2093 
2094 static int tg3_phy_init(struct tg3 *tp)
2095 {
2096 	struct phy_device *phydev;
2097 
2098 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2099 		return 0;
2100 
2101 	/* Bring the PHY back to a known state. */
2102 	tg3_bmcr_reset(tp);
2103 
2104 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2105 
2106 	/* Attach the MAC to the PHY. */
2107 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2108 			     tg3_adjust_link, phydev->interface);
2109 	if (IS_ERR(phydev)) {
2110 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2111 		return PTR_ERR(phydev);
2112 	}
2113 
2114 	/* Mask with MAC supported features. */
2115 	switch (phydev->interface) {
2116 	case PHY_INTERFACE_MODE_GMII:
2117 	case PHY_INTERFACE_MODE_RGMII:
2118 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2119 			phydev->supported &= (PHY_GBIT_FEATURES |
2120 					      SUPPORTED_Pause |
2121 					      SUPPORTED_Asym_Pause);
2122 			break;
2123 		}
2124 		/* fallthru */
2125 	case PHY_INTERFACE_MODE_MII:
2126 		phydev->supported &= (PHY_BASIC_FEATURES |
2127 				      SUPPORTED_Pause |
2128 				      SUPPORTED_Asym_Pause);
2129 		break;
2130 	default:
2131 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2132 		return -EINVAL;
2133 	}
2134 
2135 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2136 
2137 	phydev->advertising = phydev->supported;
2138 
2139 	phy_attached_info(phydev);
2140 
2141 	return 0;
2142 }
2143 
2144 static void tg3_phy_start(struct tg3 *tp)
2145 {
2146 	struct phy_device *phydev;
2147 
2148 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2149 		return;
2150 
2151 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2152 
2153 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2154 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2155 		phydev->speed = tp->link_config.speed;
2156 		phydev->duplex = tp->link_config.duplex;
2157 		phydev->autoneg = tp->link_config.autoneg;
2158 		phydev->advertising = tp->link_config.advertising;
2159 	}
2160 
2161 	phy_start(phydev);
2162 
2163 	phy_start_aneg(phydev);
2164 }
2165 
2166 static void tg3_phy_stop(struct tg3 *tp)
2167 {
2168 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2169 		return;
2170 
2171 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2172 }
2173 
2174 static void tg3_phy_fini(struct tg3 *tp)
2175 {
2176 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2177 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2178 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2179 	}
2180 }
2181 
2182 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2183 {
2184 	int err;
2185 	u32 val;
2186 
2187 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2188 		return 0;
2189 
2190 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2191 		/* Cannot do read-modify-write on 5401 */
2192 		err = tg3_phy_auxctl_write(tp,
2193 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2194 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2195 					   0x4c20);
2196 		goto done;
2197 	}
2198 
2199 	err = tg3_phy_auxctl_read(tp,
2200 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2201 	if (err)
2202 		return err;
2203 
2204 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2205 	err = tg3_phy_auxctl_write(tp,
2206 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2207 
2208 done:
2209 	return err;
2210 }
2211 
2212 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2213 {
2214 	u32 phytest;
2215 
2216 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2217 		u32 phy;
2218 
2219 		tg3_writephy(tp, MII_TG3_FET_TEST,
2220 			     phytest | MII_TG3_FET_SHADOW_EN);
2221 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2222 			if (enable)
2223 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2224 			else
2225 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2226 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2227 		}
2228 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2229 	}
2230 }
2231 
2232 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2233 {
2234 	u32 reg;
2235 
2236 	if (!tg3_flag(tp, 5705_PLUS) ||
2237 	    (tg3_flag(tp, 5717_PLUS) &&
2238 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2239 		return;
2240 
2241 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2242 		tg3_phy_fet_toggle_apd(tp, enable);
2243 		return;
2244 	}
2245 
2246 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2247 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2248 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2249 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2250 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2251 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2252 
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

2256 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2257 	if (enable)
2258 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2259 
2260 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2261 }
2262 
2263 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2264 {
2265 	u32 phy;
2266 
2267 	if (!tg3_flag(tp, 5705_PLUS) ||
2268 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2269 		return;
2270 
2271 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2272 		u32 ephy;
2273 
2274 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2275 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2276 
2277 			tg3_writephy(tp, MII_TG3_FET_TEST,
2278 				     ephy | MII_TG3_FET_SHADOW_EN);
2279 			if (!tg3_readphy(tp, reg, &phy)) {
2280 				if (enable)
2281 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2282 				else
2283 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2284 				tg3_writephy(tp, reg, phy);
2285 			}
2286 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2287 		}
2288 	} else {
2289 		int ret;
2290 
2291 		ret = tg3_phy_auxctl_read(tp,
2292 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2293 		if (!ret) {
2294 			if (enable)
2295 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2296 			else
2297 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2298 			tg3_phy_auxctl_write(tp,
2299 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2300 		}
2301 	}
2302 }
2303 
2304 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2305 {
2306 	int ret;
2307 	u32 val;
2308 
2309 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2310 		return;
2311 
2312 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2313 	if (!ret)
2314 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2315 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2316 }
2317 
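/* Distribute the one-time-programmable (OTP) calibration word into the
 * PHY DSP taps (AGC target, high-pass filter, ADC clock adjust, VDAC,
 * 10BT amplitude and offset trims), guarded by the auxctl SMDSP
 * enable.
 */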
2318 static void tg3_phy_apply_otp(struct tg3 *tp)
2319 {
2320 	u32 otp, phy;
2321 
2322 	if (!tp->phy_otp)
2323 		return;
2324 
2325 	otp = tp->phy_otp;
2326 
2327 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2328 		return;
2329 
2330 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2331 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2332 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2333 
2334 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2335 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2337 
2338 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2339 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2340 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2341 
2342 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2344 
2345 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2346 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2347 
2348 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2349 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2350 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2351 
2352 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2353 }
2354 
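/* Refresh the EEE state (eee_active, local and link-partner
 * advertisements, LPI settings) from the PHY's Clause 45 EEE registers
 * and the CPMU, into tp->eee or a caller-supplied buffer.
 */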
2355 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2356 {
2357 	u32 val;
2358 	struct ethtool_eee *dest = &tp->eee;
2359 
2360 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2361 		return;
2362 
2363 	if (eee)
2364 		dest = eee;
2365 
2366 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2367 		return;
2368 
2369 	/* Pull eee_active */
2370 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2372 		dest->eee_active = 1;
2373 	} else
2374 		dest->eee_active = 0;
2375 
2376 	/* Pull lp advertised settings */
2377 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2378 		return;
2379 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 
2381 	/* Pull advertised and eee_enabled settings */
2382 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2383 		return;
2384 	dest->eee_enabled = !!val;
2385 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2386 
2387 	/* Pull tx_lpi_enabled */
2388 	val = tr32(TG3_CPMU_EEE_MODE);
2389 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2390 
2391 	/* Pull lpi timer value */
2392 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2393 }
2394 
2395 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2396 {
2397 	u32 val;
2398 
2399 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2400 		return;
2401 
2402 	tp->setlpicnt = 0;
2403 
2404 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2405 	    current_link_up &&
2406 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2407 	    (tp->link_config.active_speed == SPEED_100 ||
2408 	     tp->link_config.active_speed == SPEED_1000)) {
2409 		u32 eeectl;
2410 
2411 		if (tp->link_config.active_speed == SPEED_1000)
2412 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2413 		else
2414 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2415 
2416 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2417 
2418 		tg3_eee_pull_config(tp, NULL);
2419 		if (tp->eee.eee_active)
2420 			tp->setlpicnt = 2;
2421 	}
2422 
2423 	if (!tp->setlpicnt) {
2424 		if (current_link_up &&
2425 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2426 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2427 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2428 		}
2429 
2430 		val = tr32(TG3_CPMU_EEE_MODE);
2431 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2432 	}
2433 }
2434 
2435 static void tg3_phy_eee_enable(struct tg3 *tp)
2436 {
2437 	u32 val;
2438 
2439 	if (tp->link_config.active_speed == SPEED_1000 &&
2440 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2441 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2442 	     tg3_flag(tp, 57765_CLASS)) &&
2443 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2444 		val = MII_TG3_DSP_TAP26_ALNOKO |
2445 		      MII_TG3_DSP_TAP26_RMRXSTO;
2446 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2447 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2448 	}
2449 
2450 	val = tr32(TG3_CPMU_EEE_MODE);
2451 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2452 }
2453 
2454 static int tg3_wait_macro_done(struct tg3 *tp)
2455 {
2456 	int limit = 100;
2457 
2458 	while (limit--) {
2459 		u32 tmp32;
2460 
2461 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2462 			if ((tmp32 & 0x1000) == 0)
2463 				break;
2464 		}
2465 	}
2466 	if (limit < 0)
2467 		return -EBUSY;
2468 
2469 	return 0;
2470 }
2471 
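/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A wait-macro timeout sets *resetp to request a fresh
 * PHY reset; any failure returns -EBUSY.
 */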
2472 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2473 {
2474 	static const u32 test_pat[4][6] = {
2475 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2476 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2477 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2478 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2479 	};
2480 	int chan;
2481 
2482 	for (chan = 0; chan < 4; chan++) {
2483 		int i;
2484 
2485 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 			     (chan * 0x2000) | 0x0200);
2487 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2488 
2489 		for (i = 0; i < 6; i++)
2490 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2491 				     test_pat[chan][i]);
2492 
2493 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2494 		if (tg3_wait_macro_done(tp)) {
2495 			*resetp = 1;
2496 			return -EBUSY;
2497 		}
2498 
2499 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2500 			     (chan * 0x2000) | 0x0200);
2501 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2502 		if (tg3_wait_macro_done(tp)) {
2503 			*resetp = 1;
2504 			return -EBUSY;
2505 		}
2506 
2507 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2508 		if (tg3_wait_macro_done(tp)) {
2509 			*resetp = 1;
2510 			return -EBUSY;
2511 		}
2512 
2513 		for (i = 0; i < 6; i += 2) {
2514 			u32 low, high;
2515 
2516 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2517 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2518 			    tg3_wait_macro_done(tp)) {
2519 				*resetp = 1;
2520 				return -EBUSY;
2521 			}
2522 			low &= 0x7fff;
2523 			high &= 0x000f;
2524 			if (low != test_pat[chan][i] ||
2525 			    high != test_pat[chan][i+1]) {
2526 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2527 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2528 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2529 
2530 				return -EBUSY;
2531 			}
2532 		}
2533 	}
2534 
2535 	return 0;
2536 }
2537 
2538 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2539 {
2540 	int chan;
2541 
2542 	for (chan = 0; chan < 4; chan++) {
2543 		int i;
2544 
2545 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2546 			     (chan * 0x2000) | 0x0200);
2547 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2548 		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2550 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2551 		if (tg3_wait_macro_done(tp))
2552 			return -EBUSY;
2553 	}
2554 
2555 	return 0;
2556 }
2557 
2558 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2559 {
2560 	u32 reg32, phy9_orig;
2561 	int retries, do_phy_reset, err;
2562 
2563 	retries = 10;
2564 	do_phy_reset = 1;
2565 	do {
2566 		if (do_phy_reset) {
2567 			err = tg3_bmcr_reset(tp);
2568 			if (err)
2569 				return err;
2570 			do_phy_reset = 0;
2571 		}
2572 
2573 		/* Disable transmitter and interrupt.  */
2574 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2575 			continue;
2576 
2577 		reg32 |= 0x3000;
2578 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2579 
2580 		/* Set full-duplex, 1000 mbps.  */
2581 		tg3_writephy(tp, MII_BMCR,
2582 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2583 
2584 		/* Set to master mode.  */
2585 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2586 			continue;
2587 
2588 		tg3_writephy(tp, MII_CTRL1000,
2589 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2590 
2591 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2592 		if (err)
2593 			return err;
2594 
2595 		/* Block the PHY control access.  */
2596 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2597 
2598 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2599 		if (!err)
2600 			break;
2601 	} while (--retries);
2602 
2603 	err = tg3_phy_reset_chanpat(tp);
2604 	if (err)
2605 		return err;
2606 
2607 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2608 
2609 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2610 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2611 
2612 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2613 
2614 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2615 
2616 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2617 	if (err)
2618 		return err;
2619 
2620 	reg32 &= ~0x3000;
2621 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2622 
2623 	return 0;
2624 }
2625 
2626 static void tg3_carrier_off(struct tg3 *tp)
2627 {
2628 	netif_carrier_off(tp->dev);
2629 	tp->link_up = false;
2630 }
2631 
2632 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2633 {
2634 	if (tg3_flag(tp, ENABLE_ASF))
2635 		netdev_warn(tp->dev,
2636 			    "Management side-band traffic will be interrupted during phy settings change\n");
2637 }
2638 
/* Reset the tigon3 PHY unconditionally and reapply all chip- and
 * PHY-specific workarounds.
 */
2642 static int tg3_phy_reset(struct tg3 *tp)
2643 {
2644 	u32 val, cpmuctrl;
2645 	int err;
2646 
2647 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2648 		val = tr32(GRC_MISC_CFG);
2649 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2650 		udelay(40);
2651 	}
2652 	err  = tg3_readphy(tp, MII_BMSR, &val);
2653 	err |= tg3_readphy(tp, MII_BMSR, &val);
2654 	if (err != 0)
2655 		return -EBUSY;
2656 
2657 	if (netif_running(tp->dev) && tp->link_up) {
2658 		netif_carrier_off(tp->dev);
2659 		tg3_link_report(tp);
2660 	}
2661 
2662 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2663 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2664 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2665 		err = tg3_phy_reset_5703_4_5(tp);
2666 		if (err)
2667 			return err;
2668 		goto out;
2669 	}
2670 
2671 	cpmuctrl = 0;
2672 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2673 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2674 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2675 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2676 			tw32(TG3_CPMU_CTRL,
2677 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2678 	}
2679 
2680 	err = tg3_bmcr_reset(tp);
2681 	if (err)
2682 		return err;
2683 
2684 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2685 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2686 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2687 
2688 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2689 	}
2690 
2691 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2692 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2693 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2694 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2695 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2696 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2697 			udelay(40);
2698 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2699 		}
2700 	}
2701 
2702 	if (tg3_flag(tp, 5717_PLUS) &&
2703 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2704 		return 0;
2705 
2706 	tg3_phy_apply_otp(tp);
2707 
2708 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2709 		tg3_phy_toggle_apd(tp, true);
2710 	else
2711 		tg3_phy_toggle_apd(tp, false);
2712 
2713 out:
2714 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2715 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2716 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2717 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2718 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 	}
2720 
2721 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2722 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2723 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2724 	}
2725 
2726 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2727 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2728 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2729 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2730 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2731 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 		}
2733 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2734 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2735 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2736 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2737 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2738 				tg3_writephy(tp, MII_TG3_TEST1,
2739 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2740 			} else
2741 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2742 
2743 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2744 		}
2745 	}
2746 
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2749 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2750 		/* Cannot do read-modify-write on 5401 */
2751 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2752 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2753 		/* Set bit 14 with read-modify-write to preserve other bits */
2754 		err = tg3_phy_auxctl_read(tp,
2755 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2756 		if (!err)
2757 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2758 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2759 	}
2760 
2761 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2762 	 * jumbo frames transmission.
2763 	 */
2764 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2765 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2766 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2768 	}
2769 
2770 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2771 		/* adjust output voltage */
2772 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2773 	}
2774 
2775 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2776 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2777 
2778 	tg3_phy_toggle_automdix(tp, true);
2779 	tg3_phy_set_wirespeed(tp);
2780 	return 0;
2781 }
2782 
2783 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2784 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2785 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2786 					  TG3_GPIO_MSG_NEED_VAUX)
2787 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2788 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2789 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2790 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2791 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2792 
2793 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2794 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2795 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2796 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2797 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2798 
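/* Each PCI function owns a 4-bit slice of the shared GPIO message word
 * (kept in the APE GPIO_MSG register on 5717/5719, else in
 * TG3_CPMU_DRV_STATUS), so function N's bits sit at shift 4 * N.
 * Update this function's slice and return the whole word so callers
 * can test the ALL_* masks above.
 */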
2799 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2800 {
2801 	u32 status, shift;
2802 
2803 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2804 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2805 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2806 	else
2807 		status = tr32(TG3_CPMU_DRV_STATUS);
2808 
2809 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2810 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2811 	status |= (newstat << shift);
2812 
2813 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2815 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2816 	else
2817 		tw32(TG3_CPMU_DRV_STATUS, status);
2818 
2819 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2820 }
2821 
2822 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2823 {
2824 	if (!tg3_flag(tp, IS_NIC))
2825 		return 0;
2826 
2827 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2828 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2829 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2830 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2831 			return -EIO;
2832 
2833 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2834 
2835 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2836 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2837 
2838 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2839 	} else {
2840 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2841 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 	}
2843 
2844 	return 0;
2845 }
2846 
2847 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2848 {
2849 	u32 grc_local_ctrl;
2850 
2851 	if (!tg3_flag(tp, IS_NIC) ||
2852 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2853 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2854 		return;
2855 
2856 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2857 
2858 	tw32_wait_f(GRC_LOCAL_CTRL,
2859 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 
2862 	tw32_wait_f(GRC_LOCAL_CTRL,
2863 		    grc_local_ctrl,
2864 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 
2866 	tw32_wait_f(GRC_LOCAL_CTRL,
2867 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2868 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2869 }
2870 
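/* Sequence the GRC local-control GPIOs that gate the Vaux power
 * switch.  The exact sequence is board-specific: note the GPIO 0/2
 * swap on non-'e' 5761 parts and the NO_GPIO2 SRAM config bit handled
 * below.
 */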
2871 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2872 {
2873 	if (!tg3_flag(tp, IS_NIC))
2874 		return;
2875 
2876 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2877 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2878 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2879 			    (GRC_LCLCTRL_GPIO_OE0 |
2880 			     GRC_LCLCTRL_GPIO_OE1 |
2881 			     GRC_LCLCTRL_GPIO_OE2 |
2882 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2883 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2884 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2885 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2886 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2887 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2888 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2889 				     GRC_LCLCTRL_GPIO_OE1 |
2890 				     GRC_LCLCTRL_GPIO_OE2 |
2891 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2892 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2893 				     tp->grc_local_ctrl;
2894 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 
2897 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2898 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2899 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 
2901 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2902 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2903 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 	} else {
2905 		u32 no_gpio2;
2906 		u32 grc_local_ctrl = 0;
2907 
		/* Workaround to keep the board from drawing too much current. */
2909 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2910 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2911 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2912 				    grc_local_ctrl,
2913 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2914 		}
2915 
2916 		/* On 5753 and variants, GPIO2 cannot be used. */
2917 		no_gpio2 = tp->nic_sram_data_cfg &
2918 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2919 
2920 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2921 				  GRC_LCLCTRL_GPIO_OE1 |
2922 				  GRC_LCLCTRL_GPIO_OE2 |
2923 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2924 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2925 		if (no_gpio2) {
2926 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2927 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2928 		}
2929 		tw32_wait_f(GRC_LOCAL_CTRL,
2930 			    tp->grc_local_ctrl | grc_local_ctrl,
2931 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2932 
2933 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2934 
2935 		tw32_wait_f(GRC_LOCAL_CTRL,
2936 			    tp->grc_local_ctrl | grc_local_ctrl,
2937 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2938 
2939 		if (!no_gpio2) {
2940 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2941 			tw32_wait_f(GRC_LOCAL_CTRL,
2942 				    tp->grc_local_ctrl | grc_local_ctrl,
2943 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2944 		}
2945 	}
2946 }
2947 
2948 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2949 {
2950 	u32 msg = 0;
2951 
2952 	/* Serialize power state transitions */
2953 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2954 		return;
2955 
2956 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2957 		msg = TG3_GPIO_MSG_NEED_VAUX;
2958 
2959 	msg = tg3_set_function_status(tp, msg);
2960 
2961 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2962 		goto done;
2963 
2964 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2965 		tg3_pwrsrc_switch_to_vaux(tp);
2966 	else
2967 		tg3_pwrsrc_die_with_vmain(tp);
2968 
2969 done:
2970 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2971 }
2972 
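/* Decide whether this device (or, on dual-port boards, its peer) still
 * needs Vaux across the coming transition: WOL and ASF management keep
 * Vaux alive, otherwise the power source may fall back to Vmain.
 */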
2973 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2974 {
2975 	bool need_vaux = false;
2976 
2977 	/* The GPIOs do something completely different on 57765. */
2978 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2979 		return;
2980 
2981 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2982 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2983 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2984 		tg3_frob_aux_power_5717(tp, include_wol ?
2985 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2986 		return;
2987 	}
2988 
2989 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2990 		struct net_device *dev_peer;
2991 
2992 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2993 
2994 		/* remove_one() may have been run on the peer. */
2995 		if (dev_peer) {
2996 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2997 
2998 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2999 				return;
3000 
3001 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3002 			    tg3_flag(tp_peer, ENABLE_ASF))
3003 				need_vaux = true;
3004 		}
3005 	}
3006 
3007 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3008 	    tg3_flag(tp, ENABLE_ASF))
3009 		need_vaux = true;
3010 
3011 	if (need_vaux)
3012 		tg3_pwrsrc_switch_to_vaux(tp);
3013 	else
3014 		tg3_pwrsrc_die_with_vmain(tp);
3015 }
3016 
3017 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3018 {
3019 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3020 		return 1;
3021 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3022 		if (speed != SPEED_10)
3023 			return 1;
3024 	} else if (speed == SPEED_10)
3025 		return 1;
3026 
3027 	return 0;
3028 }
3029 
3030 static bool tg3_phy_power_bug(struct tg3 *tp)
3031 {
3032 	switch (tg3_asic_rev(tp)) {
3033 	case ASIC_REV_5700:
3034 	case ASIC_REV_5704:
3035 		return true;
3036 	case ASIC_REV_5780:
3037 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3038 			return true;
3039 		return false;
3040 	case ASIC_REV_5717:
3041 		if (!tp->pci_fn)
3042 			return true;
3043 		return false;
3044 	case ASIC_REV_5719:
3045 	case ASIC_REV_5720:
3046 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3047 		    !tp->pci_fn)
3048 			return true;
3049 		return false;
3050 	}
3051 
3052 	return false;
3053 }
3054 
3055 static bool tg3_phy_led_bug(struct tg3 *tp)
3056 {
3057 	switch (tg3_asic_rev(tp)) {
3058 	case ASIC_REV_5719:
3059 	case ASIC_REV_5720:
3060 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3061 		    !tp->pci_fn)
3062 			return true;
3063 		return false;
3064 	}
3065 
3066 	return false;
3067 }
3068 
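/* Put the PHY into its lowest-power state consistent with the
 * configured wake-up requirements.  Serdes links, the 5906 EPHY and
 * FET PHYs each take their own path, and chips flagged by
 * tg3_phy_power_bug() must never see BMCR_PDOWN.
 */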
3069 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3070 {
3071 	u32 val;
3072 
3073 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3074 		return;
3075 
3076 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3077 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3078 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3079 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3080 
3081 			sg_dig_ctrl |=
3082 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3083 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3084 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3085 		}
3086 		return;
3087 	}
3088 
3089 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3090 		tg3_bmcr_reset(tp);
3091 		val = tr32(GRC_MISC_CFG);
3092 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3093 		udelay(40);
3094 		return;
3095 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3096 		u32 phytest;
3097 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3098 			u32 phy;
3099 
3100 			tg3_writephy(tp, MII_ADVERTISE, 0);
3101 			tg3_writephy(tp, MII_BMCR,
3102 				     BMCR_ANENABLE | BMCR_ANRESTART);
3103 
3104 			tg3_writephy(tp, MII_TG3_FET_TEST,
3105 				     phytest | MII_TG3_FET_SHADOW_EN);
3106 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3107 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3108 				tg3_writephy(tp,
3109 					     MII_TG3_FET_SHDW_AUXMODE4,
3110 					     phy);
3111 			}
3112 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3113 		}
3114 		return;
3115 	} else if (do_low_power) {
3116 		if (!tg3_phy_led_bug(tp))
3117 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3118 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3119 
3120 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3121 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3122 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3123 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3124 	}
3125 
3126 	/* The PHY should not be powered down on some chips because
3127 	 * of bugs.
3128 	 */
3129 	if (tg3_phy_power_bug(tp))
3130 		return;
3131 
3132 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3133 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3134 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3135 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3136 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3137 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3138 	}
3139 
3140 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3141 }
3142 
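/* Acquire the hardware NVRAM arbitration semaphore (SWARB REQ1/GNT1),
 * with a recursion count so nested lock/unlock pairs balance.
 */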
3143 /* tp->lock is held. */
3144 static int tg3_nvram_lock(struct tg3 *tp)
3145 {
3146 	if (tg3_flag(tp, NVRAM)) {
3147 		int i;
3148 
3149 		if (tp->nvram_lock_cnt == 0) {
3150 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3151 			for (i = 0; i < 8000; i++) {
3152 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3153 					break;
3154 				udelay(20);
3155 			}
3156 			if (i == 8000) {
3157 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3158 				return -ENODEV;
3159 			}
3160 		}
3161 		tp->nvram_lock_cnt++;
3162 	}
3163 	return 0;
3164 }
3165 
3166 /* tp->lock is held. */
3167 static void tg3_nvram_unlock(struct tg3 *tp)
3168 {
3169 	if (tg3_flag(tp, NVRAM)) {
3170 		if (tp->nvram_lock_cnt > 0)
3171 			tp->nvram_lock_cnt--;
3172 		if (tp->nvram_lock_cnt == 0)
3173 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3174 	}
3175 }
3176 
3177 /* tp->lock is held. */
3178 static void tg3_enable_nvram_access(struct tg3 *tp)
3179 {
3180 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3181 		u32 nvaccess = tr32(NVRAM_ACCESS);
3182 
3183 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3184 	}
3185 }
3186 
3187 /* tp->lock is held. */
3188 static void tg3_disable_nvram_access(struct tg3 *tp)
3189 {
3190 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3191 		u32 nvaccess = tr32(NVRAM_ACCESS);
3192 
3193 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3194 	}
3195 }
3196 
3197 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3198 					u32 offset, u32 *val)
3199 {
3200 	u32 tmp;
3201 	int i;
3202 
3203 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3204 		return -EINVAL;
3205 
3206 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3207 					EEPROM_ADDR_DEVID_MASK |
3208 					EEPROM_ADDR_READ);
3209 	tw32(GRC_EEPROM_ADDR,
3210 	     tmp |
3211 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3212 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3213 	      EEPROM_ADDR_ADDR_MASK) |
3214 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3215 
3216 	for (i = 0; i < 1000; i++) {
3217 		tmp = tr32(GRC_EEPROM_ADDR);
3218 
3219 		if (tmp & EEPROM_ADDR_COMPLETE)
3220 			break;
3221 		msleep(1);
3222 	}
3223 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3224 		return -EBUSY;
3225 
3226 	tmp = tr32(GRC_EEPROM_DATA);
3227 
3228 	/*
3229 	 * The data will always be opposite the native endian
3230 	 * format.  Perform a blind byteswap to compensate.
3231 	 */
3232 	*val = swab32(tmp);
3233 
3234 	return 0;
3235 }
3236 
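/* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE.
 * With a 10-40 usec sleep per iteration, the timeout below bounds the
 * wait to roughly 100-400 msec.
 */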
3237 #define NVRAM_CMD_TIMEOUT 10000
3238 
3239 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3240 {
3241 	int i;
3242 
3243 	tw32(NVRAM_CMD, nvram_cmd);
3244 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3245 		usleep_range(10, 40);
3246 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3247 			udelay(10);
3248 			break;
3249 		}
3250 	}
3251 
3252 	if (i == NVRAM_CMD_TIMEOUT)
3253 		return -EBUSY;
3254 
3255 	return 0;
3256 }
3257 
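/* Atmel AT45DB0x1B-style flashes use a non-power-of-two page size
 * addressed as (page << ATMEL_AT45DB0X1B_PAGE_POS) | byte-in-page, so
 * linear NVRAM offsets must be converted to that physical form here
 * (and back again in tg3_nvram_logical_addr() below).  For example,
 * with a 264-byte page, linear offset 1000 is page 3, byte 208.
 */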
3258 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3259 {
3260 	if (tg3_flag(tp, NVRAM) &&
3261 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3262 	    tg3_flag(tp, FLASH) &&
3263 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3265 
3266 		addr = ((addr / tp->nvram_pagesize) <<
3267 			ATMEL_AT45DB0X1B_PAGE_POS) +
3268 		       (addr % tp->nvram_pagesize);
3269 
3270 	return addr;
3271 }
3272 
3273 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3274 {
3275 	if (tg3_flag(tp, NVRAM) &&
3276 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3277 	    tg3_flag(tp, FLASH) &&
3278 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3279 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3280 
3281 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3282 			tp->nvram_pagesize) +
3283 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3284 
3285 	return addr;
3286 }
3287 
3288 /* NOTE: Data read in from NVRAM is byteswapped according to
3289  * the byteswapping settings for all other register accesses.
3290  * tg3 devices are BE devices, so on a BE machine, the data
3291  * returned will be exactly as it is seen in NVRAM.  On a LE
3292  * machine, the 32-bit value will be byteswapped.
3293  */
3294 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3295 {
3296 	int ret;
3297 
3298 	if (!tg3_flag(tp, NVRAM))
3299 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3300 
3301 	offset = tg3_nvram_phys_addr(tp, offset);
3302 
3303 	if (offset > NVRAM_ADDR_MSK)
3304 		return -EINVAL;
3305 
3306 	ret = tg3_nvram_lock(tp);
3307 	if (ret)
3308 		return ret;
3309 
3310 	tg3_enable_nvram_access(tp);
3311 
3312 	tw32(NVRAM_ADDR, offset);
3313 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3314 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3315 
3316 	if (ret == 0)
3317 		*val = tr32(NVRAM_RDDATA);
3318 
3319 	tg3_disable_nvram_access(tp);
3320 
3321 	tg3_nvram_unlock(tp);
3322 
3323 	return ret;
3324 }
3325 
3326 /* Ensures NVRAM data is in bytestream format. */
3327 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3328 {
3329 	u32 v;
3330 	int res = tg3_nvram_read(tp, offset, &v);
3331 	if (!res)
3332 		*val = cpu_to_be32(v);
3333 	return res;
3334 }
3335 
3336 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3337 				    u32 offset, u32 len, u8 *buf)
3338 {
3339 	int i, j, rc = 0;
3340 	u32 val;
3341 
3342 	for (i = 0; i < len; i += 4) {
3343 		u32 addr;
3344 		__be32 data;
3345 
3346 		addr = offset + i;
3347 
3348 		memcpy(&data, buf + i, 4);
3349 
3350 		/*
3351 		 * The SEEPROM interface expects the data to always be opposite
3352 		 * the native endian format.  We accomplish this by reversing
3353 		 * all the operations that would have been performed on the
3354 		 * data from a call to tg3_nvram_read_be32().
3355 		 */
3356 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3357 
3358 		val = tr32(GRC_EEPROM_ADDR);
3359 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3360 
3361 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3362 			EEPROM_ADDR_READ);
3363 		tw32(GRC_EEPROM_ADDR, val |
3364 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3365 			(addr & EEPROM_ADDR_ADDR_MASK) |
3366 			EEPROM_ADDR_START |
3367 			EEPROM_ADDR_WRITE);
3368 
3369 		for (j = 0; j < 1000; j++) {
3370 			val = tr32(GRC_EEPROM_ADDR);
3371 
3372 			if (val & EEPROM_ADDR_COMPLETE)
3373 				break;
3374 			msleep(1);
3375 		}
3376 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3377 			rc = -EBUSY;
3378 			break;
3379 		}
3380 	}
3381 
3382 	return rc;
3383 }
3384 
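/* Unbuffered flash cannot be modified in place: read the enclosing
 * page into a bounce buffer, merge in the new data, then erase the
 * page and rewrite it word by word.
 */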
3385 /* offset and length are dword aligned */
3386 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3387 		u8 *buf)
3388 {
3389 	int ret = 0;
3390 	u32 pagesize = tp->nvram_pagesize;
3391 	u32 pagemask = pagesize - 1;
3392 	u32 nvram_cmd;
3393 	u8 *tmp;
3394 
3395 	tmp = kmalloc(pagesize, GFP_KERNEL);
3396 	if (tmp == NULL)
3397 		return -ENOMEM;
3398 
3399 	while (len) {
3400 		int j;
3401 		u32 phy_addr, page_off, size;
3402 
3403 		phy_addr = offset & ~pagemask;
3404 
3405 		for (j = 0; j < pagesize; j += 4) {
3406 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3407 						  (__be32 *) (tmp + j));
3408 			if (ret)
3409 				break;
3410 		}
3411 		if (ret)
3412 			break;
3413 
3414 		page_off = offset & pagemask;
3415 		size = pagesize;
3416 		if (len < size)
3417 			size = len;
3418 
3419 		len -= size;
3420 
3421 		memcpy(tmp + page_off, buf, size);
3422 
3423 		offset = offset + (pagesize - page_off);
3424 
3425 		tg3_enable_nvram_access(tp);
3426 
3427 		/*
3428 		 * Before we can erase the flash page, we need
3429 		 * to issue a special "write enable" command.
3430 		 */
3431 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432 
3433 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434 			break;
3435 
3436 		/* Erase the target page */
3437 		tw32(NVRAM_ADDR, phy_addr);
3438 
3439 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3440 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3441 
3442 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3443 			break;
3444 
3445 		/* Issue another write enable to start the write. */
3446 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3447 
3448 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3449 			break;
3450 
3451 		for (j = 0; j < pagesize; j += 4) {
3452 			__be32 data;
3453 
3454 			data = *((__be32 *) (tmp + j));
3455 
3456 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3457 
3458 			tw32(NVRAM_ADDR, phy_addr + j);
3459 
3460 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3461 				NVRAM_CMD_WR;
3462 
3463 			if (j == 0)
3464 				nvram_cmd |= NVRAM_CMD_FIRST;
3465 			else if (j == (pagesize - 4))
3466 				nvram_cmd |= NVRAM_CMD_LAST;
3467 
3468 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3469 			if (ret)
3470 				break;
3471 		}
3472 		if (ret)
3473 			break;
3474 	}
3475 
3476 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3477 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3478 
3479 	kfree(tmp);
3480 
3481 	return ret;
3482 }
3483 
3484 /* offset and length are dword aligned */
3485 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3486 		u8 *buf)
3487 {
3488 	int i, ret = 0;
3489 
3490 	for (i = 0; i < len; i += 4, offset += 4) {
3491 		u32 page_off, phy_addr, nvram_cmd;
3492 		__be32 data;
3493 
3494 		memcpy(&data, buf + i, 4);
3495 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3496 
3497 		page_off = offset % tp->nvram_pagesize;
3498 
3499 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3500 
3501 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3502 
3503 		if (page_off == 0 || i == 0)
3504 			nvram_cmd |= NVRAM_CMD_FIRST;
3505 		if (page_off == (tp->nvram_pagesize - 4))
3506 			nvram_cmd |= NVRAM_CMD_LAST;
3507 
3508 		if (i == (len - 4))
3509 			nvram_cmd |= NVRAM_CMD_LAST;
3510 
3511 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3512 		    !tg3_flag(tp, FLASH) ||
3513 		    !tg3_flag(tp, 57765_PLUS))
3514 			tw32(NVRAM_ADDR, phy_addr);
3515 
3516 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3517 		    !tg3_flag(tp, 5755_PLUS) &&
3518 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3519 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3520 			u32 cmd;
3521 
3522 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3523 			ret = tg3_nvram_exec_cmd(tp, cmd);
3524 			if (ret)
3525 				break;
3526 		}
3527 		if (!tg3_flag(tp, FLASH)) {
3528 			/* We always do complete word writes to eeprom. */
3529 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3530 		}
3531 
3532 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3533 		if (ret)
3534 			break;
3535 	}
3536 	return ret;
3537 }
3538 
3539 /* offset and length are dword aligned */
3540 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3541 {
3542 	int ret;
3543 
3544 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3545 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3546 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3547 		udelay(40);
3548 	}
3549 
3550 	if (!tg3_flag(tp, NVRAM)) {
3551 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3552 	} else {
3553 		u32 grc_mode;
3554 
3555 		ret = tg3_nvram_lock(tp);
3556 		if (ret)
3557 			return ret;
3558 
3559 		tg3_enable_nvram_access(tp);
3560 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3561 			tw32(NVRAM_WRITE1, 0x406);
3562 
3563 		grc_mode = tr32(GRC_MODE);
3564 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3565 
3566 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3567 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3568 				buf);
3569 		} else {
3570 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3571 				buf);
3572 		}
3573 
3574 		grc_mode = tr32(GRC_MODE);
3575 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3576 
3577 		tg3_disable_nvram_access(tp);
3578 		tg3_nvram_unlock(tp);
3579 	}
3580 
3581 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3582 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3583 		udelay(40);
3584 	}
3585 
3586 	return ret;
3587 }
3588 
3589 #define RX_CPU_SCRATCH_BASE	0x30000
3590 #define RX_CPU_SCRATCH_SIZE	0x04000
3591 #define TX_CPU_SCRATCH_BASE	0x34000
3592 #define TX_CPU_SCRATCH_SIZE	0x04000
3593 
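/* The on-chip RX and TX CPUs are halted by writing CPU_MODE_HALT and
 * polling until the mode register reflects it; firmware images are
 * then placed in the per-CPU scratch ranges defined above.
 */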
3594 /* tp->lock is held. */
3595 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3596 {
3597 	int i;
3598 	const int iters = 10000;
3599 
3600 	for (i = 0; i < iters; i++) {
3601 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3602 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3603 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3604 			break;
3605 		if (pci_channel_offline(tp->pdev))
3606 			return -EBUSY;
3607 	}
3608 
3609 	return (i == iters) ? -EBUSY : 0;
3610 }
3611 
3612 /* tp->lock is held. */
3613 static int tg3_rxcpu_pause(struct tg3 *tp)
3614 {
3615 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3616 
3617 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3618 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3619 	udelay(10);
3620 
3621 	return rc;
3622 }
3623 
3624 /* tp->lock is held. */
3625 static int tg3_txcpu_pause(struct tg3 *tp)
3626 {
3627 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3628 }
3629 
3630 /* tp->lock is held. */
3631 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3634 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3635 }
3636 
3637 /* tp->lock is held. */
3638 static void tg3_rxcpu_resume(struct tg3 *tp)
3639 {
3640 	tg3_resume_cpu(tp, RX_CPU_BASE);
3641 }
3642 
3643 /* tp->lock is held. */
3644 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3645 {
3646 	int rc;
3647 
3648 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3649 
3650 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3651 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3652 
3653 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3654 		return 0;
3655 	}
3656 	if (cpu_base == RX_CPU_BASE) {
3657 		rc = tg3_rxcpu_pause(tp);
3658 	} else {
3659 		/*
3660 		 * There is only an Rx CPU for the 5750 derivative in the
3661 		 * BCM4785.
3662 		 */
3663 		if (tg3_flag(tp, IS_SSB_CORE))
3664 			return 0;
3665 
3666 		rc = tg3_txcpu_pause(tp);
3667 	}
3668 
3669 	if (rc) {
3670 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3671 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3672 		return -ENODEV;
3673 	}
3674 
3675 	/* Clear firmware's nvram arbitration. */
3676 	if (tg3_flag(tp, NVRAM))
3677 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3678 	return 0;
3679 }
3680 
3681 static int tg3_fw_data_len(struct tg3 *tp,
3682 			   const struct tg3_firmware_hdr *fw_hdr)
3683 {
3684 	int fw_len;
3685 
	/* Non-fragmented firmware has a single firmware header followed by
	 * a contiguous chunk of data to be written. The length field in
	 * that header is not the length of the data to be written but the
	 * complete length of the bss. The data length is determined from
	 * tp->fw->size minus the headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments. Each fragment is identical to non-fragmented firmware:
	 * a firmware header followed by a contiguous chunk of data. In the
	 * main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment, i.e. fragment data + header length. The data length is
	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
	 */
3700 	if (tp->fw_len == 0xffffffff)
3701 		fw_len = be32_to_cpu(fw_hdr->len);
3702 	else
3703 		fw_len = tp->fw->size;
3704 
3705 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3706 }
3707 
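/* Copy one or more firmware fragments into a CPU's scratch memory,
 * word by word at each fragment's base_addr offset.  On the 57766 the
 * CPU is not halted first; the main header is skipped and the
 * per-fragment headers drive the copy loop.
 */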
3708 /* tp->lock is held. */
3709 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3710 				 u32 cpu_scratch_base, int cpu_scratch_size,
3711 				 const struct tg3_firmware_hdr *fw_hdr)
3712 {
3713 	int err, i;
3714 	void (*write_op)(struct tg3 *, u32, u32);
3715 	int total_len = tp->fw->size;
3716 
3717 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: trying to load TX cpu firmware on a 5705-plus chip\n",
			   __func__);
3721 		return -EINVAL;
3722 	}
3723 
3724 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3725 		write_op = tg3_write_mem;
3726 	else
3727 		write_op = tg3_write_indirect_reg32;
3728 
3729 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that the bootcode is still loading at this
		 * point. Get the NVRAM lock first before halting the cpu.
		 */
3733 		int lock_err = tg3_nvram_lock(tp);
3734 		err = tg3_halt_cpu(tp, cpu_base);
3735 		if (!lock_err)
3736 			tg3_nvram_unlock(tp);
3737 		if (err)
3738 			goto out;
3739 
3740 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3741 			write_op(tp, cpu_scratch_base + i, 0);
3742 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3743 		tw32(cpu_base + CPU_MODE,
3744 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3745 	} else {
3746 		/* Subtract additional main header for fragmented firmware and
3747 		 * advance to the first fragment
3748 		 */
3749 		total_len -= TG3_FW_HDR_LEN;
3750 		fw_hdr++;
3751 	}
3752 
3753 	do {
3754 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3755 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3756 			write_op(tp, cpu_scratch_base +
3757 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3758 				     (i * sizeof(u32)),
3759 				 be32_to_cpu(fw_data[i]));
3760 
3761 		total_len -= be32_to_cpu(fw_hdr->len);
3762 
3763 		/* Advance to next fragment */
3764 		fw_hdr = (struct tg3_firmware_hdr *)
3765 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3766 	} while (total_len > 0);
3767 
3768 	err = 0;
3769 
3770 out:
3771 	return err;
3772 }
3773 
3774 /* tp->lock is held. */
3775 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3776 {
3777 	int i;
3778 	const int iters = 5;
3779 
3780 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3781 	tw32_f(cpu_base + CPU_PC, pc);
3782 
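	/* If the PC write did not take, re-halt the CPU, rewrite the PC,
	 * and retry up to iters times, 1 ms apart.
	 */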
3783 	for (i = 0; i < iters; i++) {
3784 		if (tr32(cpu_base + CPU_PC) == pc)
3785 			break;
3786 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3787 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3788 		tw32_f(cpu_base + CPU_PC, pc);
3789 		udelay(1000);
3790 	}
3791 
3792 	return (i == iters) ? -EBUSY : 0;
3793 }
3794 
3795 /* tp->lock is held. */
3796 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3797 {
3798 	const struct tg3_firmware_hdr *fw_hdr;
3799 	int err;
3800 
3801 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3802 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously
	 * from the start address.
	 */
3808 
3809 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3810 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3811 				    fw_hdr);
3812 	if (err)
3813 		return err;
3814 
3815 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3816 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3817 				    fw_hdr);
3818 	if (err)
3819 		return err;
3820 
3821 	/* Now startup only the RX cpu. */
3822 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3823 				       be32_to_cpu(fw_hdr->base_addr));
3824 	if (err) {
3825 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3826 			   "should be %08x\n", __func__,
3827 			   tr32(RX_CPU_BASE + CPU_PC),
3828 				be32_to_cpu(fw_hdr->base_addr));
3829 		return -ENODEV;
3830 	}
3831 
3832 	tg3_rxcpu_resume(tp);
3833 
3834 	return 0;
3835 }
3836 
3837 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3838 {
3839 	const int iters = 1000;
3840 	int i;
3841 	u32 val;
3842 
	/* Wait for boot code to complete initialization and enter the
	 * service loop. It is then safe to download service patches.
	 */
3846 	for (i = 0; i < iters; i++) {
3847 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3848 			break;
3849 
3850 		udelay(10);
3851 	}
3852 
3853 	if (i == iters) {
3854 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3855 		return -EBUSY;
3856 	}
3857 
3858 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3859 	if (val & 0xff) {
3860 		netdev_warn(tp->dev,
3861 			    "Other patches exist. Not downloading EEE patch\n");
3862 		return -EEXIST;
3863 	}
3864 
3865 	return 0;
3866 }
3867 
3868 /* tp->lock is held. */
3869 static void tg3_load_57766_firmware(struct tg3 *tp)
3870 {
3871 	struct tg3_firmware_hdr *fw_hdr;
3872 
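	/* The EEE service patch is only downloaded on NVRAM-less devices;
	 * presumably parts with NVRAM carry it in their own boot code.
	 */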
3873 	if (!tg3_flag(tp, NO_NVRAM))
3874 		return;
3875 
3876 	if (tg3_validate_rxcpu_state(tp))
3877 		return;
3878 
3879 	if (!tp->fw)
3880 		return;
3881 
	/* This firmware blob has a different format than older firmware
	 * releases, as described below. The main difference is that the
	 * data is fragmented and written to non-contiguous locations.
	 *
	 * The blob begins with a firmware header identical to other
	 * firmware, consisting of version, base addr and length. The
	 * length here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments, each
	 * individually identical to previous firmware, i.e. a firmware
	 * header followed by that fragment's data. The version field of
	 * each fragment header is unused.
	 */
3895 
3896 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3897 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3898 		return;
3899 
3900 	if (tg3_rxcpu_pause(tp))
3901 		return;
3902 
3903 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3904 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3905 
3906 	tg3_rxcpu_resume(tp);
3907 }
3908 
3909 /* tp->lock is held. */
3910 static int tg3_load_tso_firmware(struct tg3 *tp)
3911 {
3912 	const struct tg3_firmware_hdr *fw_hdr;
3913 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3914 	int err;
3915 
3916 	if (!tg3_flag(tp, FW_TSO))
3917 		return 0;
3918 
3919 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3920 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously
	 * from the start address.
	 */
3926 
3927 	cpu_scratch_size = tp->fw_len;
3928 
3929 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3930 		cpu_base = RX_CPU_BASE;
3931 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3932 	} else {
3933 		cpu_base = TX_CPU_BASE;
3934 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3935 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3936 	}
3937 
3938 	err = tg3_load_firmware_cpu(tp, cpu_base,
3939 				    cpu_scratch_base, cpu_scratch_size,
3940 				    fw_hdr);
3941 	if (err)
3942 		return err;
3943 
3944 	/* Now startup the cpu. */
3945 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3946 				       be32_to_cpu(fw_hdr->base_addr));
3947 	if (err) {
3948 		netdev_err(tp->dev,
3949 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3950 			   __func__, tr32(cpu_base + CPU_PC),
3951 			   be32_to_cpu(fw_hdr->base_addr));
3952 		return -ENODEV;
3953 	}
3954 
3955 	tg3_resume_cpu(tp, cpu_base);
3956 	return 0;
3957 }
3958 
3959 /* tp->lock is held. */
3960 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3961 {
3962 	u32 addr_high, addr_low;
3963 
3964 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3965 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3966 		    (mac_addr[4] <<  8) | mac_addr[5]);
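	/* e.g. for MAC address 00:10:18:aa:bb:cc this yields
	 * addr_high = 0x00000010 and addr_low = 0x18aabbcc.
	 */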
3967 
3968 	if (index < 4) {
3969 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3970 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3971 	} else {
3972 		index -= 4;
3973 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3974 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3975 	}
3976 }
3977 
3978 /* tp->lock is held. */
3979 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3980 {
3981 	u32 addr_high;
3982 	int i;
3983 
3984 	for (i = 0; i < 4; i++) {
3985 		if (i == 1 && skip_mac_1)
3986 			continue;
3987 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3988 	}
3989 
3990 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3991 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3992 		for (i = 4; i < 16; i++)
3993 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3994 	}
3995 
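	/* Seed the TX backoff machine with the byte sum of the MAC
	 * address, e.g. 00:10:18:aa:bb:cc sums to 0x259 before masking.
	 */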
3996 	addr_high = (tp->dev->dev_addr[0] +
3997 		     tp->dev->dev_addr[1] +
3998 		     tp->dev->dev_addr[2] +
3999 		     tp->dev->dev_addr[3] +
4000 		     tp->dev->dev_addr[4] +
4001 		     tp->dev->dev_addr[5]) &
4002 		TX_BACKOFF_SEED_MASK;
4003 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
4004 }
4005 
4006 static void tg3_enable_register_access(struct tg3 *tp)
4007 {
4008 	/*
4009 	 * Make sure register accesses (indirect or otherwise) will function
4010 	 * correctly.
4011 	 */
4012 	pci_write_config_dword(tp->pdev,
4013 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4014 }
4015 
4016 static int tg3_power_up(struct tg3 *tp)
4017 {
4018 	int err;
4019 
4020 	tg3_enable_register_access(tp);
4021 
4022 	err = pci_set_power_state(tp->pdev, PCI_D0);
4023 	if (!err) {
4024 		/* Switch out of Vaux if it is a NIC */
4025 		tg3_pwrsrc_switch_to_vmain(tp);
4026 	} else {
4027 		netdev_err(tp->dev, "Transition to D0 failed\n");
4028 	}
4029 
4030 	return err;
4031 }
4032 
4033 static int tg3_setup_phy(struct tg3 *, bool);
4034 
4035 static int tg3_power_down_prepare(struct tg3 *tp)
4036 {
4037 	u32 misc_host_ctrl;
4038 	bool device_should_wake, do_low_power;
4039 
4040 	tg3_enable_register_access(tp);
4041 
4042 	/* Restore the CLKREQ setting. */
4043 	if (tg3_flag(tp, CLKREQ_BUG))
4044 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4045 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4046 
4047 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4048 	tw32(TG3PCI_MISC_HOST_CTRL,
4049 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4050 
4051 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4052 			     tg3_flag(tp, WOL_ENABLE);
4053 
4054 	if (tg3_flag(tp, USE_PHYLIB)) {
4055 		do_low_power = false;
4056 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4057 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4058 			struct phy_device *phydev;
4059 			u32 phyid, advertising;
4060 
4061 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4062 
4063 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4064 
4065 			tp->link_config.speed = phydev->speed;
4066 			tp->link_config.duplex = phydev->duplex;
4067 			tp->link_config.autoneg = phydev->autoneg;
4068 			tp->link_config.advertising = phydev->advertising;
4069 
4070 			advertising = ADVERTISED_TP |
4071 				      ADVERTISED_Pause |
4072 				      ADVERTISED_Autoneg |
4073 				      ADVERTISED_10baseT_Half;
4074 
4075 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4076 				if (tg3_flag(tp, WOL_SPEED_100MB))
4077 					advertising |=
4078 						ADVERTISED_100baseT_Half |
4079 						ADVERTISED_100baseT_Full |
4080 						ADVERTISED_10baseT_Full;
4081 				else
4082 					advertising |= ADVERTISED_10baseT_Full;
4083 			}
4084 
4085 			phydev->advertising = advertising;
4086 
4087 			phy_start_aneg(phydev);
4088 
4089 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4090 			if (phyid != PHY_ID_BCMAC131) {
4091 				phyid &= PHY_BCM_OUI_MASK;
4092 				if (phyid == PHY_BCM_OUI_1 ||
4093 				    phyid == PHY_BCM_OUI_2 ||
4094 				    phyid == PHY_BCM_OUI_3)
4095 					do_low_power = true;
4096 			}
4097 		}
4098 	} else {
4099 		do_low_power = true;
4100 
4101 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4102 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4103 
4104 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4105 			tg3_setup_phy(tp, false);
4106 	}
4107 
4108 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4109 		u32 val;
4110 
4111 		val = tr32(GRC_VCPU_EXT_CTRL);
4112 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4113 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4114 		int i;
4115 		u32 val;
4116 
4117 		for (i = 0; i < 200; i++) {
4118 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4119 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4120 				break;
4121 			msleep(1);
4122 		}
4123 	}
4124 	if (tg3_flag(tp, WOL_CAP))
4125 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4126 						     WOL_DRV_STATE_SHUTDOWN |
4127 						     WOL_DRV_WOL |
4128 						     WOL_SET_MAGIC_PKT);
4129 
4130 	if (device_should_wake) {
4131 		u32 mac_mode;
4132 
4133 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4134 			if (do_low_power &&
4135 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4136 				tg3_phy_auxctl_write(tp,
4137 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4138 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4139 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4140 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4141 				udelay(40);
4142 			}
4143 
4144 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4145 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 			else if (tp->phy_flags &
4147 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4148 				if (tp->link_config.active_speed == SPEED_1000)
4149 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4150 				else
4151 					mac_mode = MAC_MODE_PORT_MODE_MII;
4152 			} else
4153 				mac_mode = MAC_MODE_PORT_MODE_MII;
4154 
4155 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4156 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4157 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4158 					     SPEED_100 : SPEED_10;
4159 				if (tg3_5700_link_polarity(tp, speed))
4160 					mac_mode |= MAC_MODE_LINK_POLARITY;
4161 				else
4162 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4163 			}
4164 		} else {
4165 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4166 		}
4167 
4168 		if (!tg3_flag(tp, 5750_PLUS))
4169 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4170 
4171 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4172 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4173 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4174 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4175 
4176 		if (tg3_flag(tp, ENABLE_APE))
4177 			mac_mode |= MAC_MODE_APE_TX_EN |
4178 				    MAC_MODE_APE_RX_EN |
4179 				    MAC_MODE_TDE_ENABLE;
4180 
4181 		tw32_f(MAC_MODE, mac_mode);
4182 		udelay(100);
4183 
4184 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4185 		udelay(10);
4186 	}
4187 
4188 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4189 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4191 		u32 base_val;
4192 
4193 		base_val = tp->pci_clock_ctrl;
4194 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4195 			     CLOCK_CTRL_TXCLK_DISABLE);
4196 
4197 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4198 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4199 	} else if (tg3_flag(tp, 5780_CLASS) ||
4200 		   tg3_flag(tp, CPMU_PRESENT) ||
4201 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4202 		/* do nothing */
4203 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4204 		u32 newbits1, newbits2;
4205 
4206 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4207 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4208 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4209 				    CLOCK_CTRL_TXCLK_DISABLE |
4210 				    CLOCK_CTRL_ALTCLK);
4211 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212 		} else if (tg3_flag(tp, 5705_PLUS)) {
4213 			newbits1 = CLOCK_CTRL_625_CORE;
4214 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4215 		} else {
4216 			newbits1 = CLOCK_CTRL_ALTCLK;
4217 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 		}
4219 
4220 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4221 			    40);
4222 
4223 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4224 			    40);
4225 
4226 		if (!tg3_flag(tp, 5705_PLUS)) {
4227 			u32 newbits3;
4228 
4229 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4230 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4231 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4232 					    CLOCK_CTRL_TXCLK_DISABLE |
4233 					    CLOCK_CTRL_44MHZ_CORE);
4234 			} else {
4235 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4236 			}
4237 
4238 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4239 				    tp->pci_clock_ctrl | newbits3, 40);
4240 		}
4241 	}
4242 
4243 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4244 		tg3_power_down_phy(tp, do_low_power);
4245 
4246 	tg3_frob_aux_power(tp, true);
4247 
4248 	/* Workaround for unstable PLL clock */
4249 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4250 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4251 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4252 		u32 val = tr32(0x7d00);
4253 
4254 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4255 		tw32(0x7d00, val);
4256 		if (!tg3_flag(tp, ENABLE_ASF)) {
4257 			int err;
4258 
4259 			err = tg3_nvram_lock(tp);
4260 			tg3_halt_cpu(tp, RX_CPU_BASE);
4261 			if (!err)
4262 				tg3_nvram_unlock(tp);
4263 		}
4264 	}
4265 
4266 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4267 
4268 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269 
4270 	return 0;
4271 }
4272 
4273 static void tg3_power_down(struct tg3 *tp)
4274 {
4275 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4276 	pci_set_power_state(tp->pdev, PCI_D3hot);
4277 }
4278 
4279 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4280 {
4281 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4282 	case MII_TG3_AUX_STAT_10HALF:
4283 		*speed = SPEED_10;
4284 		*duplex = DUPLEX_HALF;
4285 		break;
4286 
4287 	case MII_TG3_AUX_STAT_10FULL:
4288 		*speed = SPEED_10;
4289 		*duplex = DUPLEX_FULL;
4290 		break;
4291 
4292 	case MII_TG3_AUX_STAT_100HALF:
4293 		*speed = SPEED_100;
4294 		*duplex = DUPLEX_HALF;
4295 		break;
4296 
4297 	case MII_TG3_AUX_STAT_100FULL:
4298 		*speed = SPEED_100;
4299 		*duplex = DUPLEX_FULL;
4300 		break;
4301 
4302 	case MII_TG3_AUX_STAT_1000HALF:
4303 		*speed = SPEED_1000;
4304 		*duplex = DUPLEX_HALF;
4305 		break;
4306 
4307 	case MII_TG3_AUX_STAT_1000FULL:
4308 		*speed = SPEED_1000;
4309 		*duplex = DUPLEX_FULL;
4310 		break;
4311 
4312 	default:
4313 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4314 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4315 				 SPEED_10;
4316 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4317 				  DUPLEX_HALF;
4318 			break;
4319 		}
4320 		*speed = SPEED_UNKNOWN;
4321 		*duplex = DUPLEX_UNKNOWN;
4322 		break;
4323 	}
4324 }
4325 
4326 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 {
4328 	int err = 0;
4329 	u32 val, new_adv;
4330 
4331 	new_adv = ADVERTISE_CSMA;
4332 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4333 	new_adv |= mii_advertise_flowctrl(flowctrl);
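	/* e.g. advertise = ADVERTISED_100baseT_Full with
	 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX yields
	 * new_adv = ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP.
	 */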
4334 
4335 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4336 	if (err)
4337 		goto done;
4338 
4339 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4340 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4341 
4342 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4343 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4344 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4345 
4346 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4347 		if (err)
4348 			goto done;
4349 	}
4350 
4351 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4352 		goto done;
4353 
4354 	tw32(TG3_CPMU_EEE_MODE,
4355 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4356 
4357 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4358 	if (!err) {
4359 		u32 err2;
4360 
4361 		val = 0;
		/* Advertise 100BASE-TX EEE ability */
4363 		if (advertise & ADVERTISED_100baseT_Full)
4364 			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
4366 		if (advertise & ADVERTISED_1000baseT_Full)
4367 			val |= MDIO_AN_EEE_ADV_1000T;
4368 
4369 		if (!tp->eee.eee_enabled) {
4370 			val = 0;
4371 			tp->eee.advertised = 0;
4372 		} else {
4373 			tp->eee.advertised = advertise &
4374 					     (ADVERTISED_100baseT_Full |
4375 					      ADVERTISED_1000baseT_Full);
4376 		}
4377 
4378 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4379 		if (err)
4380 			val = 0;
4381 
4382 		switch (tg3_asic_rev(tp)) {
4383 		case ASIC_REV_5717:
4384 		case ASIC_REV_57765:
4385 		case ASIC_REV_57766:
4386 		case ASIC_REV_5719:
			/* If we advertised any EEE abilities above... */
4388 			if (val)
4389 				val = MII_TG3_DSP_TAP26_ALNOKO |
4390 				      MII_TG3_DSP_TAP26_RMRXSTO |
4391 				      MII_TG3_DSP_TAP26_OPCSINPT;
4392 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4393 			/* Fall through */
4394 		case ASIC_REV_5720:
4395 		case ASIC_REV_5762:
4396 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4397 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4398 						 MII_TG3_DSP_CH34TP2_HIBW01);
4399 		}
4400 
4401 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4402 		if (!err)
4403 			err = err2;
4404 	}
4405 
4406 done:
4407 	return err;
4408 }
4409 
4410 static void tg3_phy_copper_begin(struct tg3 *tp)
4411 {
4412 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4413 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4414 		u32 adv, fc;
4415 
4416 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4417 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4418 			adv = ADVERTISED_10baseT_Half |
4419 			      ADVERTISED_10baseT_Full;
4420 			if (tg3_flag(tp, WOL_SPEED_100MB))
4421 				adv |= ADVERTISED_100baseT_Half |
4422 				       ADVERTISED_100baseT_Full;
4423 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4424 				if (!(tp->phy_flags &
4425 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4426 					adv |= ADVERTISED_1000baseT_Half;
4427 				adv |= ADVERTISED_1000baseT_Full;
4428 			}
4429 
4430 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4431 		} else {
4432 			adv = tp->link_config.advertising;
4433 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4434 				adv &= ~(ADVERTISED_1000baseT_Half |
4435 					 ADVERTISED_1000baseT_Full);
4436 
4437 			fc = tp->link_config.flowctrl;
4438 		}
4439 
4440 		tg3_phy_autoneg_cfg(tp, adv, fc);
4441 
4442 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4443 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4444 			/* Normally during power down we want to autonegotiate
4445 			 * the lowest possible speed for WOL. However, to avoid
4446 			 * link flap, we leave it untouched.
4447 			 */
4448 			return;
4449 		}
4450 
4451 		tg3_writephy(tp, MII_BMCR,
4452 			     BMCR_ANENABLE | BMCR_ANRESTART);
4453 	} else {
4454 		int i;
4455 		u32 bmcr, orig_bmcr;
4456 
4457 		tp->link_config.active_speed = tp->link_config.speed;
4458 		tp->link_config.active_duplex = tp->link_config.duplex;
4459 
4460 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4461 			/* With autoneg disabled, 5715 only links up when the
4462 			 * advertisement register has the configured speed
4463 			 * enabled.
4464 			 */
4465 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4466 		}
4467 
4468 		bmcr = 0;
4469 		switch (tp->link_config.speed) {
4470 		default:
4471 		case SPEED_10:
4472 			break;
4473 
4474 		case SPEED_100:
4475 			bmcr |= BMCR_SPEED100;
4476 			break;
4477 
4478 		case SPEED_1000:
4479 			bmcr |= BMCR_SPEED1000;
4480 			break;
4481 		}
4482 
4483 		if (tp->link_config.duplex == DUPLEX_FULL)
4484 			bmcr |= BMCR_FULLDPLX;
4485 
4486 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4487 		    (bmcr != orig_bmcr)) {
4488 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4489 			for (i = 0; i < 1500; i++) {
4490 				u32 tmp;
4491 
4492 				udelay(10);
4493 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4494 				    tg3_readphy(tp, MII_BMSR, &tmp))
4495 					continue;
4496 				if (!(tmp & BMSR_LSTATUS)) {
4497 					udelay(40);
4498 					break;
4499 				}
4500 			}
4501 			tg3_writephy(tp, MII_BMCR, bmcr);
4502 			udelay(40);
4503 		}
4504 	}
4505 }
4506 
4507 static int tg3_phy_pull_config(struct tg3 *tp)
4508 {
4509 	int err;
4510 	u32 val;
4511 
4512 	err = tg3_readphy(tp, MII_BMCR, &val);
4513 	if (err)
4514 		goto done;
4515 
4516 	if (!(val & BMCR_ANENABLE)) {
4517 		tp->link_config.autoneg = AUTONEG_DISABLE;
4518 		tp->link_config.advertising = 0;
4519 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4520 
4521 		err = -EIO;
4522 
4523 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4524 		case 0:
4525 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526 				goto done;
4527 
4528 			tp->link_config.speed = SPEED_10;
4529 			break;
4530 		case BMCR_SPEED100:
4531 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532 				goto done;
4533 
4534 			tp->link_config.speed = SPEED_100;
4535 			break;
4536 		case BMCR_SPEED1000:
4537 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4538 				tp->link_config.speed = SPEED_1000;
4539 				break;
4540 			}
4541 			/* Fall through */
4542 		default:
4543 			goto done;
4544 		}
4545 
4546 		if (val & BMCR_FULLDPLX)
4547 			tp->link_config.duplex = DUPLEX_FULL;
4548 		else
4549 			tp->link_config.duplex = DUPLEX_HALF;
4550 
4551 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552 
4553 		err = 0;
4554 		goto done;
4555 	}
4556 
4557 	tp->link_config.autoneg = AUTONEG_ENABLE;
4558 	tp->link_config.advertising = ADVERTISED_Autoneg;
4559 	tg3_flag_set(tp, PAUSE_AUTONEG);
4560 
4561 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4562 		u32 adv;
4563 
4564 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4565 		if (err)
4566 			goto done;
4567 
4568 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4569 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4570 
4571 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4572 	} else {
4573 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4574 	}
4575 
4576 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4577 		u32 adv;
4578 
4579 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4580 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4581 			if (err)
4582 				goto done;
4583 
4584 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4585 		} else {
4586 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4587 			if (err)
4588 				goto done;
4589 
4590 			adv = tg3_decode_flowctrl_1000X(val);
4591 			tp->link_config.flowctrl = adv;
4592 
4593 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4594 			adv = mii_adv_to_ethtool_adv_x(val);
4595 		}
4596 
4597 		tp->link_config.advertising |= adv;
4598 	}
4599 
4600 done:
4601 	return err;
4602 }
4603 
4604 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4605 {
4606 	int err;
4607 
	/* Turn off tap power management and set the extended packet
	 * length bit.
	 */
4610 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4611 
4612 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4613 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4614 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4615 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4616 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4617 
4618 	udelay(40);
4619 
4620 	return err;
4621 }
4622 
4623 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4624 {
4625 	struct ethtool_eee eee;
4626 
4627 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4628 		return true;
4629 
4630 	tg3_eee_pull_config(tp, &eee);
4631 
4632 	if (tp->eee.eee_enabled) {
4633 		if (tp->eee.advertised != eee.advertised ||
4634 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4635 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4636 			return false;
4637 	} else {
4638 		/* EEE is disabled but we're advertising */
4639 		if (eee.advertised)
4640 			return false;
4641 	}
4642 
4643 	return true;
4644 }
4645 
4646 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4647 {
4648 	u32 advmsk, tgtadv, advertising;
4649 
4650 	advertising = tp->link_config.advertising;
4651 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4652 
4653 	advmsk = ADVERTISE_ALL;
4654 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4655 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4656 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4657 	}
4658 
4659 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4660 		return false;
4661 
4662 	if ((*lcladv & advmsk) != tgtadv)
4663 		return false;
4664 
4665 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4666 		u32 tg3_ctrl;
4667 
4668 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4669 
4670 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4671 			return false;
4672 
4673 		if (tgtadv &&
4674 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4675 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4676 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4677 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4678 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4679 		} else {
4680 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4681 		}
4682 
4683 		if (tg3_ctrl != tgtadv)
4684 			return false;
4685 	}
4686 
4687 	return true;
4688 }
4689 
4690 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4691 {
4692 	u32 lpeth = 0;
4693 
4694 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4695 		u32 val;
4696 
4697 		if (tg3_readphy(tp, MII_STAT1000, &val))
4698 			return false;
4699 
4700 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4701 	}
4702 
4703 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4704 		return false;
4705 
4706 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4707 	tp->link_config.rmt_adv = lpeth;
4708 
4709 	return true;
4710 }
4711 
4712 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4713 {
4714 	if (curr_link_up != tp->link_up) {
4715 		if (curr_link_up) {
4716 			netif_carrier_on(tp->dev);
4717 		} else {
4718 			netif_carrier_off(tp->dev);
4719 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4720 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4721 		}
4722 
4723 		tg3_link_report(tp);
4724 		return true;
4725 	}
4726 
4727 	return false;
4728 }
4729 
4730 static void tg3_clear_mac_status(struct tg3 *tp)
4731 {
4732 	tw32(MAC_EVENT, 0);
4733 
4734 	tw32_f(MAC_STATUS,
4735 	       MAC_STATUS_SYNC_CHANGED |
4736 	       MAC_STATUS_CFG_CHANGED |
4737 	       MAC_STATUS_MI_COMPLETION |
4738 	       MAC_STATUS_LNKSTATE_CHANGED);
4739 	udelay(40);
4740 }
4741 
4742 static void tg3_setup_eee(struct tg3 *tp)
4743 {
4744 	u32 val;
4745 
4746 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4747 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4748 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4749 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4750 
4751 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4752 
4753 	tw32_f(TG3_CPMU_EEE_CTRL,
4754 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4755 
4756 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4757 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4758 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4759 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4760 
4761 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4762 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4763 
4764 	if (tg3_flag(tp, ENABLE_APE))
4765 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4766 
4767 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4768 
4769 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4770 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4771 	       (tp->eee.tx_lpi_timer & 0xffff));
4772 
4773 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4774 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4775 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4776 }
4777 
4778 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4779 {
4780 	bool current_link_up;
4781 	u32 bmsr, val;
4782 	u32 lcl_adv, rmt_adv;
4783 	u16 current_speed;
4784 	u8 current_duplex;
4785 	int i, err;
4786 
4787 	tg3_clear_mac_status(tp);
4788 
4789 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4790 		tw32_f(MAC_MI_MODE,
4791 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4792 		udelay(80);
4793 	}
4794 
4795 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4796 
4797 	/* Some third-party PHYs need to be reset on link going
4798 	 * down.
4799 	 */
4800 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4801 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4802 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4803 	    tp->link_up) {
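		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */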
4804 		tg3_readphy(tp, MII_BMSR, &bmsr);
4805 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4806 		    !(bmsr & BMSR_LSTATUS))
4807 			force_reset = true;
4808 	}
4809 	if (force_reset)
4810 		tg3_phy_reset(tp);
4811 
4812 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4813 		tg3_readphy(tp, MII_BMSR, &bmsr);
4814 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4815 		    !tg3_flag(tp, INIT_COMPLETE))
4816 			bmsr = 0;
4817 
4818 		if (!(bmsr & BMSR_LSTATUS)) {
4819 			err = tg3_init_5401phy_dsp(tp);
4820 			if (err)
4821 				return err;
4822 
4823 			tg3_readphy(tp, MII_BMSR, &bmsr);
4824 			for (i = 0; i < 1000; i++) {
4825 				udelay(10);
4826 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4827 				    (bmsr & BMSR_LSTATUS)) {
4828 					udelay(40);
4829 					break;
4830 				}
4831 			}
4832 
4833 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4834 			    TG3_PHY_REV_BCM5401_B0 &&
4835 			    !(bmsr & BMSR_LSTATUS) &&
4836 			    tp->link_config.active_speed == SPEED_1000) {
4837 				err = tg3_phy_reset(tp);
4838 				if (!err)
4839 					err = tg3_init_5401phy_dsp(tp);
4840 				if (err)
4841 					return err;
4842 			}
4843 		}
4844 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4845 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4846 		/* 5701 {A0,B0} CRC bug workaround */
4847 		tg3_writephy(tp, 0x15, 0x0a75);
4848 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4849 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4850 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851 	}
4852 
4853 	/* Clear pending interrupts... */
4854 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4855 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856 
4857 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4858 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4859 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4860 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4861 
4862 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4863 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4864 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4865 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4866 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4867 		else
4868 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4869 	}
4870 
4871 	current_link_up = false;
4872 	current_speed = SPEED_UNKNOWN;
4873 	current_duplex = DUPLEX_UNKNOWN;
4874 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4875 	tp->link_config.rmt_adv = 0;
4876 
4877 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4878 		err = tg3_phy_auxctl_read(tp,
4879 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 					  &val);
4881 		if (!err && !(val & (1 << 10))) {
4882 			tg3_phy_auxctl_write(tp,
4883 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4884 					     val | (1 << 10));
4885 			goto relink;
4886 		}
4887 	}
4888 
4889 	bmsr = 0;
4890 	for (i = 0; i < 100; i++) {
4891 		tg3_readphy(tp, MII_BMSR, &bmsr);
4892 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4893 		    (bmsr & BMSR_LSTATUS))
4894 			break;
4895 		udelay(40);
4896 	}
4897 
4898 	if (bmsr & BMSR_LSTATUS) {
4899 		u32 aux_stat, bmcr;
4900 
4901 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4902 		for (i = 0; i < 2000; i++) {
4903 			udelay(10);
4904 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4905 			    aux_stat)
4906 				break;
4907 		}
4908 
4909 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4910 					     &current_speed,
4911 					     &current_duplex);
4912 
4913 		bmcr = 0;
4914 		for (i = 0; i < 200; i++) {
4915 			tg3_readphy(tp, MII_BMCR, &bmcr);
4916 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4917 				continue;
4918 			if (bmcr && bmcr != 0x7fff)
4919 				break;
4920 			udelay(10);
4921 		}
4922 
4923 		lcl_adv = 0;
4924 		rmt_adv = 0;
4925 
4926 		tp->link_config.active_speed = current_speed;
4927 		tp->link_config.active_duplex = current_duplex;
4928 
4929 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4930 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4931 
4932 			if ((bmcr & BMCR_ANENABLE) &&
4933 			    eee_config_ok &&
4934 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4935 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4936 				current_link_up = true;
4937 
			/* EEE setting changes take effect only after a PHY
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
4942 			if (!eee_config_ok &&
4943 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4944 			    !force_reset) {
4945 				tg3_setup_eee(tp);
4946 				tg3_phy_reset(tp);
4947 			}
4948 		} else {
4949 			if (!(bmcr & BMCR_ANENABLE) &&
4950 			    tp->link_config.speed == current_speed &&
4951 			    tp->link_config.duplex == current_duplex) {
4952 				current_link_up = true;
4953 			}
4954 		}
4955 
4956 		if (current_link_up &&
4957 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4958 			u32 reg, bit;
4959 
4960 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4961 				reg = MII_TG3_FET_GEN_STAT;
4962 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4963 			} else {
4964 				reg = MII_TG3_EXT_STAT;
4965 				bit = MII_TG3_EXT_STAT_MDIX;
4966 			}
4967 
4968 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4969 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4970 
4971 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4972 		}
4973 	}
4974 
4975 relink:
4976 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4977 		tg3_phy_copper_begin(tp);
4978 
4979 		if (tg3_flag(tp, ROBOSWITCH)) {
4980 			current_link_up = true;
			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4982 			current_speed = SPEED_1000;
4983 			current_duplex = DUPLEX_FULL;
4984 			tp->link_config.active_speed = current_speed;
4985 			tp->link_config.active_duplex = current_duplex;
4986 		}
4987 
4988 		tg3_readphy(tp, MII_BMSR, &bmsr);
4989 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4990 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4991 			current_link_up = true;
4992 	}
4993 
4994 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4995 	if (current_link_up) {
4996 		if (tp->link_config.active_speed == SPEED_100 ||
4997 		    tp->link_config.active_speed == SPEED_10)
4998 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 		else
5000 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5002 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5003 	else
5004 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5005 
	/* For the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
5009 	if (tg3_flag(tp, RGMII_MODE)) {
5010 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5011 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5012 
5013 		if (tp->link_config.active_speed == SPEED_10)
5014 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5015 		else if (tp->link_config.active_speed == SPEED_100)
5016 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017 				     LED_CTRL_100MBPS_ON);
5018 		else if (tp->link_config.active_speed == SPEED_1000)
5019 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5020 				     LED_CTRL_1000MBPS_ON);
5021 
5022 		tw32(MAC_LED_CTRL, led_ctrl);
5023 		udelay(40);
5024 	}
5025 
5026 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5027 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5028 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5029 
5030 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5031 		if (current_link_up &&
5032 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5033 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5034 		else
5035 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5036 	}
5037 
5038 	/* ??? Without this setting Netgear GA302T PHY does not
5039 	 * ??? send/receive packets...
5040 	 */
5041 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5042 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5043 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5044 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5045 		udelay(80);
5046 	}
5047 
5048 	tw32_f(MAC_MODE, tp->mac_mode);
5049 	udelay(40);
5050 
5051 	tg3_phy_eee_adjust(tp, current_link_up);
5052 
5053 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5054 		/* Polled via timer. */
5055 		tw32_f(MAC_EVENT, 0);
5056 	} else {
5057 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058 	}
5059 	udelay(40);
5060 
5061 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5062 	    current_link_up &&
5063 	    tp->link_config.active_speed == SPEED_1000 &&
5064 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5065 		udelay(120);
5066 		tw32_f(MAC_STATUS,
5067 		     (MAC_STATUS_SYNC_CHANGED |
5068 		      MAC_STATUS_CFG_CHANGED));
5069 		udelay(40);
5070 		tg3_write_mem(tp,
5071 			      NIC_SRAM_FIRMWARE_MBOX,
5072 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5073 	}
5074 
5075 	/* Prevent send BD corruption. */
5076 	if (tg3_flag(tp, CLKREQ_BUG)) {
5077 		if (tp->link_config.active_speed == SPEED_100 ||
5078 		    tp->link_config.active_speed == SPEED_10)
5079 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5080 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5081 		else
5082 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5083 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5084 	}
5085 
5086 	tg3_test_and_report_link_chg(tp, current_link_up);
5087 
5088 	return 0;
5089 }
5090 
5091 struct tg3_fiber_aneginfo {
5092 	int state;
5093 #define ANEG_STATE_UNKNOWN		0
5094 #define ANEG_STATE_AN_ENABLE		1
5095 #define ANEG_STATE_RESTART_INIT		2
5096 #define ANEG_STATE_RESTART		3
5097 #define ANEG_STATE_DISABLE_LINK_OK	4
5098 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5099 #define ANEG_STATE_ABILITY_DETECT	6
5100 #define ANEG_STATE_ACK_DETECT_INIT	7
5101 #define ANEG_STATE_ACK_DETECT		8
5102 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5103 #define ANEG_STATE_COMPLETE_ACK		10
5104 #define ANEG_STATE_IDLE_DETECT_INIT	11
5105 #define ANEG_STATE_IDLE_DETECT		12
5106 #define ANEG_STATE_LINK_OK		13
5107 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5108 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5109 
5110 	u32 flags;
5111 #define MR_AN_ENABLE		0x00000001
5112 #define MR_RESTART_AN		0x00000002
5113 #define MR_AN_COMPLETE		0x00000004
5114 #define MR_PAGE_RX		0x00000008
5115 #define MR_NP_LOADED		0x00000010
5116 #define MR_TOGGLE_TX		0x00000020
5117 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5118 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5119 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5120 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5121 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5122 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5123 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5124 #define MR_TOGGLE_RX		0x00002000
5125 #define MR_NP_RX		0x00004000
5126 
5127 #define MR_LINK_OK		0x80000000
5128 
5129 	unsigned long link_time, cur_time;
5130 
5131 	u32 ability_match_cfg;
5132 	int ability_match_count;
5133 
5134 	char ability_match, idle_match, ack_match;
5135 
5136 	u32 txconfig, rxconfig;
5137 #define ANEG_CFG_NP		0x00000080
5138 #define ANEG_CFG_ACK		0x00000040
5139 #define ANEG_CFG_RF2		0x00000020
5140 #define ANEG_CFG_RF1		0x00000010
5141 #define ANEG_CFG_PS2		0x00000001
5142 #define ANEG_CFG_PS1		0x00008000
5143 #define ANEG_CFG_HD		0x00004000
5144 #define ANEG_CFG_FD		0x00002000
5145 #define ANEG_CFG_INVAL		0x00001f06
5146 
5147 };
5148 #define ANEG_OK		0
5149 #define ANEG_DONE	1
5150 #define ANEG_TIMER_ENAB	2
5151 #define ANEG_FAILED	-1
5152 
5153 #define ANEG_STATE_SETTLE_TIME	10000
5154 
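/* Software autoneg state machine for 1000BASE-X fiber, ticked from
 * fiber_autoneg(). The normal progression is:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Loss of the received config word while waiting drops the machine
 * back to ANEG_STATE_AN_ENABLE.
 */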
5155 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5156 				   struct tg3_fiber_aneginfo *ap)
5157 {
5158 	u16 flowctrl;
5159 	unsigned long delta;
5160 	u32 rx_cfg_reg;
5161 	int ret;
5162 
5163 	if (ap->state == ANEG_STATE_UNKNOWN) {
5164 		ap->rxconfig = 0;
5165 		ap->link_time = 0;
5166 		ap->cur_time = 0;
5167 		ap->ability_match_cfg = 0;
5168 		ap->ability_match_count = 0;
5169 		ap->ability_match = 0;
5170 		ap->idle_match = 0;
5171 		ap->ack_match = 0;
5172 	}
5173 	ap->cur_time++;
5174 
5175 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5176 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5177 
5178 		if (rx_cfg_reg != ap->ability_match_cfg) {
5179 			ap->ability_match_cfg = rx_cfg_reg;
5180 			ap->ability_match = 0;
5181 			ap->ability_match_count = 0;
5182 		} else {
5183 			if (++ap->ability_match_count > 1) {
5184 				ap->ability_match = 1;
5185 				ap->ability_match_cfg = rx_cfg_reg;
5186 			}
5187 		}
5188 		if (rx_cfg_reg & ANEG_CFG_ACK)
5189 			ap->ack_match = 1;
5190 		else
5191 			ap->ack_match = 0;
5192 
5193 		ap->idle_match = 0;
5194 	} else {
5195 		ap->idle_match = 1;
5196 		ap->ability_match_cfg = 0;
5197 		ap->ability_match_count = 0;
5198 		ap->ability_match = 0;
5199 		ap->ack_match = 0;
5200 
5201 		rx_cfg_reg = 0;
5202 	}
5203 
5204 	ap->rxconfig = rx_cfg_reg;
5205 	ret = ANEG_OK;
5206 
5207 	switch (ap->state) {
5208 	case ANEG_STATE_UNKNOWN:
5209 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5210 			ap->state = ANEG_STATE_AN_ENABLE;
5211 
5212 		/* fallthru */
5213 	case ANEG_STATE_AN_ENABLE:
5214 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5215 		if (ap->flags & MR_AN_ENABLE) {
5216 			ap->link_time = 0;
5217 			ap->cur_time = 0;
5218 			ap->ability_match_cfg = 0;
5219 			ap->ability_match_count = 0;
5220 			ap->ability_match = 0;
5221 			ap->idle_match = 0;
5222 			ap->ack_match = 0;
5223 
5224 			ap->state = ANEG_STATE_RESTART_INIT;
5225 		} else {
5226 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5227 		}
5228 		break;
5229 
5230 	case ANEG_STATE_RESTART_INIT:
5231 		ap->link_time = ap->cur_time;
5232 		ap->flags &= ~(MR_NP_LOADED);
5233 		ap->txconfig = 0;
5234 		tw32(MAC_TX_AUTO_NEG, 0);
5235 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5236 		tw32_f(MAC_MODE, tp->mac_mode);
5237 		udelay(40);
5238 
5239 		ret = ANEG_TIMER_ENAB;
5240 		ap->state = ANEG_STATE_RESTART;
5241 
5242 		/* fallthru */
5243 	case ANEG_STATE_RESTART:
5244 		delta = ap->cur_time - ap->link_time;
5245 		if (delta > ANEG_STATE_SETTLE_TIME)
5246 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5247 		else
5248 			ret = ANEG_TIMER_ENAB;
5249 		break;
5250 
5251 	case ANEG_STATE_DISABLE_LINK_OK:
5252 		ret = ANEG_DONE;
5253 		break;
5254 
5255 	case ANEG_STATE_ABILITY_DETECT_INIT:
5256 		ap->flags &= ~(MR_TOGGLE_TX);
5257 		ap->txconfig = ANEG_CFG_FD;
5258 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5259 		if (flowctrl & ADVERTISE_1000XPAUSE)
5260 			ap->txconfig |= ANEG_CFG_PS1;
5261 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5262 			ap->txconfig |= ANEG_CFG_PS2;
5263 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265 		tw32_f(MAC_MODE, tp->mac_mode);
5266 		udelay(40);
5267 
5268 		ap->state = ANEG_STATE_ABILITY_DETECT;
5269 		break;
5270 
5271 	case ANEG_STATE_ABILITY_DETECT:
5272 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5273 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5274 		break;
5275 
5276 	case ANEG_STATE_ACK_DETECT_INIT:
5277 		ap->txconfig |= ANEG_CFG_ACK;
5278 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5279 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5280 		tw32_f(MAC_MODE, tp->mac_mode);
5281 		udelay(40);
5282 
5283 		ap->state = ANEG_STATE_ACK_DETECT;
5284 
5285 		/* fallthru */
5286 	case ANEG_STATE_ACK_DETECT:
5287 		if (ap->ack_match != 0) {
5288 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5289 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5290 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5291 			} else {
5292 				ap->state = ANEG_STATE_AN_ENABLE;
5293 			}
5294 		} else if (ap->ability_match != 0 &&
5295 			   ap->rxconfig == 0) {
5296 			ap->state = ANEG_STATE_AN_ENABLE;
5297 		}
5298 		break;
5299 
5300 	case ANEG_STATE_COMPLETE_ACK_INIT:
5301 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5302 			ret = ANEG_FAILED;
5303 			break;
5304 		}
5305 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5306 			       MR_LP_ADV_HALF_DUPLEX |
5307 			       MR_LP_ADV_SYM_PAUSE |
5308 			       MR_LP_ADV_ASYM_PAUSE |
5309 			       MR_LP_ADV_REMOTE_FAULT1 |
5310 			       MR_LP_ADV_REMOTE_FAULT2 |
5311 			       MR_LP_ADV_NEXT_PAGE |
5312 			       MR_TOGGLE_RX |
5313 			       MR_NP_RX);
5314 		if (ap->rxconfig & ANEG_CFG_FD)
5315 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5316 		if (ap->rxconfig & ANEG_CFG_HD)
5317 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5318 		if (ap->rxconfig & ANEG_CFG_PS1)
5319 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5320 		if (ap->rxconfig & ANEG_CFG_PS2)
5321 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5322 		if (ap->rxconfig & ANEG_CFG_RF1)
5323 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5324 		if (ap->rxconfig & ANEG_CFG_RF2)
5325 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5326 		if (ap->rxconfig & ANEG_CFG_NP)
5327 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5328 
5329 		ap->link_time = ap->cur_time;
5330 
5331 		ap->flags ^= (MR_TOGGLE_TX);
5332 		if (ap->rxconfig & 0x0008)
5333 			ap->flags |= MR_TOGGLE_RX;
5334 		if (ap->rxconfig & ANEG_CFG_NP)
5335 			ap->flags |= MR_NP_RX;
5336 		ap->flags |= MR_PAGE_RX;
5337 
5338 		ap->state = ANEG_STATE_COMPLETE_ACK;
5339 		ret = ANEG_TIMER_ENAB;
5340 		break;
5341 
5342 	case ANEG_STATE_COMPLETE_ACK:
5343 		if (ap->ability_match != 0 &&
5344 		    ap->rxconfig == 0) {
5345 			ap->state = ANEG_STATE_AN_ENABLE;
5346 			break;
5347 		}
5348 		delta = ap->cur_time - ap->link_time;
5349 		if (delta > ANEG_STATE_SETTLE_TIME) {
5350 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5351 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 			} else {
5353 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5354 				    !(ap->flags & MR_NP_RX)) {
5355 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5356 				} else {
5357 					ret = ANEG_FAILED;
5358 				}
5359 			}
5360 		}
5361 		break;
5362 
5363 	case ANEG_STATE_IDLE_DETECT_INIT:
5364 		ap->link_time = ap->cur_time;
5365 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5366 		tw32_f(MAC_MODE, tp->mac_mode);
5367 		udelay(40);
5368 
5369 		ap->state = ANEG_STATE_IDLE_DETECT;
5370 		ret = ANEG_TIMER_ENAB;
5371 		break;
5372 
5373 	case ANEG_STATE_IDLE_DETECT:
5374 		if (ap->ability_match != 0 &&
5375 		    ap->rxconfig == 0) {
5376 			ap->state = ANEG_STATE_AN_ENABLE;
5377 			break;
5378 		}
5379 		delta = ap->cur_time - ap->link_time;
5380 		if (delta > ANEG_STATE_SETTLE_TIME) {
5381 			/* XXX another gem from the Broadcom driver :( */
5382 			ap->state = ANEG_STATE_LINK_OK;
5383 		}
5384 		break;
5385 
5386 	case ANEG_STATE_LINK_OK:
5387 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5388 		ret = ANEG_DONE;
5389 		break;
5390 
5391 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5392 		/* ??? unimplemented */
5393 		break;
5394 
5395 	case ANEG_STATE_NEXT_PAGE_WAIT:
5396 		/* ??? unimplemented */
5397 		break;
5398 
5399 	default:
5400 		ret = ANEG_FAILED;
5401 		break;
5402 	}
5403 
5404 	return ret;
5405 }
5406 
5407 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5408 {
5409 	int res = 0;
5410 	struct tg3_fiber_aneginfo aninfo;
5411 	int status = ANEG_FAILED;
5412 	unsigned int tick;
5413 	u32 tmp;
5414 
5415 	tw32_f(MAC_TX_AUTO_NEG, 0);
5416 
5417 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5418 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5419 	udelay(40);
5420 
5421 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5422 	udelay(40);
5423 
5424 	memset(&aninfo, 0, sizeof(aninfo));
5425 	aninfo.flags |= MR_AN_ENABLE;
5426 	aninfo.state = ANEG_STATE_UNKNOWN;
5427 	aninfo.cur_time = 0;
5428 	tick = 0;
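	/* Tick the state machine once per microsecond of delay, for at
	 * most ~195 ms, until it reports done or failed.
	 */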
5429 	while (++tick < 195000) {
5430 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5431 		if (status == ANEG_DONE || status == ANEG_FAILED)
5432 			break;
5433 
5434 		udelay(1);
5435 	}
5436 
5437 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5438 	tw32_f(MAC_MODE, tp->mac_mode);
5439 	udelay(40);
5440 
5441 	*txflags = aninfo.txconfig;
5442 	*rxflags = aninfo.flags;
5443 
5444 	if (status == ANEG_DONE &&
5445 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5446 			     MR_LP_ADV_FULL_DUPLEX)))
5447 		res = 1;
5448 
5449 	return res;
5450 }
5451 
5452 static void tg3_init_bcm8002(struct tg3 *tp)
5453 {
5454 	u32 mac_status = tr32(MAC_STATUS);
5455 	int i;
5456 
	/* Reset when initializing for the first time or when we have a link. */
5458 	if (tg3_flag(tp, INIT_COMPLETE) &&
5459 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5460 		return;
5461 
5462 	/* Set PLL lock range. */
5463 	tg3_writephy(tp, 0x16, 0x8007);
5464 
5465 	/* SW reset */
5466 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5467 
5468 	/* Wait for reset to complete. */
5469 	/* XXX schedule_timeout() ... */
5470 	for (i = 0; i < 500; i++)
5471 		udelay(10);
5472 
5473 	/* Config mode; select PMA/Ch 1 regs. */
5474 	tg3_writephy(tp, 0x10, 0x8411);
5475 
5476 	/* Enable auto-lock and comdet, select txclk for tx. */
5477 	tg3_writephy(tp, 0x11, 0x0a10);
5478 
5479 	tg3_writephy(tp, 0x18, 0x00a0);
5480 	tg3_writephy(tp, 0x16, 0x41ff);
5481 
5482 	/* Assert and deassert POR. */
5483 	tg3_writephy(tp, 0x13, 0x0400);
5484 	udelay(40);
5485 	tg3_writephy(tp, 0x13, 0x0000);
5486 
5487 	tg3_writephy(tp, 0x11, 0x0a50);
5488 	udelay(40);
5489 	tg3_writephy(tp, 0x11, 0x0a10);
5490 
5491 	/* Wait for signal to stabilize */
5492 	/* XXX schedule_timeout() ... */
5493 	for (i = 0; i < 15000; i++)
5494 		udelay(10);
5495 
5496 	/* Deselect the channel register so we can read the PHYID
5497 	 * later.
5498 	 */
5499 	tg3_writephy(tp, 0x10, 0x8011);
5500 }
5501 
5502 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5503 {
5504 	u16 flowctrl;
5505 	bool current_link_up;
5506 	u32 sg_dig_ctrl, sg_dig_status;
5507 	u32 serdes_cfg, expected_sg_dig_ctrl;
5508 	int workaround, port_a;
5509 
5510 	serdes_cfg = 0;
5511 	expected_sg_dig_ctrl = 0;
5512 	workaround = 0;
5513 	port_a = 1;
5514 	current_link_up = false;
5515 
5516 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5517 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5518 		workaround = 1;
5519 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5520 			port_a = 0;
5521 
		/* Preserve bits 0-11, 13 and 14 for signal pre-emphasis
		 * and bits 20-23 for the voltage regulator.
		 */
5524 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5525 	}
5526 
5527 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5528 
5529 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5530 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5531 			if (workaround) {
5532 				u32 val = serdes_cfg;
5533 
5534 				if (port_a)
5535 					val |= 0xc010000;
5536 				else
5537 					val |= 0x4010000;
5538 				tw32_f(MAC_SERDES_CFG, val);
5539 			}
5540 
5541 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5542 		}
5543 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5544 			tg3_setup_flow_control(tp, 0, 0);
5545 			current_link_up = true;
5546 		}
5547 		goto out;
5548 	}
5549 
5550 	/* Want auto-negotiation.  */
5551 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5552 
5553 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5554 	if (flowctrl & ADVERTISE_1000XPAUSE)
5555 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5556 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5557 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5558 
5559 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5560 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5561 		    tp->serdes_counter &&
5562 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5563 				    MAC_STATUS_RCVD_CFG)) ==
5564 		     MAC_STATUS_PCS_SYNCED)) {
5565 			tp->serdes_counter--;
5566 			current_link_up = true;
5567 			goto out;
5568 		}
5569 restart_autoneg:
5570 		if (workaround)
5571 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5572 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5573 		udelay(5);
5574 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5575 
5576 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5577 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5578 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5579 				 MAC_STATUS_SIGNAL_DET)) {
5580 		sg_dig_status = tr32(SG_DIG_STATUS);
5581 		mac_status = tr32(MAC_STATUS);
5582 
5583 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5584 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5585 			u32 local_adv = 0, remote_adv = 0;
5586 
5587 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5588 				local_adv |= ADVERTISE_1000XPAUSE;
5589 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5590 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5591 
5592 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5593 				remote_adv |= LPA_1000XPAUSE;
5594 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5595 				remote_adv |= LPA_1000XPAUSE_ASYM;
5596 
5597 			tp->link_config.rmt_adv =
5598 					   mii_adv_to_ethtool_adv_x(remote_adv);
5599 
5600 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5601 			current_link_up = true;
5602 			tp->serdes_counter = 0;
5603 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5604 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5605 			if (tp->serdes_counter)
5606 				tp->serdes_counter--;
5607 			else {
5608 				if (workaround) {
5609 					u32 val = serdes_cfg;
5610 
5611 					if (port_a)
5612 						val |= 0xc010000;
5613 					else
5614 						val |= 0x4010000;
5615 
5616 					tw32_f(MAC_SERDES_CFG, val);
5617 				}
5618 
5619 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5620 				udelay(40);
5621 
				/* Parallel detection: the link is up only
				 * if we have PCS_SYNC and are not receiving
				 * config code words.
				 */
5625 				mac_status = tr32(MAC_STATUS);
5626 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5627 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5628 					tg3_setup_flow_control(tp, 0, 0);
5629 					current_link_up = true;
5630 					tp->phy_flags |=
5631 						TG3_PHYFLG_PARALLEL_DETECT;
5632 					tp->serdes_counter =
5633 						SERDES_PARALLEL_DET_TIMEOUT;
5634 				} else
5635 					goto restart_autoneg;
5636 			}
5637 		}
5638 	} else {
5639 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5640 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5641 	}
5642 
5643 out:
5644 	return current_link_up;
5645 }
5646 
5647 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5648 {
5649 	bool current_link_up = false;
5650 
5651 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5652 		goto out;
5653 
5654 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5655 		u32 txflags, rxflags;
5656 		int i;
5657 
5658 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5659 			u32 local_adv = 0, remote_adv = 0;
5660 
5661 			if (txflags & ANEG_CFG_PS1)
5662 				local_adv |= ADVERTISE_1000XPAUSE;
5663 			if (txflags & ANEG_CFG_PS2)
5664 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5665 
5666 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5667 				remote_adv |= LPA_1000XPAUSE;
5668 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5669 				remote_adv |= LPA_1000XPAUSE_ASYM;
5670 
5671 			tp->link_config.rmt_adv =
5672 					   mii_adv_to_ethtool_adv_x(remote_adv);
5673 
5674 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5675 
5676 			current_link_up = true;
5677 		}
5678 		for (i = 0; i < 30; i++) {
5679 			udelay(20);
5680 			tw32_f(MAC_STATUS,
5681 			       (MAC_STATUS_SYNC_CHANGED |
5682 				MAC_STATUS_CFG_CHANGED));
5683 			udelay(40);
5684 			if ((tr32(MAC_STATUS) &
5685 			     (MAC_STATUS_SYNC_CHANGED |
5686 			      MAC_STATUS_CFG_CHANGED)) == 0)
5687 				break;
5688 		}
5689 
5690 		mac_status = tr32(MAC_STATUS);
5691 		if (!current_link_up &&
5692 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5693 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5694 			current_link_up = true;
5695 	} else {
5696 		tg3_setup_flow_control(tp, 0, 0);
5697 
5698 		/* Forcing 1000FD link up. */
5699 		current_link_up = true;
5700 
5701 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5702 		udelay(40);
5703 
5704 		tw32_f(MAC_MODE, tp->mac_mode);
5705 		udelay(40);
5706 	}
5707 
5708 out:
5709 	return current_link_up;
5710 }
5711 
5712 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5713 {
5714 	u32 orig_pause_cfg;
5715 	u16 orig_active_speed;
5716 	u8 orig_active_duplex;
5717 	u32 mac_status;
5718 	bool current_link_up;
5719 	int i;
5720 
5721 	orig_pause_cfg = tp->link_config.active_flowctrl;
5722 	orig_active_speed = tp->link_config.active_speed;
5723 	orig_active_duplex = tp->link_config.active_duplex;
5724 
5725 	if (!tg3_flag(tp, HW_AUTONEG) &&
5726 	    tp->link_up &&
5727 	    tg3_flag(tp, INIT_COMPLETE)) {
5728 		mac_status = tr32(MAC_STATUS);
5729 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5730 			       MAC_STATUS_SIGNAL_DET |
5731 			       MAC_STATUS_CFG_CHANGED |
5732 			       MAC_STATUS_RCVD_CFG);
5733 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5734 				   MAC_STATUS_SIGNAL_DET)) {
5735 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5736 					    MAC_STATUS_CFG_CHANGED));
5737 			return 0;
5738 		}
5739 	}
5740 
5741 	tw32_f(MAC_TX_AUTO_NEG, 0);
5742 
5743 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5744 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5745 	tw32_f(MAC_MODE, tp->mac_mode);
5746 	udelay(40);
5747 
5748 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5749 		tg3_init_bcm8002(tp);
5750 
5751 	/* Enable link change event even when serdes polling.  */
5752 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5753 	udelay(40);
5754 
5755 	current_link_up = false;
5756 	tp->link_config.rmt_adv = 0;
5757 	mac_status = tr32(MAC_STATUS);
5758 
5759 	if (tg3_flag(tp, HW_AUTONEG))
5760 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5761 	else
5762 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5763 
5764 	tp->napi[0].hw_status->status =
5765 		(SD_STATUS_UPDATED |
5766 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5767 
5768 	for (i = 0; i < 100; i++) {
5769 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5770 				    MAC_STATUS_CFG_CHANGED));
5771 		udelay(5);
5772 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5773 					 MAC_STATUS_CFG_CHANGED |
5774 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5775 			break;
5776 	}
5777 
5778 	mac_status = tr32(MAC_STATUS);
5779 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5780 		current_link_up = false;
5781 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5782 		    tp->serdes_counter == 0) {
5783 			tw32_f(MAC_MODE, (tp->mac_mode |
5784 					  MAC_MODE_SEND_CONFIGS));
5785 			udelay(1);
5786 			tw32_f(MAC_MODE, tp->mac_mode);
5787 		}
5788 	}
5789 
5790 	if (current_link_up) {
5791 		tp->link_config.active_speed = SPEED_1000;
5792 		tp->link_config.active_duplex = DUPLEX_FULL;
5793 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 				    LED_CTRL_LNKLED_OVERRIDE |
5795 				    LED_CTRL_1000MBPS_ON));
5796 	} else {
5797 		tp->link_config.active_speed = SPEED_UNKNOWN;
5798 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5799 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 				    LED_CTRL_LNKLED_OVERRIDE |
5801 				    LED_CTRL_TRAFFIC_OVERRIDE));
5802 	}
5803 
5804 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5805 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5806 		if (orig_pause_cfg != now_pause_cfg ||
5807 		    orig_active_speed != tp->link_config.active_speed ||
5808 		    orig_active_duplex != tp->link_config.active_duplex)
5809 			tg3_link_report(tp);
5810 	}
5811 
5812 	return 0;
5813 }
5814 
5815 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5816 {
5817 	int err = 0;
5818 	u32 bmsr, bmcr;
5819 	u16 current_speed = SPEED_UNKNOWN;
5820 	u8 current_duplex = DUPLEX_UNKNOWN;
5821 	bool current_link_up = false;
5822 	u32 local_adv, remote_adv, sgsr;
5823 
5824 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5825 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5826 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5827 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5828 
5829 		if (force_reset)
5830 			tg3_phy_reset(tp);
5831 
5832 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5833 
5834 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5835 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 		} else {
5837 			current_link_up = true;
5838 			if (sgsr & SERDES_TG3_SPEED_1000) {
5839 				current_speed = SPEED_1000;
5840 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5841 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5842 				current_speed = SPEED_100;
5843 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5844 			} else {
5845 				current_speed = SPEED_10;
5846 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5847 			}
5848 
5849 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5850 				current_duplex = DUPLEX_FULL;
5851 			else
5852 				current_duplex = DUPLEX_HALF;
5853 		}
5854 
5855 		tw32_f(MAC_MODE, tp->mac_mode);
5856 		udelay(40);
5857 
5858 		tg3_clear_mac_status(tp);
5859 
5860 		goto fiber_setup_done;
5861 	}
5862 
5863 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5864 	tw32_f(MAC_MODE, tp->mac_mode);
5865 	udelay(40);
5866 
5867 	tg3_clear_mac_status(tp);
5868 
5869 	if (force_reset)
5870 		tg3_phy_reset(tp);
5871 
5872 	tp->link_config.rmt_adv = 0;
5873 
5874 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5876 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5877 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5878 			bmsr |= BMSR_LSTATUS;
5879 		else
5880 			bmsr &= ~BMSR_LSTATUS;
5881 	}
5882 
5883 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5884 
5885 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5886 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5887 		/* do nothing, just check for link up at the end */
5888 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5889 		u32 adv, newadv;
5890 
5891 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5892 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5893 				 ADVERTISE_1000XPAUSE |
5894 				 ADVERTISE_1000XPSE_ASYM |
5895 				 ADVERTISE_SLCT);
5896 
5897 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5898 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5899 
5900 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5901 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5902 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5903 			tg3_writephy(tp, MII_BMCR, bmcr);
5904 
5905 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5906 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5907 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908 
5909 			return err;
5910 		}
5911 	} else {
5912 		u32 new_bmcr;
5913 
5914 		bmcr &= ~BMCR_SPEED1000;
5915 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5916 
5917 		if (tp->link_config.duplex == DUPLEX_FULL)
5918 			new_bmcr |= BMCR_FULLDPLX;
5919 
5920 		if (new_bmcr != bmcr) {
5921 			/* BMCR_SPEED1000 is a reserved bit that needs
5922 			 * to be set on write.
5923 			 */
5924 			new_bmcr |= BMCR_SPEED1000;
5925 
5926 			/* Force a linkdown */
5927 			if (tp->link_up) {
5928 				u32 adv;
5929 
5930 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5931 				adv &= ~(ADVERTISE_1000XFULL |
5932 					 ADVERTISE_1000XHALF |
5933 					 ADVERTISE_SLCT);
5934 				tg3_writephy(tp, MII_ADVERTISE, adv);
5935 				tg3_writephy(tp, MII_BMCR, bmcr |
5936 							   BMCR_ANRESTART |
5937 							   BMCR_ANENABLE);
5938 				udelay(10);
5939 				tg3_carrier_off(tp);
5940 			}
5941 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5942 			bmcr = new_bmcr;
5943 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5945 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5946 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5947 					bmsr |= BMSR_LSTATUS;
5948 				else
5949 					bmsr &= ~BMSR_LSTATUS;
5950 			}
5951 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5952 		}
5953 	}
5954 
5955 	if (bmsr & BMSR_LSTATUS) {
5956 		current_speed = SPEED_1000;
5957 		current_link_up = true;
5958 		if (bmcr & BMCR_FULLDPLX)
5959 			current_duplex = DUPLEX_FULL;
5960 		else
5961 			current_duplex = DUPLEX_HALF;
5962 
5963 		local_adv = 0;
5964 		remote_adv = 0;
5965 
5966 		if (bmcr & BMCR_ANENABLE) {
5967 			u32 common;
5968 
5969 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5970 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5971 			common = local_adv & remote_adv;
5972 			if (common & (ADVERTISE_1000XHALF |
5973 				      ADVERTISE_1000XFULL)) {
5974 				if (common & ADVERTISE_1000XFULL)
5975 					current_duplex = DUPLEX_FULL;
5976 				else
5977 					current_duplex = DUPLEX_HALF;
5978 
5979 				tp->link_config.rmt_adv =
5980 					   mii_adv_to_ethtool_adv_x(remote_adv);
5981 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5982 				/* Link is up via parallel detect */
5983 			} else {
5984 				current_link_up = false;
5985 			}
5986 		}
5987 	}
5988 
5989 fiber_setup_done:
5990 	if (current_link_up && current_duplex == DUPLEX_FULL)
5991 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5992 
5993 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5994 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5995 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5996 
5997 	tw32_f(MAC_MODE, tp->mac_mode);
5998 	udelay(40);
5999 
6000 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6001 
6002 	tp->link_config.active_speed = current_speed;
6003 	tp->link_config.active_duplex = current_duplex;
6004 
6005 	tg3_test_and_report_link_chg(tp, current_link_up);
6006 	return err;
6007 }
6008 
6009 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6010 {
6011 	if (tp->serdes_counter) {
6012 		/* Give autoneg time to complete. */
6013 		tp->serdes_counter--;
6014 		return;
6015 	}
6016 
6017 	if (!tp->link_up &&
6018 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6019 		u32 bmcr;
6020 
6021 		tg3_readphy(tp, MII_BMCR, &bmcr);
6022 		if (bmcr & BMCR_ANENABLE) {
6023 			u32 phy1, phy2;
6024 
6025 			/* Select shadow register 0x1f */
6026 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6027 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6028 
6029 			/* Select expansion interrupt status register */
6030 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6031 					 MII_TG3_DSP_EXP1_INT_STAT);
6032 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6033 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034 
6035 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and are not receiving
				 * config code words, so the link is up via
				 * parallel detection.
				 */
6040 
6041 				bmcr &= ~BMCR_ANENABLE;
6042 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6043 				tg3_writephy(tp, MII_BMCR, bmcr);
6044 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6045 			}
6046 		}
6047 	} else if (tp->link_up &&
6048 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6049 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6050 		u32 phy2;
6051 
6052 		/* Select expansion interrupt status register */
6053 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6054 				 MII_TG3_DSP_EXP1_INT_STAT);
6055 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6056 		if (phy2 & 0x20) {
6057 			u32 bmcr;
6058 
6059 			/* Config code words received, turn on autoneg. */
6060 			tg3_readphy(tp, MII_BMCR, &bmcr);
6061 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6062 
6063 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6065 		}
6066 	}
6067 }
6068 
6069 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6070 {
6071 	u32 val;
6072 	int err;
6073 
6074 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6075 		err = tg3_setup_fiber_phy(tp, force_reset);
6076 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6077 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6078 	else
6079 		err = tg3_setup_copper_phy(tp, force_reset);
6080 
6081 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6082 		u32 scale;
6083 
6084 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6085 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6086 			scale = 65;
6087 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6088 			scale = 6;
6089 		else
6090 			scale = 12;
6091 
6092 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6093 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6094 		tw32(GRC_MISC_CFG, val);
6095 	}
6096 
6097 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6098 	      (6 << TX_LENGTHS_IPG_SHIFT);
6099 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6100 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6101 		val |= tr32(MAC_TX_LENGTHS) &
6102 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6103 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6104 
6105 	if (tp->link_config.active_speed == SPEED_1000 &&
6106 	    tp->link_config.active_duplex == DUPLEX_HALF)
6107 		tw32(MAC_TX_LENGTHS, val |
6108 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6109 	else
6110 		tw32(MAC_TX_LENGTHS, val |
6111 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6112 
6113 	if (!tg3_flag(tp, 5705_PLUS)) {
6114 		if (tp->link_up) {
6115 			tw32(HOSTCC_STAT_COAL_TICKS,
6116 			     tp->coal.stats_block_coalesce_usecs);
6117 		} else {
6118 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6119 		}
6120 	}
6121 
6122 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6123 		val = tr32(PCIE_PWR_MGMT_THRESH);
6124 		if (!tp->link_up)
6125 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6126 			      tp->pwrmgmt_thresh;
6127 		else
6128 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6129 		tw32(PCIE_PWR_MGMT_THRESH, val);
6130 	}
6131 
6132 	return err;
6133 }
6134 
6135 /* tp->lock must be held */
6136 static u64 tg3_refclk_read(struct tg3 *tp)
6137 {
6138 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6139 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6140 }
6141 
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146 
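	/* Halt the reference clock, load the new value in two 32-bit
	 * halves, then let it run again; the final flushed write makes
	 * the resume take effect immediately.
	 */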
6147 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152 
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157 	struct tg3 *tp = netdev_priv(dev);
6158 
6159 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 				SOF_TIMESTAMPING_RX_SOFTWARE |
6161 				SOF_TIMESTAMPING_SOFTWARE;
6162 
6163 	if (tg3_flag(tp, PTP_CAPABLE)) {
6164 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 					SOF_TIMESTAMPING_RX_HARDWARE |
6166 					SOF_TIMESTAMPING_RAW_HARDWARE;
6167 	}
6168 
6169 	if (tp->ptp_clock)
6170 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 	else
6172 		info->phc_index = -1;
6173 
6174 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175 
6176 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180 	return 0;
6181 }
6182 
6183 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 {
6185 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 	bool neg_adj = false;
6187 	u32 correction = 0;
6188 
6189 	if (ppb < 0) {
6190 		neg_adj = true;
6191 		ppb = -ppb;
6192 	}
6193 
	/* Frequency adjustment is performed in hardware with a 24-bit
	 * accumulator and a programmable correction value. On each clock
	 * cycle, the correction value is added to the accumulator and,
	 * when it overflows, the time counter is incremented/decremented.
	 *
	 * So the conversion from ppb to the correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
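	/* For example, adjusting by 100000 ppb (100 ppm) yields
	 *	100000 * (1 << 24) / 1000000000 = 1677 (0x68d),
	 * so the accumulator overflows roughly once every 10000 clock
	 * cycles, nudging the time counter by one on each overflow.
	 */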
6202 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6203 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6204 
6205 	tg3_full_lock(tp, 0);
6206 
6207 	if (correction)
6208 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6209 		     TG3_EAV_REF_CLK_CORRECT_EN |
6210 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6211 	else
6212 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213 
6214 	tg3_full_unlock(tp);
6215 
6216 	return 0;
6217 }
6218 
6219 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 {
6221 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222 
6223 	tg3_full_lock(tp, 0);
6224 	tp->ptp_adjust += delta;
6225 	tg3_full_unlock(tp);
6226 
6227 	return 0;
6228 }
6229 
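/* Time reported to the PTP core is the raw hardware counter plus the
 * software offset accumulated by tg3_ptp_adjtime() above.
 */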
6230 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6231 {
6232 	u64 ns;
6233 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6234 
6235 	tg3_full_lock(tp, 0);
6236 	ns = tg3_refclk_read(tp);
6237 	ns += tp->ptp_adjust;
6238 	tg3_full_unlock(tp);
6239 
6240 	*ts = ns_to_timespec64(ns);
6241 
6242 	return 0;
6243 }
6244 
6245 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6246 			   const struct timespec64 *ts)
6247 {
6248 	u64 ns;
6249 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6250 
6251 	ns = timespec64_to_ns(ts);
6252 
6253 	tg3_full_lock(tp, 0);
6254 	tg3_refclk_write(tp, ns);
6255 	tp->ptp_adjust = 0;
6256 	tg3_full_unlock(tp);
6257 
6258 	return 0;
6259 }
6260 
6261 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6262 			  struct ptp_clock_request *rq, int on)
6263 {
6264 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6265 	u32 clock_ctl;
6266 	int rval = 0;
6267 
6268 	switch (rq->type) {
6269 	case PTP_CLK_REQ_PEROUT:
6270 		if (rq->perout.index != 0)
6271 			return -EINVAL;
6272 
6273 		tg3_full_lock(tp, 0);
6274 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6275 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6276 
6277 		if (on) {
6278 			u64 nsec;
6279 
6280 			nsec = rq->perout.start.sec * 1000000000ULL +
6281 			       rq->perout.start.nsec;
6282 
6283 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6284 				netdev_warn(tp->dev,
6285 					    "Device supports only a one-shot timesync output, period must be 0\n");
6286 				rval = -EINVAL;
6287 				goto err_out;
6288 			}
6289 
6290 			if (nsec & (1ULL << 63)) {
6291 				netdev_warn(tp->dev,
6292 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6293 				rval = -EINVAL;
6294 				goto err_out;
6295 			}
6296 
6297 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6298 			tw32(TG3_EAV_WATCHDOG0_MSB,
6299 			     TG3_EAV_WATCHDOG0_EN |
6300 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6301 
6302 			tw32(TG3_EAV_REF_CLCK_CTL,
6303 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6304 		} else {
6305 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6306 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6307 		}
6308 
6309 err_out:
6310 		tg3_full_unlock(tp);
6311 		return rval;
6312 
6313 	default:
6314 		break;
6315 	}
6316 
6317 	return -EOPNOTSUPP;
6318 }
6319 
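/* One programmable one-shot output (the watchdog timer driven by
 * tg3_ptp_enable() above); no external timestamping or PPS support.
 */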
6320 static const struct ptp_clock_info tg3_ptp_caps = {
6321 	.owner		= THIS_MODULE,
6322 	.name		= "tg3 clock",
6323 	.max_adj	= 250000000,
6324 	.n_alarm	= 0,
6325 	.n_ext_ts	= 0,
6326 	.n_per_out	= 1,
6327 	.n_pins		= 0,
6328 	.pps		= 0,
6329 	.adjfreq	= tg3_ptp_adjfreq,
6330 	.adjtime	= tg3_ptp_adjtime,
6331 	.gettime64	= tg3_ptp_gettime,
6332 	.settime64	= tg3_ptp_settime,
6333 	.enable		= tg3_ptp_enable,
6334 };
6335 
6336 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6337 				     struct skb_shared_hwtstamps *timestamp)
6338 {
6339 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6340 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6341 					   tp->ptp_adjust);
6342 }
6343 
6344 /* tp->lock must be held */
6345 static void tg3_ptp_init(struct tg3 *tp)
6346 {
6347 	if (!tg3_flag(tp, PTP_CAPABLE))
6348 		return;
6349 
6350 	/* Initialize the hardware clock to the system time. */
6351 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6352 	tp->ptp_adjust = 0;
6353 	tp->ptp_info = tg3_ptp_caps;
6354 }
6355 
6356 /* tp->lock must be held */
6357 static void tg3_ptp_resume(struct tg3 *tp)
6358 {
6359 	if (!tg3_flag(tp, PTP_CAPABLE))
6360 		return;
6361 
6362 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6363 	tp->ptp_adjust = 0;
6364 }
6365 
6366 static void tg3_ptp_fini(struct tg3 *tp)
6367 {
6368 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6369 		return;
6370 
6371 	ptp_clock_unregister(tp->ptp_clock);
6372 	tp->ptp_clock = NULL;
6373 	tp->ptp_adjust = 0;
6374 }
6375 
6376 static inline int tg3_irq_sync(struct tg3 *tp)
6377 {
6378 	return tp->irq_sync;
6379 }
6380 
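/* Copy a block of registers into the dump buffer.  The destination
 * pointer is first advanced by the register offset, so each register
 * lands at dst[reg_off / sizeof(u32)] and the dump mirrors the chip's
 * register layout.
 */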
6381 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6382 {
6383 	int i;
6384 
6385 	dst = (u32 *)((u8 *)dst + off);
6386 	for (i = 0; i < len; i += sizeof(u32))
6387 		*dst++ = tr32(off + i);
6388 }
6389 
6390 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6391 {
6392 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6393 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6394 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6395 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6396 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6397 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6398 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6399 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6400 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6401 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6402 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6403 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6404 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6405 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6406 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6407 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6408 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6409 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6410 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6411 
6412 	if (tg3_flag(tp, SUPPORT_MSIX))
6413 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6414 
6415 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6416 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6417 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6418 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6419 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6420 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6421 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6422 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6423 
6424 	if (!tg3_flag(tp, 5705_PLUS)) {
6425 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6426 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6427 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6428 	}
6429 
6430 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6431 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6432 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6433 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6434 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6435 
6436 	if (tg3_flag(tp, NVRAM))
6437 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6438 }
6439 
6440 static void tg3_dump_state(struct tg3 *tp)
6441 {
6442 	int i;
6443 	u32 *regs;
6444 
6445 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6446 	if (!regs)
6447 		return;
6448 
6449 	if (tg3_flag(tp, PCI_EXPRESS)) {
6450 		/* Read up to but not including private PCI registers */
6451 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6452 			regs[i / sizeof(u32)] = tr32(i);
6453 	} else
6454 		tg3_dump_legacy_regs(tp, regs);
6455 
6456 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6457 		if (!regs[i + 0] && !regs[i + 1] &&
6458 		    !regs[i + 2] && !regs[i + 3])
6459 			continue;
6460 
6461 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6462 			   i * 4,
6463 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6464 	}
6465 
6466 	kfree(regs);
6467 
6468 	for (i = 0; i < tp->irq_cnt; i++) {
6469 		struct tg3_napi *tnapi = &tp->napi[i];
6470 
6471 		/* SW status block */
6472 		netdev_err(tp->dev,
6473 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6474 			   i,
6475 			   tnapi->hw_status->status,
6476 			   tnapi->hw_status->status_tag,
6477 			   tnapi->hw_status->rx_jumbo_consumer,
6478 			   tnapi->hw_status->rx_consumer,
6479 			   tnapi->hw_status->rx_mini_consumer,
6480 			   tnapi->hw_status->idx[0].rx_producer,
6481 			   tnapi->hw_status->idx[0].tx_consumer);
6482 
6483 		netdev_err(tp->dev,
6484 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6485 			   i,
6486 			   tnapi->last_tag, tnapi->last_irq_tag,
6487 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6488 			   tnapi->rx_rcb_ptr,
6489 			   tnapi->prodring.rx_std_prod_idx,
6490 			   tnapi->prodring.rx_std_cons_idx,
6491 			   tnapi->prodring.rx_jmb_prod_idx,
6492 			   tnapi->prodring.rx_jmb_cons_idx);
6493 	}
6494 }
6495 
6496 /* This is called whenever we suspect that the system chipset is re-
6497  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6498  * is bogus tx completions. We try to recover by setting the
6499  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6500  * in the workqueue.
6501  */
6502 static void tg3_tx_recover(struct tg3 *tp)
6503 {
6504 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6505 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6506 
6507 	netdev_warn(tp->dev,
6508 		    "The system may be re-ordering memory-mapped I/O "
6509 		    "cycles to the network device, attempting to recover. "
6510 		    "Please report the problem to the driver maintainer "
6511 		    "and include system chipset information.\n");
6512 
6513 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6514 }
6515 
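/* Free TX descriptors = the pending count minus the number in flight.
 * The subtraction wraps modulo the ring size; e.g. assuming the default
 * TG3_TX_RING_SIZE of 512, prod = 5 and cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors still in flight.
 */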
6516 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6517 {
6518 	/* Tell compiler to fetch tx indices from memory. */
6519 	barrier();
6520 	return tnapi->tx_pending -
6521 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6522 }
6523 
6524 /* Tigon3 never reports partial packet sends.  So we do not
6525  * need special logic to handle SKBs that have not had all
6526  * of their frags sent yet, like SunGEM does.
6527  */
6528 static void tg3_tx(struct tg3_napi *tnapi)
6529 {
6530 	struct tg3 *tp = tnapi->tp;
6531 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6532 	u32 sw_idx = tnapi->tx_cons;
6533 	struct netdev_queue *txq;
6534 	int index = tnapi - tp->napi;
6535 	unsigned int pkts_compl = 0, bytes_compl = 0;
6536 
6537 	if (tg3_flag(tp, ENABLE_TSS))
6538 		index--;
6539 
6540 	txq = netdev_get_tx_queue(tp->dev, index);
6541 
6542 	while (sw_idx != hw_idx) {
6543 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6544 		struct sk_buff *skb = ri->skb;
6545 		int i, tx_bug = 0;
6546 
6547 		if (unlikely(skb == NULL)) {
6548 			tg3_tx_recover(tp);
6549 			return;
6550 		}
6551 
6552 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6553 			struct skb_shared_hwtstamps timestamp;
6554 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6555 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6556 
6557 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6558 
6559 			skb_tstamp_tx(skb, &timestamp);
6560 		}
6561 
6562 		pci_unmap_single(tp->pdev,
6563 				 dma_unmap_addr(ri, mapping),
6564 				 skb_headlen(skb),
6565 				 PCI_DMA_TODEVICE);
6566 
6567 		ri->skb = NULL;
6568 
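		/* A mapping split across several descriptors is flagged
		 * fragmented; clear the flags while skipping to the last
		 * piece of the mapping.
		 */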
6569 		while (ri->fragmented) {
6570 			ri->fragmented = false;
6571 			sw_idx = NEXT_TX(sw_idx);
6572 			ri = &tnapi->tx_buffers[sw_idx];
6573 		}
6574 
6575 		sw_idx = NEXT_TX(sw_idx);
6576 
6577 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6578 			ri = &tnapi->tx_buffers[sw_idx];
6579 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6580 				tx_bug = 1;
6581 
6582 			pci_unmap_page(tp->pdev,
6583 				       dma_unmap_addr(ri, mapping),
6584 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6585 				       PCI_DMA_TODEVICE);
6586 
6587 			while (ri->fragmented) {
6588 				ri->fragmented = false;
6589 				sw_idx = NEXT_TX(sw_idx);
6590 				ri = &tnapi->tx_buffers[sw_idx];
6591 			}
6592 
6593 			sw_idx = NEXT_TX(sw_idx);
6594 		}
6595 
6596 		pkts_compl++;
6597 		bytes_compl += skb->len;
6598 
6599 		dev_consume_skb_any(skb);
6600 
6601 		if (unlikely(tx_bug)) {
6602 			tg3_tx_recover(tp);
6603 			return;
6604 		}
6605 	}
6606 
6607 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6608 
6609 	tnapi->tx_cons = sw_idx;
6610 
6611 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6612 	 * before checking for netif_queue_stopped().  Without the
6613 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6614 	 * will miss it and cause the queue to be stopped forever.
6615 	 */
6616 	smp_mb();
6617 
6618 	if (unlikely(netif_tx_queue_stopped(txq) &&
6619 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6620 		__netif_tx_lock(txq, smp_processor_id());
6621 		if (netif_tx_queue_stopped(txq) &&
				/* Link parallel detection: the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
6625 	}
6626 }
6627 
6628 static void tg3_frag_free(bool is_frag, void *data)
6629 {
6630 	if (is_frag)
6631 		skb_free_frag(data);
6632 	else
6633 		kfree(data);
6634 }
6635 
6636 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6637 {
6638 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6639 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6640 
6641 	if (!ri->data)
6642 		return;
6643 
6644 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6645 			 map_sz, PCI_DMA_FROMDEVICE);
6646 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6647 	ri->data = NULL;
6648 }
6649 
6651 /* Returns size of skb allocated or < 0 on error.
6652  *
6653  * We only need to fill in the address because the other members
6654  * of the RX descriptor are invariant, see tg3_init_rings.
6655  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address), whereas for the RX status
 * buffers the cpu only reads the last cache line of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6661  */
6662 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6663 			     u32 opaque_key, u32 dest_idx_unmasked,
6664 			     unsigned int *frag_size)
6665 {
6666 	struct tg3_rx_buffer_desc *desc;
6667 	struct ring_info *map;
6668 	u8 *data;
6669 	dma_addr_t mapping;
6670 	int skb_size, data_size, dest_idx;
6671 
6672 	switch (opaque_key) {
6673 	case RXD_OPAQUE_RING_STD:
6674 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6675 		desc = &tpr->rx_std[dest_idx];
6676 		map = &tpr->rx_std_buffers[dest_idx];
6677 		data_size = tp->rx_pkt_map_sz;
6678 		break;
6679 
6680 	case RXD_OPAQUE_RING_JUMBO:
6681 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6682 		desc = &tpr->rx_jmb[dest_idx].std;
6683 		map = &tpr->rx_jmb_buffers[dest_idx];
6684 		data_size = TG3_RX_JMB_MAP_SZ;
6685 		break;
6686 
6687 	default:
6688 		return -EINVAL;
6689 	}
6690 
6691 	/* Do not overwrite any of the map or rp information
6692 	 * until we are sure we can commit to a new buffer.
6693 	 *
6694 	 * Callers depend upon this behavior and assume that
6695 	 * we leave everything unchanged if we fail.
6696 	 */
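	/* Size the buffer for the payload plus the skb_shared_info that
	 * build_skb() will place at the end.  A nonzero *frag_size tells
	 * the caller the data came from the page-frag allocator; zero
	 * means it was kmalloc()ed (see the matching logic in
	 * tg3_frag_free()).
	 */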
6697 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6698 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6699 	if (skb_size <= PAGE_SIZE) {
6700 		data = netdev_alloc_frag(skb_size);
6701 		*frag_size = skb_size;
6702 	} else {
6703 		data = kmalloc(skb_size, GFP_ATOMIC);
6704 		*frag_size = 0;
6705 	}
6706 	if (!data)
6707 		return -ENOMEM;
6708 
6709 	mapping = pci_map_single(tp->pdev,
6710 				 data + TG3_RX_OFFSET(tp),
6711 				 data_size,
6712 				 PCI_DMA_FROMDEVICE);
6713 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6714 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6715 		return -EIO;
6716 	}
6717 
6718 	map->data = data;
6719 	dma_unmap_addr_set(map, mapping, mapping);
6720 
6721 	desc->addr_hi = ((u64)mapping >> 32);
6722 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6723 
6724 	return data_size;
6725 }
6726 
/* We only need to copy over the address because the other
6728  * members of the RX descriptor are invariant.  See notes above
6729  * tg3_alloc_rx_data for full details.
6730  */
6731 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6732 			   struct tg3_rx_prodring_set *dpr,
6733 			   u32 opaque_key, int src_idx,
6734 			   u32 dest_idx_unmasked)
6735 {
6736 	struct tg3 *tp = tnapi->tp;
6737 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6738 	struct ring_info *src_map, *dest_map;
6739 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6740 	int dest_idx;
6741 
6742 	switch (opaque_key) {
6743 	case RXD_OPAQUE_RING_STD:
6744 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6745 		dest_desc = &dpr->rx_std[dest_idx];
6746 		dest_map = &dpr->rx_std_buffers[dest_idx];
6747 		src_desc = &spr->rx_std[src_idx];
6748 		src_map = &spr->rx_std_buffers[src_idx];
6749 		break;
6750 
6751 	case RXD_OPAQUE_RING_JUMBO:
6752 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6753 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6754 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6755 		src_desc = &spr->rx_jmb[src_idx].std;
6756 		src_map = &spr->rx_jmb_buffers[src_idx];
6757 		break;
6758 
6759 	default:
6760 		return;
6761 	}
6762 
6763 	dest_map->data = src_map->data;
6764 	dma_unmap_addr_set(dest_map, mapping,
6765 			   dma_unmap_addr(src_map, mapping));
6766 	dest_desc->addr_hi = src_desc->addr_hi;
6767 	dest_desc->addr_lo = src_desc->addr_lo;
6768 
6769 	/* Ensure that the update to the skb happens after the physical
6770 	 * addresses have been transferred to the new BD location.
6771 	 */
6772 	smp_wmb();
6773 
6774 	src_map->data = NULL;
6775 }
6776 
6777 /* The RX ring scheme is composed of multiple rings which post fresh
6778  * buffers to the chip, and one special ring the chip uses to report
6779  * status back to the host.
6780  *
6781  * The special ring reports the status of received packets to the
6782  * host.  The chip does not write into the original descriptor the
6783  * RX buffer was obtained from.  The chip simply takes the original
6784  * descriptor as provided by the host, updates the status and length
6785  * field, then writes this into the next status ring entry.
6786  *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into on-chip RAM.  Once the packet's length is
 * known, the chip walks down the TG3_BDINFO entries and chooses the
 * first ring whose MAXLEN field covers the new packet's length.
6793  *
6794  * The "separate ring for rx status" scheme may sound queer, but it makes
6795  * sense from a cache coherency perspective.  If only the host writes
6796  * to the buffer post rings, and only the chip writes to the rx status
6797  * rings, then cache lines never move beyond shared-modified state.
6798  * If both the host and chip were to write into the same ring, cache line
6799  * eviction could occur since both entities want it in an exclusive state.
6800  */
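/* A rough sketch of the flow described above, using names from this
 * file:
 *
 *	host: tg3_alloc_rx_data() -> rx_std / rx_jmb producer rings
 *	chip: DMA packet data, then write a status entry -> rx_rcb ring
 *	host: tg3_rx() walks rx_rcb and recycles or replaces buffers
 */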
6801 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6802 {
6803 	struct tg3 *tp = tnapi->tp;
6804 	u32 work_mask, rx_std_posted = 0;
6805 	u32 std_prod_idx, jmb_prod_idx;
6806 	u32 sw_idx = tnapi->rx_rcb_ptr;
6807 	u16 hw_idx;
6808 	int received;
6809 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6810 
6811 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6812 	/*
6813 	 * We need to order the read of hw_idx and the read of
6814 	 * the opaque cookie.
6815 	 */
6816 	rmb();
6817 	work_mask = 0;
6818 	received = 0;
6819 	std_prod_idx = tpr->rx_std_prod_idx;
6820 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6821 	while (sw_idx != hw_idx && budget > 0) {
6822 		struct ring_info *ri;
6823 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6824 		unsigned int len;
6825 		struct sk_buff *skb;
6826 		dma_addr_t dma_addr;
6827 		u32 opaque_key, desc_idx, *post_ptr;
6828 		u8 *data;
6829 		u64 tstamp = 0;
6830 
6831 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6832 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6833 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6834 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6835 			dma_addr = dma_unmap_addr(ri, mapping);
6836 			data = ri->data;
6837 			post_ptr = &std_prod_idx;
6838 			rx_std_posted++;
6839 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6840 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6841 			dma_addr = dma_unmap_addr(ri, mapping);
6842 			data = ri->data;
6843 			post_ptr = &jmb_prod_idx;
6844 		} else
6845 			goto next_pkt_nopost;
6846 
6847 		work_mask |= opaque_key;
6848 
6849 		if (desc->err_vlan & RXD_ERR_MASK) {
6850 		drop_it:
6851 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6852 				       desc_idx, *post_ptr);
6853 		drop_it_no_recycle:
6854 			/* Other statistics kept track of by card. */
6855 			tp->rx_dropped++;
6856 			goto next_pkt;
6857 		}
6858 
6859 		prefetch(data + TG3_RX_OFFSET(tp));
6860 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6861 		      ETH_FCS_LEN;
6862 
6863 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6864 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6865 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 		     RXD_FLAG_PTPSTAT_PTPV2) {
6867 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6868 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6869 		}
6870 
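		/* Above the copy threshold, hand the existing buffer to the
		 * stack and replace it with a fresh one; below it, copy the
		 * packet into a small skb and recycle the buffer back onto
		 * the producer ring.
		 */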
6871 		if (len > TG3_RX_COPY_THRESH(tp)) {
6872 			int skb_size;
6873 			unsigned int frag_size;
6874 
6875 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6876 						    *post_ptr, &frag_size);
6877 			if (skb_size < 0)
6878 				goto drop_it;
6879 
6880 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6881 					 PCI_DMA_FROMDEVICE);
6882 
6883 			/* Ensure that the update to the data happens
6884 			 * after the usage of the old DMA mapping.
6885 			 */
6886 			smp_wmb();
6887 
6888 			ri->data = NULL;
6889 
6890 			skb = build_skb(data, frag_size);
6891 			if (!skb) {
6892 				tg3_frag_free(frag_size != 0, data);
6893 				goto drop_it_no_recycle;
6894 			}
6895 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6896 		} else {
6897 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6898 				       desc_idx, *post_ptr);
6899 
6900 			skb = netdev_alloc_skb(tp->dev,
6901 					       len + TG3_RAW_IP_ALIGN);
6902 			if (skb == NULL)
6903 				goto drop_it_no_recycle;
6904 
6905 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6906 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6907 			memcpy(skb->data,
6908 			       data + TG3_RX_OFFSET(tp),
6909 			       len);
6910 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6911 		}
6912 
6913 		skb_put(skb, len);
6914 		if (tstamp)
6915 			tg3_hwclock_to_timestamp(tp, tstamp,
6916 						 skb_hwtstamps(skb));
6917 
6918 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6919 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6920 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6921 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6922 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6923 		else
6924 			skb_checksum_none_assert(skb);
6925 
6926 		skb->protocol = eth_type_trans(skb, tp->dev);
6927 
6928 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6929 		    skb->protocol != htons(ETH_P_8021Q) &&
6930 		    skb->protocol != htons(ETH_P_8021AD)) {
6931 			dev_kfree_skb_any(skb);
6932 			goto drop_it_no_recycle;
6933 		}
6934 
6935 		if (desc->type_flags & RXD_FLAG_VLAN &&
6936 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6937 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6938 					       desc->err_vlan & RXD_VLAN_MASK);
6939 
6940 		napi_gro_receive(&tnapi->napi, skb);
6941 
6942 		received++;
6943 		budget--;
6944 
6945 next_pkt:
6946 		(*post_ptr)++;
6947 
6948 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6949 			tpr->rx_std_prod_idx = std_prod_idx &
6950 					       tp->rx_std_ring_mask;
6951 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6952 				     tpr->rx_std_prod_idx);
6953 			work_mask &= ~RXD_OPAQUE_RING_STD;
6954 			rx_std_posted = 0;
6955 		}
6956 next_pkt_nopost:
6957 		sw_idx++;
6958 		sw_idx &= tp->rx_ret_ring_mask;
6959 
6960 		/* Refresh hw_idx to see if there is new work */
6961 		if (sw_idx == hw_idx) {
6962 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6963 			rmb();
6964 		}
6965 	}
6966 
6967 	/* ACK the status ring. */
6968 	tnapi->rx_rcb_ptr = sw_idx;
6969 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6970 
6971 	/* Refill RX ring(s). */
6972 	if (!tg3_flag(tp, ENABLE_RSS)) {
6973 		/* Sync BD data before updating mailbox */
6974 		wmb();
6975 
6976 		if (work_mask & RXD_OPAQUE_RING_STD) {
6977 			tpr->rx_std_prod_idx = std_prod_idx &
6978 					       tp->rx_std_ring_mask;
6979 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6980 				     tpr->rx_std_prod_idx);
6981 		}
6982 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6983 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6984 					       tp->rx_jmb_ring_mask;
6985 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6986 				     tpr->rx_jmb_prod_idx);
6987 		}
6988 		mmiowb();
6989 	} else if (work_mask) {
6990 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6991 		 * updated before the producer indices can be updated.
6992 		 */
6993 		smp_wmb();
6994 
6995 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6996 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6997 
6998 		if (tnapi != &tp->napi[1]) {
6999 			tp->rx_refill = true;
7000 			napi_schedule(&tp->napi[1].napi);
7001 		}
7002 	}
7003 
7004 	return received;
7005 }
7006 
7007 static void tg3_poll_link(struct tg3 *tp)
7008 {
7009 	/* handle link change and other phy events */
7010 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7011 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7012 
7013 		if (sblk->status & SD_STATUS_LINK_CHG) {
7014 			sblk->status = SD_STATUS_UPDATED |
7015 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7016 			spin_lock(&tp->lock);
7017 			if (tg3_flag(tp, USE_PHYLIB)) {
7018 				tw32_f(MAC_STATUS,
7019 				     (MAC_STATUS_SYNC_CHANGED |
7020 				      MAC_STATUS_CFG_CHANGED |
7021 				      MAC_STATUS_MI_COMPLETION |
7022 				      MAC_STATUS_LNKSTATE_CHANGED));
7023 				udelay(40);
7024 			} else
7025 				tg3_setup_phy(tp, false);
7026 			spin_unlock(&tp->lock);
7027 		}
7028 	}
7029 }
7030 
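/* With RSS, buffers consumed from a per-vector producer ring (spr) are
 * returned to the vector-0 ring (dpr) that is actually posted to the
 * chip.  Copy as many contiguous entries as possible per pass, bailing
 * out with -ENOSPC if a destination slot is still occupied.
 */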
7031 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7032 				struct tg3_rx_prodring_set *dpr,
7033 				struct tg3_rx_prodring_set *spr)
7034 {
7035 	u32 si, di, cpycnt, src_prod_idx;
7036 	int i, err = 0;
7037 
7038 	while (1) {
7039 		src_prod_idx = spr->rx_std_prod_idx;
7040 
7041 		/* Make sure updates to the rx_std_buffers[] entries and the
7042 		 * standard producer index are seen in the correct order.
7043 		 */
7044 		smp_rmb();
7045 
7046 		if (spr->rx_std_cons_idx == src_prod_idx)
7047 			break;
7048 
7049 		if (spr->rx_std_cons_idx < src_prod_idx)
7050 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7051 		else
7052 			cpycnt = tp->rx_std_ring_mask + 1 -
7053 				 spr->rx_std_cons_idx;
7054 
7055 		cpycnt = min(cpycnt,
7056 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7057 
7058 		si = spr->rx_std_cons_idx;
7059 		di = dpr->rx_std_prod_idx;
7060 
7061 		for (i = di; i < di + cpycnt; i++) {
7062 			if (dpr->rx_std_buffers[i].data) {
7063 				cpycnt = i - di;
7064 				err = -ENOSPC;
7065 				break;
7066 			}
7067 		}
7068 
7069 		if (!cpycnt)
7070 			break;
7071 
		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the data check above.
		 */
7076 		smp_rmb();
7077 
7078 		memcpy(&dpr->rx_std_buffers[di],
7079 		       &spr->rx_std_buffers[si],
7080 		       cpycnt * sizeof(struct ring_info));
7081 
7082 		for (i = 0; i < cpycnt; i++, di++, si++) {
7083 			struct tg3_rx_buffer_desc *sbd, *dbd;
7084 			sbd = &spr->rx_std[si];
7085 			dbd = &dpr->rx_std[di];
7086 			dbd->addr_hi = sbd->addr_hi;
7087 			dbd->addr_lo = sbd->addr_lo;
7088 		}
7089 
7090 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7091 				       tp->rx_std_ring_mask;
7092 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7093 				       tp->rx_std_ring_mask;
7094 	}
7095 
7096 	while (1) {
7097 		src_prod_idx = spr->rx_jmb_prod_idx;
7098 
7099 		/* Make sure updates to the rx_jmb_buffers[] entries and
7100 		 * the jumbo producer index are seen in the correct order.
7101 		 */
7102 		smp_rmb();
7103 
7104 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7105 			break;
7106 
7107 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7108 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7109 		else
7110 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7111 				 spr->rx_jmb_cons_idx;
7112 
7113 		cpycnt = min(cpycnt,
7114 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7115 
7116 		si = spr->rx_jmb_cons_idx;
7117 		di = dpr->rx_jmb_prod_idx;
7118 
7119 		for (i = di; i < di + cpycnt; i++) {
7120 			if (dpr->rx_jmb_buffers[i].data) {
7121 				cpycnt = i - di;
7122 				err = -ENOSPC;
7123 				break;
7124 			}
7125 		}
7126 
7127 		if (!cpycnt)
7128 			break;
7129 
		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the data check above.
		 */
7134 		smp_rmb();
7135 
7136 		memcpy(&dpr->rx_jmb_buffers[di],
7137 		       &spr->rx_jmb_buffers[si],
7138 		       cpycnt * sizeof(struct ring_info));
7139 
7140 		for (i = 0; i < cpycnt; i++, di++, si++) {
7141 			struct tg3_rx_buffer_desc *sbd, *dbd;
7142 			sbd = &spr->rx_jmb[si].std;
7143 			dbd = &dpr->rx_jmb[di].std;
7144 			dbd->addr_hi = sbd->addr_hi;
7145 			dbd->addr_lo = sbd->addr_lo;
7146 		}
7147 
7148 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7149 				       tp->rx_jmb_ring_mask;
7150 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7151 				       tp->rx_jmb_ring_mask;
7152 	}
7153 
7154 	return err;
7155 }
7156 
7157 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7158 {
7159 	struct tg3 *tp = tnapi->tp;
7160 
7161 	/* run TX completion thread */
7162 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7163 		tg3_tx(tnapi);
7164 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7165 			return work_done;
7166 	}
7167 
7168 	if (!tnapi->rx_rcb_prod_idx)
7169 		return work_done;
7170 
7171 	/* run RX thread, within the bounds set by NAPI.
7172 	 * All RX "locking" is done by ensuring outside
7173 	 * code synchronizes with tg3->napi.poll()
7174 	 */
7175 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7176 		work_done += tg3_rx(tnapi, budget - work_done);
7177 
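	/* Vector 1 also services the buffer-return path for all RSS
	 * vectors: gather recycled buffers into vector 0's producer
	 * rings, then post each ring to the chip with a single mailbox
	 * write.
	 */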
7178 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7179 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7180 		int i, err = 0;
7181 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7182 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7183 
7184 		tp->rx_refill = false;
7185 		for (i = 1; i <= tp->rxq_cnt; i++)
7186 			err |= tg3_rx_prodring_xfer(tp, dpr,
7187 						    &tp->napi[i].prodring);
7188 
7189 		wmb();
7190 
7191 		if (std_prod_idx != dpr->rx_std_prod_idx)
7192 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7193 				     dpr->rx_std_prod_idx);
7194 
7195 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7196 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7197 				     dpr->rx_jmb_prod_idx);
7198 
7199 		mmiowb();
7200 
7201 		if (err)
7202 			tw32_f(HOSTCC_MODE, tp->coal_now);
7203 	}
7204 
7205 	return work_done;
7206 }
7207 
7208 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7209 {
7210 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7211 		schedule_work(&tp->reset_task);
7212 }
7213 
7214 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7215 {
7216 	cancel_work_sync(&tp->reset_task);
7217 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7218 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7219 }
7220 
7221 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7222 {
7223 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7224 	struct tg3 *tp = tnapi->tp;
7225 	int work_done = 0;
7226 	struct tg3_hw_status *sblk = tnapi->hw_status;
7227 
7228 	while (1) {
7229 		work_done = tg3_poll_work(tnapi, work_done, budget);
7230 
7231 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7232 			goto tx_recovery;
7233 
7234 		if (unlikely(work_done >= budget))
7235 			break;
7236 
		/* tp->last_tag is used in the interrupt re-enable (the
		 * int_mbox write) below to tell the hw how much work has
		 * been processed, so we must read it before checking for
		 * more work.
		 */
7241 		tnapi->last_tag = sblk->status_tag;
7242 		tnapi->last_irq_tag = tnapi->last_tag;
7243 		rmb();
7244 
7245 		/* check for RX/TX work to do */
7246 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7247 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7248 
			/* This test is not race-free, but looping again
			 * instead of completing NAPI reduces the number of
			 * interrupts taken.
			 */
7252 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7253 				continue;
7254 
7255 			napi_complete_done(napi, work_done);
7256 			/* Reenable interrupts. */
7257 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7258 
7259 			/* This test here is synchronized by napi_schedule()
7260 			 * and napi_complete() to close the race condition.
7261 			 */
7262 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7263 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7264 						  HOSTCC_MODE_ENABLE |
7265 						  tnapi->coal_now);
7266 			}
7267 			mmiowb();
7268 			break;
7269 		}
7270 	}
7271 
7272 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7273 	return work_done;
7274 
7275 tx_recovery:
7276 	/* work_done is guaranteed to be less than budget. */
7277 	napi_complete(napi);
7278 	tg3_reset_task_schedule(tp);
7279 	return work_done;
7280 }
7281 
7282 static void tg3_process_error(struct tg3 *tp)
7283 {
7284 	u32 val;
7285 	bool real_error = false;
7286 
7287 	if (tg3_flag(tp, ERROR_PROCESSED))
7288 		return;
7289 
7290 	/* Check Flow Attention register */
7291 	val = tr32(HOSTCC_FLOW_ATTN);
7292 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7293 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7294 		real_error = true;
7295 	}
7296 
7297 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7298 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7299 		real_error = true;
7300 	}
7301 
7302 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7303 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7304 		real_error = true;
7305 	}
7306 
7307 	if (!real_error)
7308 		return;
7309 
7310 	tg3_dump_state(tp);
7311 
7312 	tg3_flag_set(tp, ERROR_PROCESSED);
7313 	tg3_reset_task_schedule(tp);
7314 }
7315 
7316 static int tg3_poll(struct napi_struct *napi, int budget)
7317 {
7318 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7319 	struct tg3 *tp = tnapi->tp;
7320 	int work_done = 0;
7321 	struct tg3_hw_status *sblk = tnapi->hw_status;
7322 
7323 	while (1) {
7324 		if (sblk->status & SD_STATUS_ERROR)
7325 			tg3_process_error(tp);
7326 
7327 		tg3_poll_link(tp);
7328 
7329 		work_done = tg3_poll_work(tnapi, work_done, budget);
7330 
7331 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7332 			goto tx_recovery;
7333 
7334 		if (unlikely(work_done >= budget))
7335 			break;
7336 
7337 		if (tg3_flag(tp, TAGGED_STATUS)) {
7338 			/* tp->last_tag is used in tg3_int_reenable() below
7339 			 * to tell the hw how much work has been processed,
7340 			 * so we must read it before checking for more work.
7341 			 */
7342 			tnapi->last_tag = sblk->status_tag;
7343 			tnapi->last_irq_tag = tnapi->last_tag;
7344 			rmb();
7345 		} else
7346 			sblk->status &= ~SD_STATUS_UPDATED;
7347 
7348 		if (likely(!tg3_has_work(tnapi))) {
7349 			napi_complete_done(napi, work_done);
7350 			tg3_int_reenable(tnapi);
7351 			break;
7352 		}
7353 	}
7354 
7355 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7356 	return work_done;
7357 
7358 tx_recovery:
7359 	/* work_done is guaranteed to be less than budget. */
7360 	napi_complete(napi);
7361 	tg3_reset_task_schedule(tp);
7362 	return work_done;
7363 }
7364 
7365 static void tg3_napi_disable(struct tg3 *tp)
7366 {
7367 	int i;
7368 
7369 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7370 		napi_disable(&tp->napi[i].napi);
7371 }
7372 
7373 static void tg3_napi_enable(struct tg3 *tp)
7374 {
7375 	int i;
7376 
7377 	for (i = 0; i < tp->irq_cnt; i++)
7378 		napi_enable(&tp->napi[i].napi);
7379 }
7380 
7381 static void tg3_napi_init(struct tg3 *tp)
7382 {
7383 	int i;
7384 
7385 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7386 	for (i = 1; i < tp->irq_cnt; i++)
7387 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7388 }
7389 
7390 static void tg3_napi_fini(struct tg3 *tp)
7391 {
7392 	int i;
7393 
7394 	for (i = 0; i < tp->irq_cnt; i++)
7395 		netif_napi_del(&tp->napi[i].napi);
7396 }
7397 
7398 static inline void tg3_netif_stop(struct tg3 *tp)
7399 {
7400 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7401 	tg3_napi_disable(tp);
7402 	netif_carrier_off(tp->dev);
7403 	netif_tx_disable(tp->dev);
7404 }
7405 
7406 /* tp->lock must be held */
7407 static inline void tg3_netif_start(struct tg3 *tp)
7408 {
7409 	tg3_ptp_resume(tp);
7410 
7411 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7412 	 * appropriate so long as all callers are assured to
7413 	 * have free tx slots (such as after tg3_init_hw)
7414 	 */
7415 	netif_tx_wake_all_queues(tp->dev);
7416 
7417 	if (tp->link_up)
7418 		netif_carrier_on(tp->dev);
7419 
7420 	tg3_napi_enable(tp);
7421 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7422 	tg3_enable_ints(tp);
7423 }
7424 
7425 static void tg3_irq_quiesce(struct tg3 *tp)
7426 	__releases(tp->lock)
7427 	__acquires(tp->lock)
7428 {
7429 	int i;
7430 
7431 	BUG_ON(tp->irq_sync);
7432 
7433 	tp->irq_sync = 1;
7434 	smp_mb();
7435 
7436 	spin_unlock_bh(&tp->lock);
7437 
7438 	for (i = 0; i < tp->irq_cnt; i++)
7439 		synchronize_irq(tp->napi[i].irq_vec);
7440 
7441 	spin_lock_bh(&tp->lock);
7442 }
7443 
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
 * well.  Most of the time this is not necessary, except when
 * shutting down the device.
7448  */
7449 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7450 {
7451 	spin_lock_bh(&tp->lock);
7452 	if (irq_sync)
7453 		tg3_irq_quiesce(tp);
7454 }
7455 
7456 static inline void tg3_full_unlock(struct tg3 *tp)
7457 {
7458 	spin_unlock_bh(&tp->lock);
7459 }
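
/* Illustrative use of the pair above (a sketch, not a new code path):
 * a reconfiguration that must also wait out in-flight interrupt
 * handlers passes a non-zero irq_sync, e.g.
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram the hardware ...
 *	tg3_full_unlock(tp);
 */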
7460 
/* One-shot MSI handler - Chip automatically disables the interrupt
 * after sending the MSI, so the driver doesn't have to do it.
 */
7464 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7465 {
7466 	struct tg3_napi *tnapi = dev_id;
7467 	struct tg3 *tp = tnapi->tp;
7468 
7469 	prefetch(tnapi->hw_status);
7470 	if (tnapi->rx_rcb)
7471 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7472 
7473 	if (likely(!tg3_irq_sync(tp)))
7474 		napi_schedule(&tnapi->napi);
7475 
7476 	return IRQ_HANDLED;
7477 }
7478 
7479 /* MSI ISR - No need to check for interrupt sharing and no need to
7480  * flush status block and interrupt mailbox. PCI ordering rules
7481  * guarantee that MSI will arrive after the status block.
7482  */
7483 static irqreturn_t tg3_msi(int irq, void *dev_id)
7484 {
7485 	struct tg3_napi *tnapi = dev_id;
7486 	struct tg3 *tp = tnapi->tp;
7487 
7488 	prefetch(tnapi->hw_status);
7489 	if (tnapi->rx_rcb)
7490 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
7498 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7499 	if (likely(!tg3_irq_sync(tp)))
7500 		napi_schedule(&tnapi->napi);
7501 
7502 	return IRQ_RETVAL(1);
7503 }
7504 
7505 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7506 {
7507 	struct tg3_napi *tnapi = dev_id;
7508 	struct tg3 *tp = tnapi->tp;
7509 	struct tg3_hw_status *sblk = tnapi->hw_status;
7510 	unsigned int handled = 1;
7511 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write posted prior to the
	 * interrupt is visible.  Reading the PCI State register will
	 * confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
7517 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7518 		if (tg3_flag(tp, CHIP_RESETTING) ||
7519 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7520 			handled = 0;
7521 			goto out;
7522 		}
7523 	}
7524 
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
7536 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7537 	if (tg3_irq_sync(tp))
7538 		goto out;
7539 	sblk->status &= ~SD_STATUS_UPDATED;
7540 	if (likely(tg3_has_work(tnapi))) {
7541 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7542 		napi_schedule(&tnapi->napi);
7543 	} else {
		/* No work, shared interrupt perhaps?  Re-enable
		 * interrupts, and flush that PCI write.
		 */
7547 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7548 			       0x00000000);
7549 	}
7550 out:
7551 	return IRQ_RETVAL(handled);
7552 }
7553 
7554 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7555 {
7556 	struct tg3_napi *tnapi = dev_id;
7557 	struct tg3 *tp = tnapi->tp;
7558 	struct tg3_hw_status *sblk = tnapi->hw_status;
7559 	unsigned int handled = 1;
7560 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write posted prior to the
	 * interrupt is visible.  Reading the PCI State register will
	 * confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
7566 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7567 		if (tg3_flag(tp, CHIP_RESETTING) ||
7568 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7569 			handled = 0;
7570 			goto out;
7571 		}
7572 	}
7573 
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
7585 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7586 
7587 	/*
7588 	 * In a shared interrupt configuration, sometimes other devices'
7589 	 * interrupts will scream.  We record the current status tag here
7590 	 * so that the above check can report that the screaming interrupts
7591 	 * are unhandled.  Eventually they will be silenced.
7592 	 */
7593 	tnapi->last_irq_tag = sblk->status_tag;
7594 
7595 	if (tg3_irq_sync(tp))
7596 		goto out;
7597 
7598 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7599 
7600 	napi_schedule(&tnapi->napi);
7601 
7602 out:
7603 	return IRQ_RETVAL(handled);
7604 }
7605 
7606 /* ISR for interrupt test */
7607 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7608 {
7609 	struct tg3_napi *tnapi = dev_id;
7610 	struct tg3 *tp = tnapi->tp;
7611 	struct tg3_hw_status *sblk = tnapi->hw_status;
7612 
7613 	if ((sblk->status & SD_STATUS_UPDATED) ||
7614 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7615 		tg3_disable_ints(tp);
7616 		return IRQ_RETVAL(1);
7617 	}
7618 	return IRQ_RETVAL(0);
7619 }
7620 
7621 #ifdef CONFIG_NET_POLL_CONTROLLER
7622 static void tg3_poll_controller(struct net_device *dev)
7623 {
7624 	int i;
7625 	struct tg3 *tp = netdev_priv(dev);
7626 
7627 	if (tg3_irq_sync(tp))
7628 		return;
7629 
7630 	for (i = 0; i < tp->irq_cnt; i++)
7631 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7632 }
7633 #endif
7634 
7635 static void tg3_tx_timeout(struct net_device *dev)
7636 {
7637 	struct tg3 *tp = netdev_priv(dev);
7638 
7639 	if (netif_msg_tx_err(tp)) {
7640 		netdev_err(dev, "transmit timed out, resetting\n");
7641 		tg3_dump_state(tp);
7642 	}
7643 
7644 	tg3_reset_task_schedule(tp);
7645 }
7646 
7647 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7648 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7649 {
7650 	u32 base = (u32) mapping & 0xffffffff;
7651 
7652 	return base + len + 8 < base;
7653 }
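
/* Worked example for the test above (hypothetical mapping): with
 * base = 0xffffff00 and len = 0x200, base + len + 8 wraps to 0x108
 * in 32-bit arithmetic, which is < base, so the buffer straddles a
 * 4GB boundary and the function returns true.
 */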
7654 
7655 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7656  * of any 4GB boundaries: 4G, 8G, etc
7657  */
7658 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 					   u32 len, u32 mss)
7660 {
7661 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7662 		u32 base = (u32) mapping & 0xffffffff;
7663 
7664 		return ((base + len + (mss & 0x3fff)) < base);
7665 	}
7666 	return 0;
7667 }
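
/* Example (hypothetical values): on a 5762 with mss = 1448, a buffer at
 * base = 0xfffffc00 with len = 0x200 ends within one MSS of the 4GB
 * boundary; base + len + mss wraps in 32-bit arithmetic, so the test fires.
 */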
7668 
7669 /* Test for DMA addresses > 40-bit */
7670 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7671 					  int len)
7672 {
7673 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7674 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7675 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7676 	return 0;
7677 #else
7678 	return 0;
7679 #endif
7680 }
7681 
7682 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7683 				 dma_addr_t mapping, u32 len, u32 flags,
7684 				 u32 mss, u32 vlan)
7685 {
7686 	txbd->addr_hi = ((u64) mapping >> 32);
7687 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7688 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7689 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7690 }
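
/* Illustrative packing done by tg3_tx_set_bd() (made-up length): len
 * occupies the upper 16 bits of len_flags and the TXD_FLAG_* bits the
 * lower 16, so len = 0x05ea with only TXD_FLAG_END set yields
 * (0x05ea << TXD_LEN_SHIFT) | TXD_FLAG_END.  mss and vlan share
 * vlan_tag the same way.
 */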
7691 
7692 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7693 			    dma_addr_t map, u32 len, u32 flags,
7694 			    u32 mss, u32 vlan)
7695 {
7696 	struct tg3 *tp = tnapi->tp;
7697 	bool hwbug = false;
7698 
7699 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7700 		hwbug = true;
7701 
7702 	if (tg3_4g_overflow_test(map, len))
7703 		hwbug = true;
7704 
7705 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7706 		hwbug = true;
7707 
7708 	if (tg3_40bit_overflow_test(tp, map, len))
7709 		hwbug = true;
7710 
7711 	if (tp->dma_limit) {
7712 		u32 prvidx = *entry;
7713 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7714 		while (len > tp->dma_limit && *budget) {
7715 			u32 frag_len = tp->dma_limit;
7716 			len -= tp->dma_limit;
7717 
			/* Avoid the 8-byte DMA problem */
7719 			if (len <= 8) {
7720 				len += tp->dma_limit / 2;
7721 				frag_len = tp->dma_limit / 2;
7722 			}
7723 
7724 			tnapi->tx_buffers[*entry].fragmented = true;
7725 
7726 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7727 				      frag_len, tmp_flag, mss, vlan);
7728 			*budget -= 1;
7729 			prvidx = *entry;
7730 			*entry = NEXT_TX(*entry);
7731 
7732 			map += frag_len;
7733 		}
7734 
7735 		if (len) {
7736 			if (*budget) {
7737 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7738 					      len, flags, mss, vlan);
7739 				*budget -= 1;
7740 				*entry = NEXT_TX(*entry);
7741 			} else {
7742 				hwbug = true;
7743 				tnapi->tx_buffers[prvidx].fragmented = false;
7744 			}
7745 		}
7746 	} else {
7747 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7748 			      len, flags, mss, vlan);
7749 		*entry = NEXT_TX(*entry);
7750 	}
7751 
7752 	return hwbug;
7753 }
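
/* Example of the dma_limit split above (hypothetical numbers): with
 * tp->dma_limit = 4096, a 9000-byte buffer is emitted as 4096 + 4096 +
 * 808 byte BDs.  A 4100-byte buffer would naively split as 4096 + 4,
 * re-tripping the short-DMA erratum tested at the top of the function,
 * so the last full chunk is halved and 2048 + 2052 is sent instead.
 */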
7754 
7755 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7756 {
7757 	int i;
7758 	struct sk_buff *skb;
7759 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7760 
7761 	skb = txb->skb;
7762 	txb->skb = NULL;
7763 
7764 	pci_unmap_single(tnapi->tp->pdev,
7765 			 dma_unmap_addr(txb, mapping),
7766 			 skb_headlen(skb),
7767 			 PCI_DMA_TODEVICE);
7768 
7769 	while (txb->fragmented) {
7770 		txb->fragmented = false;
7771 		entry = NEXT_TX(entry);
7772 		txb = &tnapi->tx_buffers[entry];
7773 	}
7774 
7775 	for (i = 0; i <= last; i++) {
7776 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7777 
7778 		entry = NEXT_TX(entry);
7779 		txb = &tnapi->tx_buffers[entry];
7780 
7781 		pci_unmap_page(tnapi->tp->pdev,
7782 			       dma_unmap_addr(txb, mapping),
7783 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7784 
7785 		while (txb->fragmented) {
7786 			txb->fragmented = false;
7787 			entry = NEXT_TX(entry);
7788 			txb = &tnapi->tx_buffers[entry];
7789 		}
7790 	}
7791 }
7792 
7793 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7794 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7795 				       struct sk_buff **pskb,
7796 				       u32 *entry, u32 *budget,
7797 				       u32 base_flags, u32 mss, u32 vlan)
7798 {
7799 	struct tg3 *tp = tnapi->tp;
7800 	struct sk_buff *new_skb, *skb = *pskb;
7801 	dma_addr_t new_addr = 0;
7802 	int ret = 0;
7803 
7804 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7805 		new_skb = skb_copy(skb, GFP_ATOMIC);
7806 	else {
7807 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7808 
7809 		new_skb = skb_copy_expand(skb,
7810 					  skb_headroom(skb) + more_headroom,
7811 					  skb_tailroom(skb), GFP_ATOMIC);
7812 	}
7813 
7814 	if (!new_skb) {
7815 		ret = -1;
7816 	} else {
7817 		/* New SKB is guaranteed to be linear. */
7818 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7819 					  PCI_DMA_TODEVICE);
7820 		/* Make sure the mapping succeeded */
7821 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7822 			dev_kfree_skb_any(new_skb);
7823 			ret = -1;
7824 		} else {
7825 			u32 save_entry = *entry;
7826 
7827 			base_flags |= TXD_FLAG_END;
7828 
7829 			tnapi->tx_buffers[*entry].skb = new_skb;
7830 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7831 					   mapping, new_addr);
7832 
7833 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7834 					    new_skb->len, base_flags,
7835 					    mss, vlan)) {
7836 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7837 				dev_kfree_skb_any(new_skb);
7838 				ret = -1;
7839 			}
7840 		}
7841 	}
7842 
7843 	dev_consume_skb_any(skb);
7844 	*pskb = new_skb;
7845 	return ret;
7846 }
7847 
7848 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7849 {
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than the current ring size.
	 */
7853 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7854 }
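
/* Example: with a hypothetical tx_pending of 512, the check above
 * passes only for gso_segs < 512 / 3 = 170, matching the worst case
 * of roughly three descriptors per segment assumed by tg3_tso_bug()
 * below (frag_cnt_est = gso_segs * 3).
 */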
7855 
7856 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7857 
/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set().
 */
7861 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7862 		       struct netdev_queue *txq, struct sk_buff *skb)
7863 {
7864 	struct sk_buff *segs, *nskb;
7865 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7866 
7867 	/* Estimate the number of fragments in the worst case */
7868 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7869 		netif_tx_stop_queue(txq);
7870 
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
7873 		 * tg3_tx(), we update tx index before checking for
7874 		 * netif_tx_queue_stopped().
7875 		 */
7876 		smp_mb();
7877 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7878 			return NETDEV_TX_BUSY;
7879 
7880 		netif_tx_wake_queue(txq);
7881 	}
7882 
7883 	segs = skb_gso_segment(skb, tp->dev->features &
7884 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7885 	if (IS_ERR(segs) || !segs)
7886 		goto tg3_tso_bug_end;
7887 
7888 	do {
7889 		nskb = segs;
7890 		segs = segs->next;
7891 		nskb->next = NULL;
7892 		tg3_start_xmit(nskb, tp->dev);
7893 	} while (segs);
7894 
7895 tg3_tso_bug_end:
7896 	dev_consume_skb_any(skb);
7897 
7898 	return NETDEV_TX_OK;
7899 }
7900 
7901 /* hard_start_xmit for all devices */
7902 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7903 {
7904 	struct tg3 *tp = netdev_priv(dev);
7905 	u32 len, entry, base_flags, mss, vlan = 0;
7906 	u32 budget;
7907 	int i = -1, would_hit_hwbug;
7908 	dma_addr_t mapping;
7909 	struct tg3_napi *tnapi;
7910 	struct netdev_queue *txq;
7911 	unsigned int last;
7912 	struct iphdr *iph = NULL;
7913 	struct tcphdr *tcph = NULL;
7914 	__sum16 tcp_csum = 0, ip_csum = 0;
7915 	__be16 ip_tot_len = 0;
7916 
7917 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7918 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7919 	if (tg3_flag(tp, ENABLE_TSS))
7920 		tnapi++;
7921 
7922 	budget = tg3_tx_avail(tnapi);
7923 
7924 	/* We are running in BH disabled context with netif_tx_lock
7925 	 * and TX reclaim runs via tp->napi.poll inside of a software
7926 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7927 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7928 	 */
7929 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7930 		if (!netif_tx_queue_stopped(txq)) {
7931 			netif_tx_stop_queue(txq);
7932 
7933 			/* This is a hard error, log it. */
7934 			netdev_err(dev,
7935 				   "BUG! Tx Ring full when queue awake!\n");
7936 		}
7937 		return NETDEV_TX_BUSY;
7938 	}
7939 
7940 	entry = tnapi->tx_prod;
7941 	base_flags = 0;
7942 
7943 	mss = skb_shinfo(skb)->gso_size;
7944 	if (mss) {
7945 		u32 tcp_opt_len, hdr_len;
7946 
7947 		if (skb_cow_head(skb, 0))
7948 			goto drop;
7949 
7950 		iph = ip_hdr(skb);
7951 		tcp_opt_len = tcp_optlen(skb);
7952 
7953 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7954 
		/* HW/FW cannot correctly segment packets that have been
		 * vlan encapsulated.
		 */
7958 		if (skb->protocol == htons(ETH_P_8021Q) ||
7959 		    skb->protocol == htons(ETH_P_8021AD)) {
7960 			if (tg3_tso_bug_gso_check(tnapi, skb))
7961 				return tg3_tso_bug(tp, tnapi, txq, skb);
7962 			goto drop;
7963 		}
7964 
7965 		if (!skb_is_gso_v6(skb)) {
7966 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7967 			    tg3_flag(tp, TSO_BUG)) {
7968 				if (tg3_tso_bug_gso_check(tnapi, skb))
7969 					return tg3_tso_bug(tp, tnapi, txq, skb);
7970 				goto drop;
7971 			}
7972 			ip_csum = iph->check;
7973 			ip_tot_len = iph->tot_len;
7974 			iph->check = 0;
7975 			iph->tot_len = htons(mss + hdr_len);
7976 		}
7977 
7978 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7979 			       TXD_FLAG_CPU_POST_DMA);
7980 
7981 		tcph = tcp_hdr(skb);
7982 		tcp_csum = tcph->check;
7983 
7984 		if (tg3_flag(tp, HW_TSO_1) ||
7985 		    tg3_flag(tp, HW_TSO_2) ||
7986 		    tg3_flag(tp, HW_TSO_3)) {
7987 			tcph->check = 0;
7988 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7989 		} else {
7990 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7991 							 0, IPPROTO_TCP, 0);
7992 		}
7993 
7994 		if (tg3_flag(tp, HW_TSO_3)) {
7995 			mss |= (hdr_len & 0xc) << 12;
7996 			if (hdr_len & 0x10)
7997 				base_flags |= 0x00000010;
7998 			base_flags |= (hdr_len & 0x3e0) << 5;
7999 		} else if (tg3_flag(tp, HW_TSO_2))
8000 			mss |= hdr_len << 9;
8001 		else if (tg3_flag(tp, HW_TSO_1) ||
8002 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8003 			if (tcp_opt_len || iph->ihl > 5) {
8004 				int tsflags;
8005 
8006 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8007 				mss |= (tsflags << 11);
8008 			}
8009 		} else {
8010 			if (tcp_opt_len || iph->ihl > 5) {
8011 				int tsflags;
8012 
8013 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8014 				base_flags |= tsflags << 12;
8015 			}
8016 		}
8017 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW cannot correctly checksum packets that have been
		 * vlan encapsulated.
		 */
8021 		if (skb->protocol == htons(ETH_P_8021Q) ||
8022 		    skb->protocol == htons(ETH_P_8021AD)) {
8023 			if (skb_checksum_help(skb))
8024 				goto drop;
8025 		} else  {
8026 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8027 		}
8028 	}
8029 
8030 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8031 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8032 		base_flags |= TXD_FLAG_JMB_PKT;
8033 
8034 	if (skb_vlan_tag_present(skb)) {
8035 		base_flags |= TXD_FLAG_VLAN;
8036 		vlan = skb_vlan_tag_get(skb);
8037 	}
8038 
8039 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8040 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8041 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8042 		base_flags |= TXD_FLAG_HWTSTAMP;
8043 	}
8044 
8045 	len = skb_headlen(skb);
8046 
8047 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8048 	if (pci_dma_mapping_error(tp->pdev, mapping))
8049 		goto drop;
8050 
8052 	tnapi->tx_buffers[entry].skb = skb;
8053 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8054 
8055 	would_hit_hwbug = 0;
8056 
8057 	if (tg3_flag(tp, 5701_DMA_BUG))
8058 		would_hit_hwbug = 1;
8059 
8060 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8061 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8062 			    mss, vlan)) {
8063 		would_hit_hwbug = 1;
8064 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8065 		u32 tmp_mss = mss;
8066 
8067 		if (!tg3_flag(tp, HW_TSO_1) &&
8068 		    !tg3_flag(tp, HW_TSO_2) &&
8069 		    !tg3_flag(tp, HW_TSO_3))
8070 			tmp_mss = 0;
8071 
8072 		/* Now loop through additional data
8073 		 * fragments, and queue them.
8074 		 */
8075 		last = skb_shinfo(skb)->nr_frags - 1;
8076 		for (i = 0; i <= last; i++) {
8077 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8078 
8079 			len = skb_frag_size(frag);
8080 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8081 						   len, DMA_TO_DEVICE);
8082 
8083 			tnapi->tx_buffers[entry].skb = NULL;
8084 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8085 					   mapping);
8086 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8087 				goto dma_error;
8088 
8089 			if (!budget ||
8090 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8091 					    len, base_flags |
8092 					    ((i == last) ? TXD_FLAG_END : 0),
8093 					    tmp_mss, vlan)) {
8094 				would_hit_hwbug = 1;
8095 				break;
8096 			}
8097 		}
8098 	}
8099 
8100 	if (would_hit_hwbug) {
8101 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8102 
8103 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8104 			/* If it's a TSO packet, do GSO instead of
8105 			 * allocating and copying to a large linear SKB
8106 			 */
8107 			if (ip_tot_len) {
8108 				iph->check = ip_csum;
8109 				iph->tot_len = ip_tot_len;
8110 			}
8111 			tcph->check = tcp_csum;
8112 			return tg3_tso_bug(tp, tnapi, txq, skb);
8113 		}
8114 
8115 		/* If the workaround fails due to memory/mapping
8116 		 * failure, silently drop this packet.
8117 		 */
8118 		entry = tnapi->tx_prod;
8119 		budget = tg3_tx_avail(tnapi);
8120 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8121 						base_flags, mss, vlan))
8122 			goto drop_nofree;
8123 	}
8124 
8125 	skb_tx_timestamp(skb);
8126 	netdev_tx_sent_queue(txq, skb->len);
8127 
8128 	/* Sync BD data before updating mailbox */
8129 	wmb();
8130 
8131 	tnapi->tx_prod = entry;
8132 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8133 		netif_tx_stop_queue(txq);
8134 
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
8137 		 * tg3_tx(), we update tx index before checking for
8138 		 * netif_tx_queue_stopped().
8139 		 */
8140 		smp_mb();
8141 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8142 			netif_tx_wake_queue(txq);
8143 	}
8144 
8145 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8146 		/* Packets are ready, update Tx producer idx on card. */
8147 		tw32_tx_mbox(tnapi->prodmbox, entry);
8148 		mmiowb();
8149 	}
8150 
8151 	return NETDEV_TX_OK;
8152 
8153 dma_error:
8154 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8155 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8156 drop:
8157 	dev_kfree_skb_any(skb);
8158 drop_nofree:
8159 	tp->tx_dropped++;
8160 	return NETDEV_TX_OK;
8161 }
8162 
8163 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8164 {
8165 	if (enable) {
8166 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8167 				  MAC_MODE_PORT_MODE_MASK);
8168 
8169 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8170 
8171 		if (!tg3_flag(tp, 5705_PLUS))
8172 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8173 
8174 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8175 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8176 		else
8177 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8178 	} else {
8179 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8180 
8181 		if (tg3_flag(tp, 5705_PLUS) ||
8182 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8183 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8184 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8185 	}
8186 
8187 	tw32(MAC_MODE, tp->mac_mode);
8188 	udelay(40);
8189 }
8190 
8191 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8192 {
8193 	u32 val, bmcr, mac_mode, ptest = 0;
8194 
8195 	tg3_phy_toggle_apd(tp, false);
8196 	tg3_phy_toggle_automdix(tp, false);
8197 
8198 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8199 		return -EIO;
8200 
8201 	bmcr = BMCR_FULLDPLX;
8202 	switch (speed) {
8203 	case SPEED_10:
8204 		break;
8205 	case SPEED_100:
8206 		bmcr |= BMCR_SPEED100;
8207 		break;
8208 	case SPEED_1000:
8209 	default:
8210 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8211 			speed = SPEED_100;
8212 			bmcr |= BMCR_SPEED100;
8213 		} else {
8214 			speed = SPEED_1000;
8215 			bmcr |= BMCR_SPEED1000;
8216 		}
8217 	}
8218 
8219 	if (extlpbk) {
8220 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8221 			tg3_readphy(tp, MII_CTRL1000, &val);
8222 			val |= CTL1000_AS_MASTER |
8223 			       CTL1000_ENABLE_MASTER;
8224 			tg3_writephy(tp, MII_CTRL1000, val);
8225 		} else {
8226 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8227 				MII_TG3_FET_PTEST_TRIM_2;
8228 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8229 		}
8230 	} else
8231 		bmcr |= BMCR_LOOPBACK;
8232 
8233 	tg3_writephy(tp, MII_BMCR, bmcr);
8234 
8235 	/* The write needs to be flushed for the FETs */
8236 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8237 		tg3_readphy(tp, MII_BMCR, &bmcr);
8238 
8239 	udelay(40);
8240 
8241 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8242 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8243 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8244 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8245 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8246 
8247 		/* The write needs to be flushed for the AC131 */
8248 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8249 	}
8250 
8251 	/* Reset to prevent losing 1st rx packet intermittently */
8252 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8253 	    tg3_flag(tp, 5780_CLASS)) {
8254 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8255 		udelay(10);
8256 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8257 	}
8258 
8259 	mac_mode = tp->mac_mode &
8260 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8261 	if (speed == SPEED_1000)
8262 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8263 	else
8264 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8265 
8266 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8267 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8268 
8269 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8270 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8271 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8272 			mac_mode |= MAC_MODE_LINK_POLARITY;
8273 
8274 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8275 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8276 	}
8277 
8278 	tw32(MAC_MODE, mac_mode);
8279 	udelay(40);
8280 
8281 	return 0;
8282 }
8283 
8284 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8285 {
8286 	struct tg3 *tp = netdev_priv(dev);
8287 
8288 	if (features & NETIF_F_LOOPBACK) {
8289 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8290 			return;
8291 
8292 		spin_lock_bh(&tp->lock);
8293 		tg3_mac_loopback(tp, true);
8294 		netif_carrier_on(tp->dev);
8295 		spin_unlock_bh(&tp->lock);
8296 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8297 	} else {
8298 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8299 			return;
8300 
8301 		spin_lock_bh(&tp->lock);
8302 		tg3_mac_loopback(tp, false);
8303 		/* Force link status check */
8304 		tg3_setup_phy(tp, true);
8305 		spin_unlock_bh(&tp->lock);
8306 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8307 	}
8308 }
8309 
8310 static netdev_features_t tg3_fix_features(struct net_device *dev,
8311 	netdev_features_t features)
8312 {
8313 	struct tg3 *tp = netdev_priv(dev);
8314 
8315 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8316 		features &= ~NETIF_F_ALL_TSO;
8317 
8318 	return features;
8319 }
8320 
8321 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8322 {
8323 	netdev_features_t changed = dev->features ^ features;
8324 
8325 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8326 		tg3_set_loopback(dev, features);
8327 
8328 	return 0;
8329 }
8330 
8331 static void tg3_rx_prodring_free(struct tg3 *tp,
8332 				 struct tg3_rx_prodring_set *tpr)
8333 {
8334 	int i;
8335 
8336 	if (tpr != &tp->napi[0].prodring) {
8337 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8338 		     i = (i + 1) & tp->rx_std_ring_mask)
8339 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8340 					tp->rx_pkt_map_sz);
8341 
8342 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8343 			for (i = tpr->rx_jmb_cons_idx;
8344 			     i != tpr->rx_jmb_prod_idx;
8345 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8346 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347 						TG3_RX_JMB_MAP_SZ);
8348 			}
8349 		}
8350 
8351 		return;
8352 	}
8353 
8354 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8355 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8356 				tp->rx_pkt_map_sz);
8357 
8358 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8359 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8360 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8361 					TG3_RX_JMB_MAP_SZ);
8362 	}
8363 }
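
/* Note on the asymmetry above: vector 0 owns the true hardware
 * producer ring and frees its entire ring, while the per-vector
 * rings only stage buffers between their consumer and producer
 * indices, so just that window is freed for them.
 */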
8364 
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
8372 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8373 				 struct tg3_rx_prodring_set *tpr)
8374 {
8375 	u32 i, rx_pkt_dma_sz;
8376 
8377 	tpr->rx_std_cons_idx = 0;
8378 	tpr->rx_std_prod_idx = 0;
8379 	tpr->rx_jmb_cons_idx = 0;
8380 	tpr->rx_jmb_prod_idx = 0;
8381 
8382 	if (tpr != &tp->napi[0].prodring) {
8383 		memset(&tpr->rx_std_buffers[0], 0,
8384 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8385 		if (tpr->rx_jmb_buffers)
8386 			memset(&tpr->rx_jmb_buffers[0], 0,
8387 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8388 		goto done;
8389 	}
8390 
8391 	/* Zero out all descriptors. */
8392 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8393 
8394 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8395 	if (tg3_flag(tp, 5780_CLASS) &&
8396 	    tp->dev->mtu > ETH_DATA_LEN)
8397 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8398 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8399 
	/* Initialize invariants of the rings; we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
8404 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8405 		struct tg3_rx_buffer_desc *rxd;
8406 
8407 		rxd = &tpr->rx_std[i];
8408 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8409 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8410 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8411 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8412 	}
8413 
8414 	/* Now allocate fresh SKBs for each rx ring. */
8415 	for (i = 0; i < tp->rx_pending; i++) {
8416 		unsigned int frag_size;
8417 
8418 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8419 				      &frag_size) < 0) {
8420 			netdev_warn(tp->dev,
8421 				    "Using a smaller RX standard ring. Only "
8422 				    "%d out of %d buffers were allocated "
8423 				    "successfully\n", i, tp->rx_pending);
8424 			if (i == 0)
8425 				goto initfail;
8426 			tp->rx_pending = i;
8427 			break;
8428 		}
8429 	}
8430 
8431 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8432 		goto done;
8433 
8434 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8435 
8436 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8437 		goto done;
8438 
8439 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8440 		struct tg3_rx_buffer_desc *rxd;
8441 
8442 		rxd = &tpr->rx_jmb[i].std;
8443 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8444 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8445 				  RXD_FLAG_JUMBO;
8446 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8447 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8448 	}
8449 
8450 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8451 		unsigned int frag_size;
8452 
8453 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8454 				      &frag_size) < 0) {
8455 			netdev_warn(tp->dev,
8456 				    "Using a smaller RX jumbo ring. Only %d "
8457 				    "out of %d buffers were allocated "
8458 				    "successfully\n", i, tp->rx_jumbo_pending);
8459 			if (i == 0)
8460 				goto initfail;
8461 			tp->rx_jumbo_pending = i;
8462 			break;
8463 		}
8464 	}
8465 
8466 done:
8467 	return 0;
8468 
8469 initfail:
8470 	tg3_rx_prodring_free(tp, tpr);
8471 	return -ENOMEM;
8472 }
8473 
8474 static void tg3_rx_prodring_fini(struct tg3 *tp,
8475 				 struct tg3_rx_prodring_set *tpr)
8476 {
8477 	kfree(tpr->rx_std_buffers);
8478 	tpr->rx_std_buffers = NULL;
8479 	kfree(tpr->rx_jmb_buffers);
8480 	tpr->rx_jmb_buffers = NULL;
8481 	if (tpr->rx_std) {
8482 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8483 				  tpr->rx_std, tpr->rx_std_mapping);
8484 		tpr->rx_std = NULL;
8485 	}
8486 	if (tpr->rx_jmb) {
8487 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8488 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8489 		tpr->rx_jmb = NULL;
8490 	}
8491 }
8492 
8493 static int tg3_rx_prodring_init(struct tg3 *tp,
8494 				struct tg3_rx_prodring_set *tpr)
8495 {
8496 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8497 				      GFP_KERNEL);
8498 	if (!tpr->rx_std_buffers)
8499 		return -ENOMEM;
8500 
8501 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8502 					 TG3_RX_STD_RING_BYTES(tp),
8503 					 &tpr->rx_std_mapping,
8504 					 GFP_KERNEL);
8505 	if (!tpr->rx_std)
8506 		goto err_out;
8507 
8508 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8509 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8510 					      GFP_KERNEL);
8511 		if (!tpr->rx_jmb_buffers)
8512 			goto err_out;
8513 
8514 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8515 						 TG3_RX_JMB_RING_BYTES(tp),
8516 						 &tpr->rx_jmb_mapping,
8517 						 GFP_KERNEL);
8518 		if (!tpr->rx_jmb)
8519 			goto err_out;
8520 	}
8521 
8522 	return 0;
8523 
8524 err_out:
8525 	tg3_rx_prodring_fini(tp, tpr);
8526 	return -ENOMEM;
8527 }
8528 
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
8536 static void tg3_free_rings(struct tg3 *tp)
8537 {
8538 	int i, j;
8539 
8540 	for (j = 0; j < tp->irq_cnt; j++) {
8541 		struct tg3_napi *tnapi = &tp->napi[j];
8542 
8543 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8544 
8545 		if (!tnapi->tx_buffers)
8546 			continue;
8547 
8548 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8549 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8550 
8551 			if (!skb)
8552 				continue;
8553 
8554 			tg3_tx_skb_unmap(tnapi, i,
8555 					 skb_shinfo(skb)->nr_frags - 1);
8556 
8557 			dev_consume_skb_any(skb);
8558 		}
8559 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8560 	}
8561 }
8562 
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
8570 static int tg3_init_rings(struct tg3 *tp)
8571 {
8572 	int i;
8573 
8574 	/* Free up all the SKBs. */
8575 	tg3_free_rings(tp);
8576 
8577 	for (i = 0; i < tp->irq_cnt; i++) {
8578 		struct tg3_napi *tnapi = &tp->napi[i];
8579 
8580 		tnapi->last_tag = 0;
8581 		tnapi->last_irq_tag = 0;
8582 		tnapi->hw_status->status = 0;
8583 		tnapi->hw_status->status_tag = 0;
8584 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8585 
8586 		tnapi->tx_prod = 0;
8587 		tnapi->tx_cons = 0;
8588 		if (tnapi->tx_ring)
8589 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8590 
8591 		tnapi->rx_rcb_ptr = 0;
8592 		if (tnapi->rx_rcb)
8593 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8594 
8595 		if (tnapi->prodring.rx_std &&
8596 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8597 			tg3_free_rings(tp);
8598 			return -ENOMEM;
8599 		}
8600 	}
8601 
8602 	return 0;
8603 }
8604 
8605 static void tg3_mem_tx_release(struct tg3 *tp)
8606 {
8607 	int i;
8608 
8609 	for (i = 0; i < tp->irq_max; i++) {
8610 		struct tg3_napi *tnapi = &tp->napi[i];
8611 
8612 		if (tnapi->tx_ring) {
8613 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8614 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8615 			tnapi->tx_ring = NULL;
8616 		}
8617 
8618 		kfree(tnapi->tx_buffers);
8619 		tnapi->tx_buffers = NULL;
8620 	}
8621 }
8622 
8623 static int tg3_mem_tx_acquire(struct tg3 *tp)
8624 {
8625 	int i;
8626 	struct tg3_napi *tnapi = &tp->napi[0];
8627 
8628 	/* If multivector TSS is enabled, vector 0 does not handle
8629 	 * tx interrupts.  Don't allocate any resources for it.
8630 	 */
8631 	if (tg3_flag(tp, ENABLE_TSS))
8632 		tnapi++;
8633 
8634 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8635 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8636 					    sizeof(struct tg3_tx_ring_info),
8637 					    GFP_KERNEL);
8638 		if (!tnapi->tx_buffers)
8639 			goto err_out;
8640 
8641 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8642 						    TG3_TX_RING_BYTES,
8643 						    &tnapi->tx_desc_mapping,
8644 						    GFP_KERNEL);
8645 		if (!tnapi->tx_ring)
8646 			goto err_out;
8647 	}
8648 
8649 	return 0;
8650 
8651 err_out:
8652 	tg3_mem_tx_release(tp);
8653 	return -ENOMEM;
8654 }
8655 
8656 static void tg3_mem_rx_release(struct tg3 *tp)
8657 {
8658 	int i;
8659 
8660 	for (i = 0; i < tp->irq_max; i++) {
8661 		struct tg3_napi *tnapi = &tp->napi[i];
8662 
8663 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8664 
8665 		if (!tnapi->rx_rcb)
8666 			continue;
8667 
8668 		dma_free_coherent(&tp->pdev->dev,
8669 				  TG3_RX_RCB_RING_BYTES(tp),
8670 				  tnapi->rx_rcb,
8671 				  tnapi->rx_rcb_mapping);
8672 		tnapi->rx_rcb = NULL;
8673 	}
8674 }
8675 
8676 static int tg3_mem_rx_acquire(struct tg3 *tp)
8677 {
8678 	unsigned int i, limit;
8679 
8680 	limit = tp->rxq_cnt;
8681 
8682 	/* If RSS is enabled, we need a (dummy) producer ring
8683 	 * set on vector zero.  This is the true hw prodring.
8684 	 */
8685 	if (tg3_flag(tp, ENABLE_RSS))
8686 		limit++;
8687 
8688 	for (i = 0; i < limit; i++) {
8689 		struct tg3_napi *tnapi = &tp->napi[i];
8690 
8691 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8692 			goto err_out;
8693 
8694 		/* If multivector RSS is enabled, vector 0
8695 		 * does not handle rx or tx interrupts.
8696 		 * Don't allocate any resources for it.
8697 		 */
8698 		if (!i && tg3_flag(tp, ENABLE_RSS))
8699 			continue;
8700 
8701 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8702 						    TG3_RX_RCB_RING_BYTES(tp),
8703 						    &tnapi->rx_rcb_mapping,
8704 						    GFP_KERNEL);
8705 		if (!tnapi->rx_rcb)
8706 			goto err_out;
8707 	}
8708 
8709 	return 0;
8710 
8711 err_out:
8712 	tg3_mem_rx_release(tp);
8713 	return -ENOMEM;
8714 }
8715 
/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
8720 static void tg3_free_consistent(struct tg3 *tp)
8721 {
8722 	int i;
8723 
8724 	for (i = 0; i < tp->irq_cnt; i++) {
8725 		struct tg3_napi *tnapi = &tp->napi[i];
8726 
8727 		if (tnapi->hw_status) {
8728 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8729 					  tnapi->hw_status,
8730 					  tnapi->status_mapping);
8731 			tnapi->hw_status = NULL;
8732 		}
8733 	}
8734 
8735 	tg3_mem_rx_release(tp);
8736 	tg3_mem_tx_release(tp);
8737 
8738 	/* tp->hw_stats can be referenced safely:
8739 	 *     1. under rtnl_lock
8740 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8741 	 */
8742 	if (tp->hw_stats) {
8743 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8744 				  tp->hw_stats, tp->stats_mapping);
8745 		tp->hw_stats = NULL;
8746 	}
8747 }
8748 
/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
8753 static int tg3_alloc_consistent(struct tg3 *tp)
8754 {
8755 	int i;
8756 
8757 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8758 					   sizeof(struct tg3_hw_stats),
8759 					   &tp->stats_mapping, GFP_KERNEL);
8760 	if (!tp->hw_stats)
8761 		goto err_out;
8762 
8763 	for (i = 0; i < tp->irq_cnt; i++) {
8764 		struct tg3_napi *tnapi = &tp->napi[i];
8765 		struct tg3_hw_status *sblk;
8766 
8767 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8768 						       TG3_HW_STATUS_SIZE,
8769 						       &tnapi->status_mapping,
8770 						       GFP_KERNEL);
8771 		if (!tnapi->hw_status)
8772 			goto err_out;
8773 
8774 		sblk = tnapi->hw_status;
8775 
8776 		if (tg3_flag(tp, ENABLE_RSS)) {
8777 			u16 *prodptr = NULL;
8778 
8779 			/*
8780 			 * When RSS is enabled, the status block format changes
8781 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8782 			 * and "rx_mini_consumer" members get mapped to the
8783 			 * other three rx return ring producer indexes.
8784 			 */
8785 			switch (i) {
8786 			case 1:
8787 				prodptr = &sblk->idx[0].rx_producer;
8788 				break;
8789 			case 2:
8790 				prodptr = &sblk->rx_jumbo_consumer;
8791 				break;
8792 			case 3:
8793 				prodptr = &sblk->reserved;
8794 				break;
8795 			case 4:
8796 				prodptr = &sblk->rx_mini_consumer;
8797 				break;
8798 			}
8799 			tnapi->rx_rcb_prod_idx = prodptr;
8800 		} else {
8801 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8802 		}
8803 	}
8804 
8805 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8806 		goto err_out;
8807 
8808 	return 0;
8809 
8810 err_out:
8811 	tg3_free_consistent(tp);
8812 	return -ENOMEM;
8813 }
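
/* RSS status block mapping established above (vector -> producer
 * index field repurposed for that vector's rx return ring):
 *	1 -> idx[0].rx_producer		2 -> rx_jumbo_consumer
 *	3 -> reserved			4 -> rx_mini_consumer
 */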
8814 
8815 #define MAX_WAIT_CNT 1000
8816 
8817 /* To stop a block, clear the enable bit and poll till it
8818  * clears.  tp->lock is held.
8819  */
8820 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8821 {
8822 	unsigned int i;
8823 	u32 val;
8824 
8825 	if (tg3_flag(tp, 5705_PLUS)) {
8826 		switch (ofs) {
8827 		case RCVLSC_MODE:
8828 		case DMAC_MODE:
8829 		case MBFREE_MODE:
8830 		case BUFMGR_MODE:
8831 		case MEMARB_MODE:
8832 			/* We can't enable/disable these bits of the
8833 			 * 5705/5750, just say success.
8834 			 */
8835 			return 0;
8836 
8837 		default:
8838 			break;
8839 		}
8840 	}
8841 
8842 	val = tr32(ofs);
8843 	val &= ~enable_bit;
8844 	tw32_f(ofs, val);
8845 
8846 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8847 		if (pci_channel_offline(tp->pdev)) {
8848 			dev_err(&tp->pdev->dev,
8849 				"tg3_stop_block device offline, "
8850 				"ofs=%lx enable_bit=%x\n",
8851 				ofs, enable_bit);
8852 			return -ENODEV;
8853 		}
8854 
8855 		udelay(100);
8856 		val = tr32(ofs);
8857 		if ((val & enable_bit) == 0)
8858 			break;
8859 	}
8860 
8861 	if (i == MAX_WAIT_CNT && !silent) {
8862 		dev_err(&tp->pdev->dev,
8863 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8864 			ofs, enable_bit);
8865 		return -ENODEV;
8866 	}
8867 
8868 	return 0;
8869 }
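
/* Illustrative call, mirroring tg3_abort_hw() below: stop the receive
 * BD initiator, reporting a timeout unless the caller asked for
 * silence:
 *
 *	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 */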
8870 
8871 /* tp->lock is held. */
8872 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8873 {
8874 	int i, err;
8875 
8876 	tg3_disable_ints(tp);
8877 
8878 	if (pci_channel_offline(tp->pdev)) {
8879 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8880 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8881 		err = -ENODEV;
8882 		goto err_no_dev;
8883 	}
8884 
8885 	tp->rx_mode &= ~RX_MODE_ENABLE;
8886 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8887 	udelay(10);
8888 
8889 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8890 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8895 
8896 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8897 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8898 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8899 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8900 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8901 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8903 
8904 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8905 	tw32_f(MAC_MODE, tp->mac_mode);
8906 	udelay(40);
8907 
8908 	tp->tx_mode &= ~TX_MODE_ENABLE;
8909 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8910 
8911 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8912 		udelay(100);
8913 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8914 			break;
8915 	}
8916 	if (i >= MAX_WAIT_CNT) {
8917 		dev_err(&tp->pdev->dev,
8918 			"%s timed out, TX_MODE_ENABLE will not clear "
8919 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8920 		err |= -ENODEV;
8921 	}
8922 
8923 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8924 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8925 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8926 
8927 	tw32(FTQ_RESET, 0xffffffff);
8928 	tw32(FTQ_RESET, 0x00000000);
8929 
8930 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8931 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8932 
8933 err_no_dev:
8934 	for (i = 0; i < tp->irq_cnt; i++) {
8935 		struct tg3_napi *tnapi = &tp->napi[i];
8936 		if (tnapi->hw_status)
8937 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8938 	}
8939 
8940 	return err;
8941 }
8942 
8943 /* Save PCI command register before chip reset */
8944 static void tg3_save_pci_state(struct tg3 *tp)
8945 {
8946 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8947 }
8948 
8949 /* Restore PCI state after chip reset */
8950 static void tg3_restore_pci_state(struct tg3 *tp)
8951 {
8952 	u32 val;
8953 
8954 	/* Re-enable indirect register accesses. */
8955 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8956 			       tp->misc_host_ctrl);
8957 
8958 	/* Set MAX PCI retry to zero. */
8959 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8960 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8961 	    tg3_flag(tp, PCIX_MODE))
8962 		val |= PCISTATE_RETRY_SAME_DMA;
8963 	/* Allow reads and writes to the APE register and memory space. */
8964 	if (tg3_flag(tp, ENABLE_APE))
8965 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8966 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8967 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8968 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8969 
8970 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8971 
8972 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8973 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8974 				      tp->pci_cacheline_sz);
8975 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8976 				      tp->pci_lat_timer);
8977 	}
8978 
8979 	/* Make sure PCI-X relaxed ordering bit is clear. */
8980 	if (tg3_flag(tp, PCIX_MODE)) {
8981 		u16 pcix_cmd;
8982 
8983 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8984 				     &pcix_cmd);
8985 		pcix_cmd &= ~PCI_X_CMD_ERO;
8986 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8987 				      pcix_cmd);
8988 	}
8989 
8990 	if (tg3_flag(tp, 5780_CLASS)) {
8991 
		/* Chip reset on 5780 will reset the MSI enable bit,
		 * so we need to restore it.
		 */
8995 		if (tg3_flag(tp, USING_MSI)) {
8996 			u16 ctrl;
8997 
8998 			pci_read_config_word(tp->pdev,
8999 					     tp->msi_cap + PCI_MSI_FLAGS,
9000 					     &ctrl);
9001 			pci_write_config_word(tp->pdev,
9002 					      tp->msi_cap + PCI_MSI_FLAGS,
9003 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9004 			val = tr32(MSGINT_MODE);
9005 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9006 		}
9007 	}
9008 }
9009 
9010 static void tg3_override_clk(struct tg3 *tp)
9011 {
9012 	u32 val;
9013 
9014 	switch (tg3_asic_rev(tp)) {
9015 	case ASIC_REV_5717:
9016 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9017 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9018 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9019 		break;
9020 
9021 	case ASIC_REV_5719:
9022 	case ASIC_REV_5720:
9023 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9024 		break;
9025 
9026 	default:
9027 		return;
9028 	}
9029 }
9030 
9031 static void tg3_restore_clk(struct tg3 *tp)
9032 {
9033 	u32 val;
9034 
9035 	switch (tg3_asic_rev(tp)) {
9036 	case ASIC_REV_5717:
9037 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9038 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9039 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9040 		break;
9041 
9042 	case ASIC_REV_5719:
9043 	case ASIC_REV_5720:
9044 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9045 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9046 		break;
9047 
9048 	default:
9049 		return;
9050 	}
9051 }
9052 
9053 /* tp->lock is held. */
9054 static int tg3_chip_reset(struct tg3 *tp)
9055 	__releases(tp->lock)
9056 	__acquires(tp->lock)
9057 {
9058 	u32 val;
9059 	void (*write_op)(struct tg3 *, u32, u32);
9060 	int i, err;
9061 
9062 	if (!pci_device_is_present(tp->pdev))
9063 		return -ENODEV;
9064 
9065 	tg3_nvram_lock(tp);
9066 
9067 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9068 
9069 	/* No matching tg3_nvram_unlock() after this because
9070 	 * chip reset below will undo the nvram lock.
9071 	 */
9072 	tp->nvram_lock_cnt = 0;
9073 
9074 	/* GRC_MISC_CFG core clock reset will clear the memory
9075 	 * enable bit in PCI register 4 and the MSI enable bit
9076 	 * on some chips, so we save relevant registers here.
9077 	 */
9078 	tg3_save_pci_state(tp);
9079 
9080 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9081 	    tg3_flag(tp, 5755_PLUS))
9082 		tw32(GRC_FASTBOOT_PC, 0);
9083 
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks up machines, causes machine checks, and does
	 * other fun things.  So, temporarily disable the 5701
	 * hardware workaround while we do the reset.
	 */
9090 	write_op = tp->write32;
9091 	if (write_op == tg3_write_flush_reg32)
9092 		tp->write32 = tg3_write32;
9093 
	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
9100 	tg3_flag_set(tp, CHIP_RESETTING);
9101 	for (i = 0; i < tp->irq_cnt; i++) {
9102 		struct tg3_napi *tnapi = &tp->napi[i];
9103 		if (tnapi->hw_status) {
9104 			tnapi->hw_status->status = 0;
9105 			tnapi->hw_status->status_tag = 0;
9106 		}
9107 		tnapi->last_tag = 0;
9108 		tnapi->last_irq_tag = 0;
9109 	}
9110 	smp_mb();
9111 
9112 	tg3_full_unlock(tp);
9113 
9114 	for (i = 0; i < tp->irq_cnt; i++)
9115 		synchronize_irq(tp->napi[i].irq_vec);
9116 
9117 	tg3_full_lock(tp, 0);
9118 
9119 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9120 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9121 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9122 	}
9123 
9124 	/* do the reset */
9125 	val = GRC_MISC_CFG_CORECLK_RESET;
9126 
9127 	if (tg3_flag(tp, PCI_EXPRESS)) {
9128 		/* Force PCIe 1.0a mode */
9129 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9130 		    !tg3_flag(tp, 57765_PLUS) &&
9131 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9132 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9133 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9134 
9135 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9136 			tw32(GRC_MISC_CFG, (1 << 29));
9137 			val |= (1 << 29);
9138 		}
9139 	}
9140 
9141 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9142 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9143 		tw32(GRC_VCPU_EXT_CTRL,
9144 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9145 	}
9146 
9147 	/* Set the clock to the highest frequency to avoid timeouts. With link
9148 	 * aware mode, the clock speed could be slow and bootcode does not
9149 	 * complete within the expected time. Override the clock to allow the
9150 	 * bootcode to finish sooner and then restore it.
9151 	 */
9152 	tg3_override_clk(tp);
9153 
	/* Manage gphy power for all CPMU-absent PCIe devices. */
9155 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9156 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9157 
9158 	tw32(GRC_MISC_CFG, val);
9159 
9160 	/* restore 5701 hardware bug workaround write method */
9161 	tp->write32 = write_op;
9162 
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
9176 	udelay(120);
9177 
	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time, so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
9184 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9185 
9186 	udelay(120);
9187 
9188 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9189 		u16 val16;
9190 
9191 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9192 			int j;
9193 			u32 cfg_val;
9194 
9195 			/* Wait for link training to complete.  */
9196 			for (j = 0; j < 5000; j++)
9197 				udelay(100);
9198 
9199 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9200 			pci_write_config_dword(tp->pdev, 0xc4,
9201 					       cfg_val | (1 << 15));
9202 		}
9203 
9204 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9205 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128-byte
		 * MPS setting.  Enforce the restriction.
		 */
9210 		if (!tg3_flag(tp, CPMU_PRESENT))
9211 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9212 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9213 
9214 		/* Clear error status */
9215 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9216 				      PCI_EXP_DEVSTA_CED |
9217 				      PCI_EXP_DEVSTA_NFED |
9218 				      PCI_EXP_DEVSTA_FED |
9219 				      PCI_EXP_DEVSTA_URD);
9220 	}
9221 
9222 	tg3_restore_pci_state(tp);
9223 
9224 	tg3_flag_clear(tp, CHIP_RESETTING);
9225 	tg3_flag_clear(tp, ERROR_PROCESSED);
9226 
9227 	val = 0;
9228 	if (tg3_flag(tp, 5780_CLASS))
9229 		val = tr32(MEMARB_MODE);
9230 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9231 
9232 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9233 		tg3_stop_fw(tp);
9234 		tw32(0x5000, 0x400);
9235 	}
9236 
9237 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * the potentially defective internal ROM, stop the Rx
		 * RISC CPU, which is not required for normal operation.
		 */
9243 		tg3_stop_fw(tp);
9244 		tg3_halt_cpu(tp, RX_CPU_BASE);
9245 	}
9246 
9247 	err = tg3_poll_fw(tp);
9248 	if (err)
9249 		return err;
9250 
9251 	tw32(GRC_MODE, tp->grc_mode);
9252 
9253 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9254 		val = tr32(0xc4);
9255 
9256 		tw32(0xc4, val | (1 << 15));
9257 	}
9258 
9259 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9260 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9261 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9262 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9263 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9264 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9265 	}
9266 
9267 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9268 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9269 		val = tp->mac_mode;
9270 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9271 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9272 		val = tp->mac_mode;
9273 	} else
9274 		val = 0;
9275 
9276 	tw32_f(MAC_MODE, val);
9277 	udelay(40);
9278 
9279 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9280 
9281 	tg3_mdio_start(tp);
9282 
9283 	if (tg3_flag(tp, PCI_EXPRESS) &&
9284 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9285 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9286 	    !tg3_flag(tp, 57765_PLUS)) {
9287 		val = tr32(0x7c00);
9288 
9289 		tw32(0x7c00, val | (1 << 25));
9290 	}
9291 
9292 	tg3_restore_clk(tp);
9293 
9294 	/* Reprobe ASF enable state.  */
9295 	tg3_flag_clear(tp, ENABLE_ASF);
9296 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9297 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9298 
9299 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9300 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9301 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9302 		u32 nic_cfg;
9303 
9304 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9305 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9306 			tg3_flag_set(tp, ENABLE_ASF);
9307 			tp->last_event_jiffies = jiffies;
9308 			if (tg3_flag(tp, 5750_PLUS))
9309 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9310 
9311 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9312 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9313 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9314 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9315 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9316 		}
9317 	}
9318 
9319 	return 0;
9320 }
9321 
9322 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9323 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9324 static void __tg3_set_rx_mode(struct net_device *);
9325 
9326 /* tp->lock is held. */
9327 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9328 {
9329 	int err;
9330 
9331 	tg3_stop_fw(tp);
9332 
9333 	tg3_write_sig_pre_reset(tp, kind);
9334 
9335 	tg3_abort_hw(tp, silent);
9336 	err = tg3_chip_reset(tp);
9337 
9338 	__tg3_set_mac_addr(tp, false);
9339 
9340 	tg3_write_sig_legacy(tp, kind);
9341 	tg3_write_sig_post_reset(tp, kind);
9342 
9343 	if (tp->hw_stats) {
9344 		/* Save the stats across chip resets... */
9345 		tg3_get_nstats(tp, &tp->net_stats_prev);
9346 		tg3_get_estats(tp, &tp->estats_prev);
9347 
9348 		/* And make sure the next sample is new data */
9349 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9350 	}
9351 
9352 	return err;
9353 }
9354 
9355 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9356 {
9357 	struct tg3 *tp = netdev_priv(dev);
9358 	struct sockaddr *addr = p;
9359 	int err = 0;
9360 	bool skip_mac_1 = false;
9361 
9362 	if (!is_valid_ether_addr(addr->sa_data))
9363 		return -EADDRNOTAVAIL;
9364 
9365 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9366 
9367 	if (!netif_running(dev))
9368 		return 0;
9369 
9370 	if (tg3_flag(tp, ENABLE_ASF)) {
9371 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9372 
9373 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9374 		addr0_low = tr32(MAC_ADDR_0_LOW);
9375 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9376 		addr1_low = tr32(MAC_ADDR_1_LOW);
9377 
9378 		/* Skip MAC addr 1 if ASF is using it. */
9379 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9380 		    !(addr1_high == 0 && addr1_low == 0))
9381 			skip_mac_1 = true;
9382 	}
9383 	spin_lock_bh(&tp->lock);
9384 	__tg3_set_mac_addr(tp, skip_mac_1);
9385 	__tg3_set_rx_mode(dev);
9386 	spin_unlock_bh(&tp->lock);
9387 
9388 	return err;
9389 }
9390 
9391 /* tp->lock is held. */
9392 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9393 			   dma_addr_t mapping, u32 maxlen_flags,
9394 			   u32 nic_addr)
9395 {
9396 	tg3_write_mem(tp,
9397 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9398 		      ((u64) mapping >> 32));
9399 	tg3_write_mem(tp,
9400 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9401 		      ((u64) mapping & 0xffffffff));
9402 	tg3_write_mem(tp,
9403 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9404 		       maxlen_flags);
9405 
9406 	if (!tg3_flag(tp, 5705_PLUS))
9407 		tg3_write_mem(tp,
9408 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9409 			      nic_addr);
9410 }
9411 
9412 
9413 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9414 {
9415 	int i = 0;
9416 
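	/* Per-vector coalescing registers are grouped in 0x18-byte blocks
	 * starting at the *_VEC1 addresses; vector i's copy of a register
	 * lives at (base + i * 0x18).
	 */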
9417 	if (!tg3_flag(tp, ENABLE_TSS)) {
9418 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9419 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9420 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9421 	} else {
9422 		tw32(HOSTCC_TXCOL_TICKS, 0);
9423 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9424 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9425 
9426 		for (; i < tp->txq_cnt; i++) {
9427 			u32 reg;
9428 
9429 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9430 			tw32(reg, ec->tx_coalesce_usecs);
9431 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9432 			tw32(reg, ec->tx_max_coalesced_frames);
9433 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9434 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9435 		}
9436 	}
9437 
9438 	for (; i < tp->irq_max - 1; i++) {
9439 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9440 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9441 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9442 	}
9443 }
9444 
9445 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9446 {
9447 	int i = 0;
9448 	u32 limit = tp->rxq_cnt;
9449 
9450 	if (!tg3_flag(tp, ENABLE_RSS)) {
9451 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9452 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9453 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9454 		limit--;
9455 	} else {
9456 		tw32(HOSTCC_RXCOL_TICKS, 0);
9457 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9458 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9459 	}
9460 
9461 	for (; i < limit; i++) {
9462 		u32 reg;
9463 
9464 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9465 		tw32(reg, ec->rx_coalesce_usecs);
9466 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9467 		tw32(reg, ec->rx_max_coalesced_frames);
9468 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9469 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9470 	}
9471 
9472 	for (; i < tp->irq_max - 1; i++) {
9473 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9474 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9475 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9476 	}
9477 }
9478 
9479 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9480 {
9481 	tg3_coal_tx_init(tp, ec);
9482 	tg3_coal_rx_init(tp, ec);
9483 
9484 	if (!tg3_flag(tp, 5705_PLUS)) {
9485 		u32 val = ec->stats_block_coalesce_usecs;
9486 
9487 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9488 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9489 
9490 		if (!tp->link_up)
9491 			val = 0;
9492 
9493 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9494 	}
9495 }
9496 
9497 /* tp->lock is held. */
9498 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9499 {
9500 	u32 txrcb, limit;
9501 
9502 	/* Disable all transmit rings but the first. */
9503 	if (!tg3_flag(tp, 5705_PLUS))
9504 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9505 	else if (tg3_flag(tp, 5717_PLUS))
9506 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9507 	else if (tg3_flag(tp, 57765_CLASS) ||
9508 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9509 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9510 	else
9511 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9512 
9513 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9514 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9515 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9516 			      BDINFO_FLAGS_DISABLED);
9517 }
9518 
9519 /* tp->lock is held. */
9520 static void tg3_tx_rcbs_init(struct tg3 *tp)
9521 {
9522 	int i = 0;
9523 	u32 txrcb = NIC_SRAM_SEND_RCB;
9524 
9525 	if (tg3_flag(tp, ENABLE_TSS))
9526 		i++;
9527 
9528 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9529 		struct tg3_napi *tnapi = &tp->napi[i];
9530 
9531 		if (!tnapi->tx_ring)
9532 			continue;
9533 
9534 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9535 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9536 			       NIC_SRAM_TX_BUFFER_DESC);
9537 	}
9538 }
9539 
9540 /* tp->lock is held. */
9541 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9542 {
9543 	u32 rxrcb, limit;
9544 
9545 	/* Disable all receive return rings but the first. */
9546 	if (tg3_flag(tp, 5717_PLUS))
9547 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9548 	else if (!tg3_flag(tp, 5705_PLUS))
9549 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9550 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9551 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9552 		 tg3_flag(tp, 57765_CLASS))
9553 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9554 	else
9555 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9556 
9557 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9558 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9559 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9560 			      BDINFO_FLAGS_DISABLED);
9561 }
9562 
9563 /* tp->lock is held. */
9564 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9565 {
9566 	int i = 0;
9567 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9568 
9569 	if (tg3_flag(tp, ENABLE_RSS))
9570 		i++;
9571 
9572 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9573 		struct tg3_napi *tnapi = &tp->napi[i];
9574 
9575 		if (!tnapi->rx_rcb)
9576 			continue;
9577 
9578 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9579 			       (tp->rx_ret_ring_mask + 1) <<
9580 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9581 	}
9582 }
9583 
9584 /* tp->lock is held. */
9585 static void tg3_rings_reset(struct tg3 *tp)
9586 {
9587 	int i;
9588 	u32 stblk;
9589 	struct tg3_napi *tnapi = &tp->napi[0];
9590 
9591 	tg3_tx_rcbs_disable(tp);
9592 
9593 	tg3_rx_ret_rcbs_disable(tp);
9594 
9595 	/* Disable interrupts */
9596 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9597 	tp->napi[0].chk_msi_cnt = 0;
9598 	tp->napi[0].last_rx_cons = 0;
9599 	tp->napi[0].last_tx_cons = 0;
9600 
9601 	/* Zero mailbox registers. */
9602 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9603 		for (i = 1; i < tp->irq_max; i++) {
9604 			tp->napi[i].tx_prod = 0;
9605 			tp->napi[i].tx_cons = 0;
9606 			if (tg3_flag(tp, ENABLE_TSS))
9607 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9608 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9609 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9610 			tp->napi[i].chk_msi_cnt = 0;
9611 			tp->napi[i].last_rx_cons = 0;
9612 			tp->napi[i].last_tx_cons = 0;
9613 		}
9614 		if (!tg3_flag(tp, ENABLE_TSS))
9615 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9616 	} else {
9617 		tp->napi[0].tx_prod = 0;
9618 		tp->napi[0].tx_cons = 0;
9619 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9620 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9621 	}
9622 
9623 	/* Make sure the NIC-based send BD rings are disabled. */
9624 	if (!tg3_flag(tp, 5705_PLUS)) {
9625 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9626 		for (i = 0; i < 16; i++)
9627 			tw32_tx_mbox(mbox + i * 8, 0);
9628 	}
9629 
9630 	/* Clear status block in ram. */
9631 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9632 
9633 	/* Set status block DMA address */
9634 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9635 	     ((u64) tnapi->status_mapping >> 32));
9636 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9637 	     ((u64) tnapi->status_mapping & 0xffffffff));
9638 
9639 	stblk = HOSTCC_STATBLCK_RING1;
9640 
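	/* Status block addresses for the remaining vectors follow in
	 * consecutive HOSTCC ring registers, 8 bytes (one 64-bit host
	 * address) apart.
	 */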
9641 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9642 		u64 mapping = (u64)tnapi->status_mapping;
9643 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9644 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9645 		stblk += 8;
9646 
9647 		/* Clear status block in ram. */
9648 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9649 	}
9650 
9651 	tg3_tx_rcbs_init(tp);
9652 	tg3_rx_ret_rcbs_init(tp);
9653 }
9654 
9655 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9656 {
9657 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9658 
9659 	if (!tg3_flag(tp, 5750_PLUS) ||
9660 	    tg3_flag(tp, 5780_CLASS) ||
9661 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9662 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9663 	    tg3_flag(tp, 57765_PLUS))
9664 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9665 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9666 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9667 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9668 	else
9669 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9670 
9671 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9672 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9673 
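	/* Use the more conservative (smaller) of the two thresholds.
	 * Illustrative example: with a 64-entry BD cache and 200 standard
	 * RX descriptors pending, the NIC-side threshold is at most 32 and
	 * the host-side threshold is 200 / 8 = 25, so 25 (or less) is used.
	 */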
9674 	val = min(nic_rep_thresh, host_rep_thresh);
9675 	tw32(RCVBDI_STD_THRESH, val);
9676 
9677 	if (tg3_flag(tp, 57765_PLUS))
9678 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9679 
9680 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9681 		return;
9682 
9683 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9684 
9685 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9686 
9687 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9688 	tw32(RCVBDI_JUMBO_THRESH, val);
9689 
9690 	if (tg3_flag(tp, 57765_PLUS))
9691 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9692 }
9693 
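/* Compute the little-endian CRC-32 of buf (Ethernet polynomial,
 * CRC32_POLY_LE) one bit at a time; this is effectively
 * ~crc32_le(~0, buf, len) from <linux/crc32.h>.  Used to hash
 * multicast addresses into the 128-bit MAC hash filter.
 */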
9694 static inline u32 calc_crc(unsigned char *buf, int len)
9695 {
9696 	u32 reg;
9697 	u32 tmp;
9698 	int j, k;
9699 
9700 	reg = 0xffffffff;
9701 
9702 	for (j = 0; j < len; j++) {
9703 		reg ^= buf[j];
9704 
9705 		for (k = 0; k < 8; k++) {
9706 			tmp = reg & 0x01;
9707 
9708 			reg >>= 1;
9709 
9710 			if (tmp)
9711 				reg ^= CRC32_POLY_LE;
9712 		}
9713 	}
9714 
9715 	return ~reg;
9716 }
9717 
9718 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9719 {
9720 	/* accept or reject all multicast frames */
9721 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9722 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9723 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9724 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9725 }
9726 
9727 static void __tg3_set_rx_mode(struct net_device *dev)
9728 {
9729 	struct tg3 *tp = netdev_priv(dev);
9730 	u32 rx_mode;
9731 
9732 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9733 				  RX_MODE_KEEP_VLAN_TAG);
9734 
9735 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9736 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9737 	 * flag clear.
9738 	 */
9739 	if (!tg3_flag(tp, ENABLE_ASF))
9740 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9741 #endif
9742 
9743 	if (dev->flags & IFF_PROMISC) {
9744 		/* Promiscuous mode. */
9745 		rx_mode |= RX_MODE_PROMISC;
9746 	} else if (dev->flags & IFF_ALLMULTI) {
9747 		/* Accept all multicast. */
9748 		tg3_set_multi(tp, 1);
9749 	} else if (netdev_mc_empty(dev)) {
9750 		/* Reject all multicast. */
9751 		tg3_set_multi(tp, 0);
9752 	} else {
		/* Accept one or more multicast addresses. */
9754 		struct netdev_hw_addr *ha;
9755 		u32 mc_filter[4] = { 0, };
9756 		u32 regidx;
9757 		u32 bit;
9758 		u32 crc;
9759 
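		/* Hash each address into one of 128 filter bits: the low
		 * seven bits of the inverted CRC select the bit, with bits
		 * 6:5 choosing one of the four 32-bit hash registers and
		 * bits 4:0 the bit within it.  A hash value of 0x43, for
		 * example, sets bit 3 of MAC_HASH_REG_2.
		 */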
9760 		netdev_for_each_mc_addr(ha, dev) {
9761 			crc = calc_crc(ha->addr, ETH_ALEN);
9762 			bit = ~crc & 0x7f;
9763 			regidx = (bit & 0x60) >> 5;
9764 			bit &= 0x1f;
9765 			mc_filter[regidx] |= (1 << bit);
9766 		}
9767 
9768 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9769 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9770 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9771 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9772 	}
9773 
9774 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9775 		rx_mode |= RX_MODE_PROMISC;
9776 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
9778 		int i = 0;
9779 		struct netdev_hw_addr *ha;
9780 
9781 		netdev_for_each_uc_addr(ha, dev) {
9782 			__tg3_set_one_mac_addr(tp, ha->addr,
9783 					       i + TG3_UCAST_ADDR_IDX(tp));
9784 			i++;
9785 		}
9786 	}
9787 
9788 	if (rx_mode != tp->rx_mode) {
9789 		tp->rx_mode = rx_mode;
9790 		tw32_f(MAC_RX_MODE, rx_mode);
9791 		udelay(10);
9792 	}
9793 }
9794 
9795 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9796 {
9797 	int i;
9798 
9799 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9800 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9801 }
9802 
9803 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9804 {
9805 	int i;
9806 
9807 	if (!tg3_flag(tp, SUPPORT_MSIX))
9808 		return;
9809 
9810 	if (tp->rxq_cnt == 1) {
9811 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9812 		return;
9813 	}
9814 
9815 	/* Validate table against current IRQ count */
9816 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9817 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9818 			break;
9819 	}
9820 
9821 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9822 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9823 }
9824 
9825 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9826 {
9827 	int i = 0;
9828 	u32 reg = MAC_RSS_INDIR_TBL_0;
9829 
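	/* Each 32-bit register packs eight 4-bit table entries, first
	 * entry in the most significant nibble; e.g. the entries
	 * { 1, 2, 3, 0, 1, 2, 3, 0 } are written as 0x12301230.
	 */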
9830 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9831 		u32 val = tp->rss_ind_tbl[i];
9832 		i++;
9833 		for (; i % 8; i++) {
9834 			val <<= 4;
9835 			val |= tp->rss_ind_tbl[i];
9836 		}
9837 		tw32(reg, val);
9838 		reg += 4;
9839 	}
9840 }
9841 
9842 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9843 {
9844 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9845 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9846 	else
9847 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9848 }
9849 
9850 /* tp->lock is held. */
9851 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9852 {
9853 	u32 val, rdmac_mode;
9854 	int i, err, limit;
9855 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9856 
9857 	tg3_disable_ints(tp);
9858 
9859 	tg3_stop_fw(tp);
9860 
9861 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9862 
9863 	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, true);
9865 
9866 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9867 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9868 		tg3_phy_pull_config(tp);
9869 		tg3_eee_pull_config(tp, NULL);
9870 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9871 	}
9872 
9873 	/* Enable MAC control of LPI */
9874 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9875 		tg3_setup_eee(tp);
9876 
9877 	if (reset_phy)
9878 		tg3_phy_reset(tp);
9879 
9880 	err = tg3_chip_reset(tp);
9881 	if (err)
9882 		return err;
9883 
9884 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9885 
9886 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9887 		val = tr32(TG3_CPMU_CTRL);
9888 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9889 		tw32(TG3_CPMU_CTRL, val);
9890 
9891 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9892 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9893 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9894 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9895 
9896 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9897 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9898 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9899 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9900 
9901 		val = tr32(TG3_CPMU_HST_ACC);
9902 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9903 		val |= CPMU_HST_ACC_MACCLK_6_25;
9904 		tw32(TG3_CPMU_HST_ACC, val);
9905 	}
9906 
9907 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9908 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9909 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9910 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9911 		tw32(PCIE_PWR_MGMT_THRESH, val);
9912 
9913 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9914 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9915 
9916 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9917 
9918 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9919 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9920 	}
9921 
9922 	if (tg3_flag(tp, L1PLLPD_EN)) {
9923 		u32 grc_mode = tr32(GRC_MODE);
9924 
9925 		/* Access the lower 1K of PL PCIE block registers. */
9926 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9927 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9928 
9929 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9930 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9931 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9932 
9933 		tw32(GRC_MODE, grc_mode);
9934 	}
9935 
9936 	if (tg3_flag(tp, 57765_CLASS)) {
9937 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9938 			u32 grc_mode = tr32(GRC_MODE);
9939 
9940 			/* Access the lower 1K of PL PCIE block registers. */
9941 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9942 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9943 
9944 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9945 				   TG3_PCIE_PL_LO_PHYCTL5);
9946 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9947 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9948 
9949 			tw32(GRC_MODE, grc_mode);
9950 		}
9951 
9952 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9953 			u32 grc_mode;
9954 
9955 			/* Fix transmit hangs */
9956 			val = tr32(TG3_CPMU_PADRNG_CTL);
9957 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9958 			tw32(TG3_CPMU_PADRNG_CTL, val);
9959 
9960 			grc_mode = tr32(GRC_MODE);
9961 
9962 			/* Access the lower 1K of DL PCIE block registers. */
9963 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9964 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9965 
9966 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9967 				   TG3_PCIE_DL_LO_FTSMAX);
9968 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9969 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9970 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9971 
9972 			tw32(GRC_MODE, grc_mode);
9973 		}
9974 
9975 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9976 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9977 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9978 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9979 	}
9980 
9981 	/* This works around an issue with Athlon chipsets on
9982 	 * B3 tigon3 silicon.  This bit has no effect on any
9983 	 * other revision.  But do not set this on PCI Express
9984 	 * chips and don't even touch the clocks if the CPMU is present.
9985 	 */
9986 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9987 		if (!tg3_flag(tp, PCI_EXPRESS))
9988 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9989 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9990 	}
9991 
9992 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9993 	    tg3_flag(tp, PCIX_MODE)) {
9994 		val = tr32(TG3PCI_PCISTATE);
9995 		val |= PCISTATE_RETRY_SAME_DMA;
9996 		tw32(TG3PCI_PCISTATE, val);
9997 	}
9998 
9999 	if (tg3_flag(tp, ENABLE_APE)) {
10000 		/* Allow reads and writes to the
10001 		 * APE register and memory space.
10002 		 */
10003 		val = tr32(TG3PCI_PCISTATE);
10004 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10005 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10006 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10007 		tw32(TG3PCI_PCISTATE, val);
10008 	}
10009 
10010 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10011 		/* Enable some hw fixes.  */
10012 		val = tr32(TG3PCI_MSI_DATA);
10013 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10014 		tw32(TG3PCI_MSI_DATA, val);
10015 	}
10016 
10017 	/* Descriptor ring init may make accesses to the
10018 	 * NIC SRAM area to setup the TX descriptors, so we
10019 	 * can only do this after the hardware has been
10020 	 * successfully reset.
10021 	 */
10022 	err = tg3_init_rings(tp);
10023 	if (err)
10024 		return err;
10025 
10026 	if (tg3_flag(tp, 57765_PLUS)) {
10027 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10028 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10029 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10030 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10031 		if (!tg3_flag(tp, 57765_CLASS) &&
10032 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10033 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10034 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10035 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10036 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10037 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe-time DMA
10039 		 * engine test, tg3_test_dma.
10040 		 */
10041 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10042 	}
10043 
10044 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10045 			  GRC_MODE_4X_NIC_SEND_RINGS |
10046 			  GRC_MODE_NO_TX_PHDR_CSUM |
10047 			  GRC_MODE_NO_RX_PHDR_CSUM);
10048 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10049 
10050 	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
10052 	 * header checksums on receive.  For transmit it is more
10053 	 * convenient to do the pseudo-header checksum in software
10054 	 * as Linux does that on transmit for us in all cases.
10055 	 */
10056 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10057 
10058 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10059 	if (tp->rxptpctl)
10060 		tw32(TG3_RX_PTP_CTL,
10061 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10062 
10063 	if (tg3_flag(tp, PTP_CAPABLE))
10064 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10065 
10066 	tw32(GRC_MODE, tp->grc_mode | val);
10067 
	/* On one AMD platform, the MRRS is restricted to 4000 because of a
	 * south bridge limitation.  As a workaround, the driver sets the
	 * MRRS to 2048 instead of the default 4096.
10071 	 */
10072 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10073 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10074 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10075 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10076 	}
10077 
	/* Set up the timer prescaler register.  The clock is always 66 MHz;
	 * a prescaler value of 65 divides it by 66, yielding a 1 MHz tick.
	 */
10079 	val = tr32(GRC_MISC_CFG);
10080 	val &= ~0xff;
10081 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10082 	tw32(GRC_MISC_CFG, val);
10083 
10084 	/* Initialize MBUF/DESC pool. */
10085 	if (tg3_flag(tp, 5750_PLUS)) {
10086 		/* Do nothing.  */
10087 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10088 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10089 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10090 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10091 		else
10092 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10093 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10094 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10095 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10096 		int fw_len;
10097 
10098 		fw_len = tp->fw_len;
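		/* Round the firmware length up to a 128-byte boundary. */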
10099 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10100 		tw32(BUFMGR_MB_POOL_ADDR,
10101 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10102 		tw32(BUFMGR_MB_POOL_SIZE,
10103 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10104 	}
10105 
10106 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10107 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10108 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10109 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10110 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10111 		tw32(BUFMGR_MB_HIGH_WATER,
10112 		     tp->bufmgr_config.mbuf_high_water);
10113 	} else {
10114 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10115 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10116 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10117 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10118 		tw32(BUFMGR_MB_HIGH_WATER,
10119 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10120 	}
10121 	tw32(BUFMGR_DMA_LOW_WATER,
10122 	     tp->bufmgr_config.dma_low_water);
10123 	tw32(BUFMGR_DMA_HIGH_WATER,
10124 	     tp->bufmgr_config.dma_high_water);
10125 
10126 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10127 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10128 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10129 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10130 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10131 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10132 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10133 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10134 	tw32(BUFMGR_MODE, val);
10135 	for (i = 0; i < 2000; i++) {
10136 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10137 			break;
10138 		udelay(10);
10139 	}
10140 	if (i >= 2000) {
10141 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10142 		return -ENODEV;
10143 	}
10144 
10145 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10146 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10147 
10148 	tg3_setup_rxbd_thresholds(tp);
10149 
10150 	/* Initialize TG3_BDINFO's at:
10151 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10152 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10153 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10154 	 *
10155 	 * like so:
10156 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10157 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10158 	 *                              ring attribute flags
10159 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10160 	 *
10161 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10162 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10163 	 *
10164 	 * The size of each ring is fixed in the firmware, but the location is
10165 	 * configurable.
10166 	 */
10167 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10168 	     ((u64) tpr->rx_std_mapping >> 32));
10169 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10170 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10171 	if (!tg3_flag(tp, 5717_PLUS))
10172 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10173 		     NIC_SRAM_RX_BUFFER_DESC);
10174 
10175 	/* Disable the mini ring */
10176 	if (!tg3_flag(tp, 5705_PLUS))
10177 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10178 		     BDINFO_FLAGS_DISABLED);
10179 
10180 	/* Program the jumbo buffer descriptor ring control
10181 	 * blocks on those devices that have them.
10182 	 */
10183 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10184 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10185 
10186 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10187 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10188 			     ((u64) tpr->rx_jmb_mapping >> 32));
10189 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10190 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10191 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10192 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10193 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10194 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10195 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10196 			    tg3_flag(tp, 57765_CLASS) ||
10197 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10198 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10199 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10200 		} else {
10201 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10202 			     BDINFO_FLAGS_DISABLED);
10203 		}
10204 
10205 		if (tg3_flag(tp, 57765_PLUS)) {
10206 			val = TG3_RX_STD_RING_SIZE(tp);
10207 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10208 			val |= (TG3_RX_STD_DMA_SZ << 2);
10209 		} else
10210 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10211 	} else
10212 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10213 
10214 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10215 
10216 	tpr->rx_std_prod_idx = tp->rx_pending;
10217 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10218 
10219 	tpr->rx_jmb_prod_idx =
10220 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10221 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10222 
10223 	tg3_rings_reset(tp);
10224 
10225 	/* Initialize MAC address and backoff seed. */
10226 	__tg3_set_mac_addr(tp, false);
10227 
10228 	/* MTU + ethernet header + FCS + optional VLAN tag */
10229 	tw32(MAC_RX_MTU_SIZE,
10230 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10231 
10232 	/* The slot time is changed by tg3_setup_phy if we
10233 	 * run at gigabit with half duplex.
10234 	 */
10235 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10236 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10237 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10238 
10239 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10240 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10241 		val |= tr32(MAC_TX_LENGTHS) &
10242 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10243 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10244 
10245 	tw32(MAC_TX_LENGTHS, val);
10246 
10247 	/* Receive rules. */
10248 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10249 	tw32(RCVLPC_CONFIG, 0x0181);
10250 
10251 	/* Calculate RDMAC_MODE setting early, we need it to determine
10252 	 * the RCVLPC_STATE_ENABLE mask.
10253 	 */
10254 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10255 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10256 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10257 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10258 		      RDMAC_MODE_LNGREAD_ENAB);
10259 
10260 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10261 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10262 
10263 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10264 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10265 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10266 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10267 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10268 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10269 
10270 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10271 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10272 		if (tg3_flag(tp, TSO_CAPABLE) &&
10273 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10274 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10275 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10276 			   !tg3_flag(tp, IS_5788)) {
10277 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10278 		}
10279 	}
10280 
10281 	if (tg3_flag(tp, PCI_EXPRESS))
10282 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10283 
10284 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10285 		tp->dma_limit = 0;
10286 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10287 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10288 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10289 		}
10290 	}
10291 
10292 	if (tg3_flag(tp, HW_TSO_1) ||
10293 	    tg3_flag(tp, HW_TSO_2) ||
10294 	    tg3_flag(tp, HW_TSO_3))
10295 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10296 
10297 	if (tg3_flag(tp, 57765_PLUS) ||
10298 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10299 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10300 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10301 
10302 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10303 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10304 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10305 
10306 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10307 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10308 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10309 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10310 	    tg3_flag(tp, 57765_PLUS)) {
10311 		u32 tgtreg;
10312 
10313 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10314 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10315 		else
10316 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10317 
10318 		val = tr32(tgtreg);
10319 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10320 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10321 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10322 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10323 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10324 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10325 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10326 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10327 		}
10328 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10329 	}
10330 
10331 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10332 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10333 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10334 		u32 tgtreg;
10335 
10336 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10337 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10338 		else
10339 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10340 
10341 		val = tr32(tgtreg);
10342 		tw32(tgtreg, val |
10343 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10344 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10345 	}
10346 
10347 	/* Receive/send statistics. */
10348 	if (tg3_flag(tp, 5750_PLUS)) {
10349 		val = tr32(RCVLPC_STATS_ENABLE);
10350 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10351 		tw32(RCVLPC_STATS_ENABLE, val);
10352 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10353 		   tg3_flag(tp, TSO_CAPABLE)) {
10354 		val = tr32(RCVLPC_STATS_ENABLE);
10355 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10356 		tw32(RCVLPC_STATS_ENABLE, val);
10357 	} else {
10358 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10359 	}
10360 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10361 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10362 	tw32(SNDDATAI_STATSCTRL,
10363 	     (SNDDATAI_SCTRL_ENABLE |
10364 	      SNDDATAI_SCTRL_FASTUPD));
10365 
10366 	/* Setup host coalescing engine. */
10367 	tw32(HOSTCC_MODE, 0);
10368 	for (i = 0; i < 2000; i++) {
10369 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10370 			break;
10371 		udelay(10);
10372 	}
10373 
10374 	__tg3_set_coalesce(tp, &tp->coal);
10375 
10376 	if (!tg3_flag(tp, 5705_PLUS)) {
10377 		/* Status/statistics block address.  See tg3_timer,
10378 		 * the tg3_periodic_fetch_stats call there, and
10379 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10380 		 */
10381 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10382 		     ((u64) tp->stats_mapping >> 32));
10383 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10384 		     ((u64) tp->stats_mapping & 0xffffffff));
10385 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10386 
10387 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10388 
10389 		/* Clear statistics and status block memory areas */
10390 		for (i = NIC_SRAM_STATS_BLK;
10391 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10392 		     i += sizeof(u32)) {
10393 			tg3_write_mem(tp, i, 0);
10394 			udelay(40);
10395 		}
10396 	}
10397 
10398 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10399 
10400 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10401 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10402 	if (!tg3_flag(tp, 5705_PLUS))
10403 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10404 
10405 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10406 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10407 		/* reset to prevent losing 1st rx packet intermittently */
10408 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10409 		udelay(10);
10410 	}
10411 
10412 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10413 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10414 			MAC_MODE_FHDE_ENABLE;
10415 	if (tg3_flag(tp, ENABLE_APE))
10416 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10417 	if (!tg3_flag(tp, 5705_PLUS) &&
10418 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10419 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10420 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
	       MAC_MODE_TXSTAT_CLEAR);
10422 	udelay(40);
10423 
10424 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10425 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10426 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10427 	 * whether used as inputs or outputs, are set by boot code after
10428 	 * reset.
10429 	 */
10430 	if (!tg3_flag(tp, IS_NIC)) {
10431 		u32 gpio_mask;
10432 
10433 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10434 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10435 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10436 
10437 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10438 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10439 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10440 
10441 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10442 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10443 
10444 		tp->grc_local_ctrl &= ~gpio_mask;
10445 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10446 
10447 		/* GPIO1 must be driven high for eeprom write protect */
10448 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10449 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10450 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10451 	}
10452 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10453 	udelay(100);
10454 
10455 	if (tg3_flag(tp, USING_MSIX)) {
10456 		val = tr32(MSGINT_MODE);
10457 		val |= MSGINT_MODE_ENABLE;
10458 		if (tp->irq_cnt > 1)
10459 			val |= MSGINT_MODE_MULTIVEC_EN;
10460 		if (!tg3_flag(tp, 1SHOT_MSI))
10461 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10462 		tw32(MSGINT_MODE, val);
10463 	}
10464 
10465 	if (!tg3_flag(tp, 5705_PLUS)) {
10466 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10467 		udelay(40);
10468 	}
10469 
10470 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10471 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10472 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10473 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10474 	       WDMAC_MODE_LNGREAD_ENAB);
10475 
10476 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10477 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10478 		if (tg3_flag(tp, TSO_CAPABLE) &&
10479 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10480 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10481 			/* nothing */
10482 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10483 			   !tg3_flag(tp, IS_5788)) {
10484 			val |= WDMAC_MODE_RX_ACCEL;
10485 		}
10486 	}
10487 
10488 	/* Enable host coalescing bug fix */
10489 	if (tg3_flag(tp, 5755_PLUS))
10490 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10491 
10492 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10493 		val |= WDMAC_MODE_BURST_ALL_DATA;
10494 
10495 	tw32_f(WDMAC_MODE, val);
10496 	udelay(40);
10497 
10498 	if (tg3_flag(tp, PCIX_MODE)) {
10499 		u16 pcix_cmd;
10500 
10501 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10502 				     &pcix_cmd);
10503 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10504 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10505 			pcix_cmd |= PCI_X_CMD_READ_2K;
10506 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10507 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10508 			pcix_cmd |= PCI_X_CMD_READ_2K;
10509 		}
10510 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10511 				      pcix_cmd);
10512 	}
10513 
10514 	tw32_f(RDMAC_MODE, rdmac_mode);
10515 	udelay(40);
10516 
10517 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10518 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10519 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10520 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10521 				break;
10522 		}
10523 		if (i < TG3_NUM_RDMA_CHANNELS) {
10524 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10525 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10526 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10527 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10528 		}
10529 	}
10530 
10531 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10532 	if (!tg3_flag(tp, 5705_PLUS))
10533 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10534 
10535 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10536 		tw32(SNDDATAC_MODE,
10537 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10538 	else
10539 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10540 
10541 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10542 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10543 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10544 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10545 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10546 	tw32(RCVDBDI_MODE, val);
10547 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10548 	if (tg3_flag(tp, HW_TSO_1) ||
10549 	    tg3_flag(tp, HW_TSO_2) ||
10550 	    tg3_flag(tp, HW_TSO_3))
10551 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10552 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10553 	if (tg3_flag(tp, ENABLE_TSS))
10554 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10555 	tw32(SNDBDI_MODE, val);
10556 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10557 
10558 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10559 		err = tg3_load_5701_a0_firmware_fix(tp);
10560 		if (err)
10561 			return err;
10562 	}
10563 
10564 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors from the firmware download.  If the
		 * download fails, the device will operate with EEE disabled.
		 */
10568 		tg3_load_57766_firmware(tp);
10569 	}
10570 
10571 	if (tg3_flag(tp, TSO_CAPABLE)) {
10572 		err = tg3_load_tso_firmware(tp);
10573 		if (err)
10574 			return err;
10575 	}
10576 
10577 	tp->tx_mode = TX_MODE_ENABLE;
10578 
10579 	if (tg3_flag(tp, 5755_PLUS) ||
10580 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10581 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10582 
10583 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10584 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10585 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10586 		tp->tx_mode &= ~val;
10587 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10588 	}
10589 
10590 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10591 	udelay(100);
10592 
10593 	if (tg3_flag(tp, ENABLE_RSS)) {
10594 		u32 rss_key[10];
10595 
10596 		tg3_rss_write_indir_tbl(tp);
10597 
10598 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10599 
		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10602 	}
10603 
10604 	tp->rx_mode = RX_MODE_ENABLE;
10605 	if (tg3_flag(tp, 5755_PLUS))
10606 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10607 
10608 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10609 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10610 
10611 	if (tg3_flag(tp, ENABLE_RSS))
10612 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10613 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10614 			       RX_MODE_RSS_IPV6_HASH_EN |
10615 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10616 			       RX_MODE_RSS_IPV4_HASH_EN |
10617 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10618 
10619 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10620 	udelay(10);
10621 
10622 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10623 
10624 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10625 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10626 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10627 		udelay(10);
10628 	}
10629 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10630 	udelay(10);
10631 
10632 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10633 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10634 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set the drive transmission level to 1.2V, but
			 * only if the signal pre-emphasis bit is not set.
			 */
10637 			val = tr32(MAC_SERDES_CFG);
10638 			val &= 0xfffff000;
10639 			val |= 0x880;
10640 			tw32(MAC_SERDES_CFG, val);
10641 		}
10642 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10643 			tw32(MAC_SERDES_CFG, 0x616000);
10644 	}
10645 
10646 	/* Prevent chip from dropping frames when flow control
10647 	 * is enabled.
10648 	 */
10649 	if (tg3_flag(tp, 57765_CLASS))
10650 		val = 1;
10651 	else
10652 		val = 2;
10653 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10654 
10655 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10656 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10657 		/* Use hardware link auto-negotiation */
10658 		tg3_flag_set(tp, HW_AUTONEG);
10659 	}
10660 
10661 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10662 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10663 		u32 tmp;
10664 
10665 		tmp = tr32(SERDES_RX_CTRL);
10666 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10667 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10668 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10669 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10670 	}
10671 
10672 	if (!tg3_flag(tp, USE_PHYLIB)) {
10673 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10674 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10675 
10676 		err = tg3_setup_phy(tp, false);
10677 		if (err)
10678 			return err;
10679 
10680 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10681 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10682 			u32 tmp;
10683 
10684 			/* Clear CRC stats. */
10685 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10686 				tg3_writephy(tp, MII_TG3_TEST1,
10687 					     tmp | MII_TG3_TEST1_CRC_EN);
10688 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10689 			}
10690 		}
10691 	}
10692 
10693 	__tg3_set_rx_mode(tp->dev);
10694 
10695 	/* Initialize receive rules. */
10696 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10697 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10698 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10699 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10700 
10701 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10702 		limit = 8;
10703 	else
10704 		limit = 16;
10705 	if (tg3_flag(tp, ENABLE_ASF))
10706 		limit -= 4;
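	/* Intentional fallthrough: clear the unused rules from the top
	 * down; each case clears one rule/value pair and falls through
	 * to the next lower one.  Rules 0 and 1 were programmed above,
	 * and rules 2 and 3 are deliberately left untouched.
	 */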
10707 	switch (limit) {
10708 	case 16:
10709 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10710 	case 15:
10711 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10712 	case 14:
10713 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10714 	case 13:
10715 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10716 	case 12:
10717 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10718 	case 11:
10719 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10720 	case 10:
10721 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10722 	case 9:
10723 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10724 	case 8:
10725 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10726 	case 7:
10727 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10728 	case 6:
10729 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10730 	case 5:
10731 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10732 	case 4:
10733 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10734 	case 3:
10735 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10736 	case 2:
10737 	case 1:
10738 
10739 	default:
10740 		break;
10741 	}
10742 
10743 	if (tg3_flag(tp, ENABLE_APE))
10744 		/* Write our heartbeat update interval to APE. */
10745 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10746 				APE_HOST_HEARTBEAT_INT_5SEC);
10747 
10748 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10749 
10750 	return 0;
10751 }
10752 
10753 /* Called at device open time to get the chip ready for
10754  * packet processing.  Invoked with tp->lock held.
10755  */
10756 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10757 {
10758 	/* Chip may have been just powered on. If so, the boot code may still
10759 	 * be running initialization. Wait for it to finish to avoid races in
10760 	 * accessing the hardware.
10761 	 */
10762 	tg3_enable_register_access(tp);
10763 	tg3_poll_fw(tp);
10764 
10765 	tg3_switch_clocks(tp);
10766 
10767 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10768 
10769 	return tg3_reset_hw(tp, reset_phy);
10770 }
10771 
10772 #ifdef CONFIG_TIGON3_HWMON
10773 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10774 {
10775 	int i;
10776 
10777 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10778 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10779 
10780 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10781 		off += len;
10782 
10783 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10784 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10785 			memset(ocir, 0, TG3_OCIR_LEN);
10786 	}
10787 }
10788 
10789 /* sysfs attributes for hwmon */
10790 static ssize_t tg3_show_temp(struct device *dev,
10791 			     struct device_attribute *devattr, char *buf)
10792 {
10793 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10794 	struct tg3 *tp = dev_get_drvdata(dev);
10795 	u32 temperature;
10796 
10797 	spin_lock_bh(&tp->lock);
10798 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10799 				sizeof(temperature));
10800 	spin_unlock_bh(&tp->lock);
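	/* hwmon sysfs reports temperatures in millidegrees Celsius; the
	 * value read from the APE scratchpad is in whole degrees, hence
	 * the scaling by 1000.
	 */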
10801 	return sprintf(buf, "%u\n", temperature * 1000);
10802 }
10803 
10804 
10805 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10806 			  TG3_TEMP_SENSOR_OFFSET);
10807 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10808 			  TG3_TEMP_CAUTION_OFFSET);
10809 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10810 			  TG3_TEMP_MAX_OFFSET);
10811 
10812 static struct attribute *tg3_attrs[] = {
10813 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10814 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10815 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10816 	NULL
10817 };
10818 ATTRIBUTE_GROUPS(tg3);
10819 
10820 static void tg3_hwmon_close(struct tg3 *tp)
10821 {
10822 	if (tp->hwmon_dev) {
10823 		hwmon_device_unregister(tp->hwmon_dev);
10824 		tp->hwmon_dev = NULL;
10825 	}
10826 }
10827 
10828 static void tg3_hwmon_open(struct tg3 *tp)
10829 {
10830 	int i;
10831 	u32 size = 0;
10832 	struct pci_dev *pdev = tp->pdev;
10833 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10834 
10835 	tg3_sd_scan_scratchpad(tp, ocirs);
10836 
10837 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10838 		if (!ocirs[i].src_data_length)
10839 			continue;
10840 
10841 		size += ocirs[i].src_hdr_length;
10842 		size += ocirs[i].src_data_length;
10843 	}
10844 
10845 	if (!size)
10846 		return;
10847 
10848 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10849 							  tp, tg3_groups);
10850 	if (IS_ERR(tp->hwmon_dev)) {
10851 		tp->hwmon_dev = NULL;
10852 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10853 	}
10854 }
10855 #else
10856 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10857 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10858 #endif /* CONFIG_TIGON3_HWMON */
10859 
10860 
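/* Accumulate a 32-bit hardware counter into a 64-bit high/low pair.
 * If the low word wraps around, the post-add value is smaller than
 * the amount just added, and a carry is propagated into the high word.
 */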
10861 #define TG3_STAT_ADD32(PSTAT, REG) \
10862 do {	u32 __val = tr32(REG); \
10863 	(PSTAT)->low += __val; \
10864 	if ((PSTAT)->low < __val) \
10865 		(PSTAT)->high += 1; \
10866 } while (0)
10867 
10868 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10869 {
10870 	struct tg3_hw_stats *sp = tp->hw_stats;
10871 
10872 	if (!tp->link_up)
10873 		return;
10874 
10875 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10876 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10877 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10878 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10879 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10880 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10881 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10882 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10883 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10884 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10885 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10886 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10887 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10888 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10889 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10890 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10891 		u32 val;
10892 
10893 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10894 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10895 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10896 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10897 	}
10898 
10899 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10900 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10901 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10902 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10903 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10904 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10905 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10906 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10907 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10908 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10909 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10910 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10911 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10912 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10913 
10914 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10915 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10916 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10917 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10918 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10919 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10920 	} else {
10921 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10922 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10923 		if (val) {
10924 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10925 			sp->rx_discards.low += val;
10926 			if (sp->rx_discards.low < val)
10927 				sp->rx_discards.high += 1;
10928 		}
10929 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10930 	}
10931 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10932 }
10933 
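/* Work around occasionally missed MSIs: if a vector still has work
 * pending but its consumer indices have not moved since the last
 * timer tick, assume the interrupt was lost and invoke the MSI
 * handler directly after one grace period.
 */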
10934 static void tg3_chk_missed_msi(struct tg3 *tp)
10935 {
10936 	u32 i;
10937 
10938 	for (i = 0; i < tp->irq_cnt; i++) {
10939 		struct tg3_napi *tnapi = &tp->napi[i];
10940 
10941 		if (tg3_has_work(tnapi)) {
10942 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10943 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10944 				if (tnapi->chk_msi_cnt < 1) {
10945 					tnapi->chk_msi_cnt++;
10946 					return;
10947 				}
10948 				tg3_msi(0, tnapi);
10949 			}
10950 		}
10951 		tnapi->chk_msi_cnt = 0;
10952 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10953 		tnapi->last_tx_cons = tnapi->tx_cons;
10954 	}
10955 }
10956 
10957 static void tg3_timer(struct timer_list *t)
10958 {
10959 	struct tg3 *tp = from_timer(tp, t, timer);
10960 
10961 	spin_lock(&tp->lock);
10962 
10963 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10964 		spin_unlock(&tp->lock);
10965 		goto restart_timer;
10966 	}
10967 
10968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10969 	    tg3_flag(tp, 57765_CLASS))
10970 		tg3_chk_missed_msi(tp);
10971 
10972 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10973 		/* BCM4785: Flush posted writes from GbE to host memory. */
10974 		tr32(HOSTCC_MODE);
10975 	}
10976 
10977 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
10981 		 */
10982 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10983 			tw32(GRC_LOCAL_CTRL,
10984 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10985 		} else {
10986 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10987 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10988 		}
10989 
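		/* If the write DMA engine has stopped on its own, the chip
		 * needs a full reset to recover.
		 */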
10990 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10991 			spin_unlock(&tp->lock);
10992 			tg3_reset_task_schedule(tp);
10993 			goto restart_timer;
10994 		}
10995 	}
10996 
10997 	/* This part only runs once per second. */
10998 	if (!--tp->timer_counter) {
10999 		if (tg3_flag(tp, 5705_PLUS))
11000 			tg3_periodic_fetch_stats(tp);
11001 
11002 		if (tp->setlpicnt && !--tp->setlpicnt)
11003 			tg3_phy_eee_enable(tp);
11004 
11005 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11006 			u32 mac_stat;
11007 			int phy_event;
11008 
11009 			mac_stat = tr32(MAC_STATUS);
11010 
11011 			phy_event = 0;
11012 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11013 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11014 					phy_event = 1;
11015 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11016 				phy_event = 1;
11017 
11018 			if (phy_event)
11019 				tg3_setup_phy(tp, false);
11020 		} else if (tg3_flag(tp, POLL_SERDES)) {
11021 			u32 mac_stat = tr32(MAC_STATUS);
11022 			int need_setup = 0;
11023 
11024 			if (tp->link_up &&
11025 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11026 				need_setup = 1;
11027 			}
11028 			if (!tp->link_up &&
11029 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11030 					 MAC_STATUS_SIGNAL_DET))) {
11031 				need_setup = 1;
11032 			}
11033 			if (need_setup) {
11034 				if (!tp->serdes_counter) {
11035 					tw32_f(MAC_MODE,
11036 					     (tp->mac_mode &
11037 					      ~MAC_MODE_PORT_MODE_MASK));
11038 					udelay(40);
11039 					tw32_f(MAC_MODE, tp->mac_mode);
11040 					udelay(40);
11041 				}
11042 				tg3_setup_phy(tp, false);
11043 			}
11044 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11045 			   tg3_flag(tp, 5780_CLASS)) {
11046 			tg3_serdes_parallel_detect(tp);
11047 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11048 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11049 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11050 					 TG3_CPMU_STATUS_LINK_MASK);
11051 
11052 			if (link_up != tp->link_up)
11053 				tg3_setup_phy(tp, false);
11054 		}
11055 
11056 		tp->timer_counter = tp->timer_multiplier;
11057 	}
11058 
11059 	/* Heartbeat is only sent once every 2 seconds.
11060 	 *
11061 	 * The heartbeat is to tell the ASF firmware that the host
11062 	 * driver is still alive.  In the event that the OS crashes,
11063 	 * ASF needs to reset the hardware to free up the FIFO space
11064 	 * that may be filled with rx packets destined for the host.
11065 	 * If the FIFO is full, ASF will no longer function properly.
11066 	 *
	 * Unintended resets have been reported on real-time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
11070 	 *
11071 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11072 	 * to check the ring condition when the heartbeat is expiring
11073 	 * before doing the reset.  This will prevent most unintended
11074 	 * resets.
11075 	 */
11076 	if (!--tp->asf_counter) {
11077 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11078 			tg3_wait_for_event_ack(tp);
11079 
11080 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11081 				      FWCMD_NICDRV_ALIVE3);
11082 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11083 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11084 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11085 
11086 			tg3_generate_fw_event(tp);
11087 		}
11088 		tp->asf_counter = tp->asf_multiplier;
11089 	}
11090 
	/* Update the APE heartbeat every 5 seconds. */
11092 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11093 
11094 	spin_unlock(&tp->lock);
11095 
11096 restart_timer:
11097 	tp->timer.expires = jiffies + tp->timer_offset;
11098 	add_timer(&tp->timer);
11099 }
11100 
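/* Tagged-status chips can run the timer at 1 Hz; everything else, plus
 * the 5717 and 57765 families (which need the missed-MSI check above),
 * runs at 10 Hz.  timer_multiplier converts ticks back into the
 * once-per-second work done in tg3_timer().
 */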
11101 static void tg3_timer_init(struct tg3 *tp)
11102 {
11103 	if (tg3_flag(tp, TAGGED_STATUS) &&
11104 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11105 	    !tg3_flag(tp, 57765_CLASS))
11106 		tp->timer_offset = HZ;
11107 	else
11108 		tp->timer_offset = HZ / 10;
11109 
11110 	BUG_ON(tp->timer_offset > HZ);
11111 
11112 	tp->timer_multiplier = (HZ / tp->timer_offset);
11113 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11114 			     TG3_FW_UPDATE_FREQ_SEC;
11115 
11116 	timer_setup(&tp->timer, tg3_timer, 0);
11117 }
11118 
11119 static void tg3_timer_start(struct tg3 *tp)
11120 {
11121 	tp->asf_counter   = tp->asf_multiplier;
11122 	tp->timer_counter = tp->timer_multiplier;
11123 
11124 	tp->timer.expires = jiffies + tp->timer_offset;
11125 	add_timer(&tp->timer);
11126 }
11127 
11128 static void tg3_timer_stop(struct tg3 *tp)
11129 {
11130 	del_timer_sync(&tp->timer);
11131 }
11132 
11133 /* Restart hardware after configuration changes, self-test, etc.
11134  * Invoked with tp->lock held.
11135  */
11136 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11137 	__releases(tp->lock)
11138 	__acquires(tp->lock)
11139 {
11140 	int err;
11141 
11142 	err = tg3_init_hw(tp, reset_phy);
11143 	if (err) {
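		/* Recovery failed; take the interface down.  tp->lock must
		 * be dropped around dev_close() and re-acquired afterwards,
		 * as the sparse annotations above indicate.
		 */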
11144 		netdev_err(tp->dev,
11145 			   "Failed to re-initialize device, aborting\n");
11146 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147 		tg3_full_unlock(tp);
11148 		tg3_timer_stop(tp);
11149 		tp->irq_sync = 0;
11150 		tg3_napi_enable(tp);
11151 		dev_close(tp->dev);
11152 		tg3_full_lock(tp, 0);
11153 	}
11154 	return err;
11155 }
11156 
11157 static void tg3_reset_task(struct work_struct *work)
11158 {
11159 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11160 	int err;
11161 
11162 	rtnl_lock();
11163 	tg3_full_lock(tp, 0);
11164 
11165 	if (!netif_running(tp->dev)) {
11166 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11167 		tg3_full_unlock(tp);
11168 		rtnl_unlock();
11169 		return;
11170 	}
11171 
11172 	tg3_full_unlock(tp);
11173 
11174 	tg3_phy_stop(tp);
11175 
11176 	tg3_netif_stop(tp);
11177 
11178 	tg3_full_lock(tp, 1);
11179 
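	/* If the reset was prompted by a TX timeout, assume mailbox write
	 * reordering may be at fault and switch to the reordering-safe,
	 * flushed mailbox write methods before reinitializing.
	 */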
11180 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11181 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11182 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11183 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11184 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11185 	}
11186 
11187 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11188 	err = tg3_init_hw(tp, true);
11189 	if (err)
11190 		goto out;
11191 
11192 	tg3_netif_start(tp);
11193 
11194 out:
11195 	tg3_full_unlock(tp);
11196 
11197 	if (!err)
11198 		tg3_phy_start(tp);
11199 
11200 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11201 	rtnl_unlock();
11202 }
11203 
11204 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11205 {
11206 	irq_handler_t fn;
11207 	unsigned long flags;
11208 	char *name;
11209 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11210 
11211 	if (tp->irq_cnt == 1)
11212 		name = tp->dev->name;
11213 	else {
11214 		name = &tnapi->irq_lbl[0];
11215 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11216 			snprintf(name, IFNAMSIZ,
11217 				 "%s-txrx-%d", tp->dev->name, irq_num);
11218 		else if (tnapi->tx_buffers)
11219 			snprintf(name, IFNAMSIZ,
11220 				 "%s-tx-%d", tp->dev->name, irq_num);
11221 		else if (tnapi->rx_rcb)
11222 			snprintf(name, IFNAMSIZ,
11223 				 "%s-rx-%d", tp->dev->name, irq_num);
11224 		else
11225 			snprintf(name, IFNAMSIZ,
11226 				 "%s-%d", tp->dev->name, irq_num);
11227 		name[IFNAMSIZ-1] = 0;
11228 	}
11229 
11230 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11231 		fn = tg3_msi;
11232 		if (tg3_flag(tp, 1SHOT_MSI))
11233 			fn = tg3_msi_1shot;
11234 		flags = 0;
11235 	} else {
11236 		fn = tg3_interrupt;
11237 		if (tg3_flag(tp, TAGGED_STATUS))
11238 			fn = tg3_interrupt_tagged;
11239 		flags = IRQF_SHARED;
11240 	}
11241 
11242 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11243 }
11244 
11245 static int tg3_test_interrupt(struct tg3 *tp)
11246 {
11247 	struct tg3_napi *tnapi = &tp->napi[0];
11248 	struct net_device *dev = tp->dev;
11249 	int err, i, intr_ok = 0;
11250 	u32 val;
11251 
11252 	if (!netif_running(dev))
11253 		return -ENODEV;
11254 
11255 	tg3_disable_ints(tp);
11256 
11257 	free_irq(tnapi->irq_vec, tnapi);
11258 
11259 	/*
11260 	 * Turn off MSI one shot mode.  Otherwise this test has no
11261 	 * observable way to know whether the interrupt was delivered.
11262 	 */
11263 	if (tg3_flag(tp, 57765_PLUS)) {
11264 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11265 		tw32(MSGINT_MODE, val);
11266 	}
11267 
11268 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11269 			  IRQF_SHARED, dev->name, tnapi);
11270 	if (err)
11271 		return err;
11272 
11273 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11274 	tg3_enable_ints(tp);
11275 
11276 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11277 	       tnapi->coal_now);
11278 
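	/* Poll for up to ~50 ms for evidence of delivery: a non-zero
	 * interrupt mailbox or masked PCI interrupts both indicate that
	 * the test interrupt arrived.
	 */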
11279 	for (i = 0; i < 5; i++) {
11280 		u32 int_mbox, misc_host_ctrl;
11281 
11282 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11283 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11284 
11285 		if ((int_mbox != 0) ||
11286 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11287 			intr_ok = 1;
11288 			break;
11289 		}
11290 
11291 		if (tg3_flag(tp, 57765_PLUS) &&
11292 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11293 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11294 
11295 		msleep(10);
11296 	}
11297 
11298 	tg3_disable_ints(tp);
11299 
11300 	free_irq(tnapi->irq_vec, tnapi);
11301 
11302 	err = tg3_request_irq(tp, 0);
11303 
11304 	if (err)
11305 		return err;
11306 
11307 	if (intr_ok) {
11308 		/* Reenable MSI one shot mode. */
11309 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11310 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11311 			tw32(MSGINT_MODE, val);
11312 		}
11313 		return 0;
11314 	}
11315 
11316 	return -EIO;
11317 }
11318 
/* Returns 0 if the MSI test succeeds, or if the MSI test fails and
 * INTx mode is successfully restored.
 */
11322 static int tg3_test_msi(struct tg3 *tp)
11323 {
11324 	int err;
11325 	u16 pci_cmd;
11326 
11327 	if (!tg3_flag(tp, USING_MSI))
11328 		return 0;
11329 
11330 	/* Turn off SERR reporting in case MSI terminates with Master
11331 	 * Abort.
11332 	 */
11333 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11334 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11335 			      pci_cmd & ~PCI_COMMAND_SERR);
11336 
11337 	err = tg3_test_interrupt(tp);
11338 
11339 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11340 
11341 	if (!err)
11342 		return 0;
11343 
11344 	/* other failures */
11345 	if (err != -EIO)
11346 		return err;
11347 
11348 	/* MSI test failed, go back to INTx mode */
11349 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11350 		    "to INTx mode. Please report this failure to the PCI "
11351 		    "maintainer and include system chipset information\n");
11352 
11353 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11354 
11355 	pci_disable_msi(tp->pdev);
11356 
11357 	tg3_flag_clear(tp, USING_MSI);
11358 	tp->napi[0].irq_vec = tp->pdev->irq;
11359 
11360 	err = tg3_request_irq(tp, 0);
11361 	if (err)
11362 		return err;
11363 
11364 	/* Need to reset the chip because the MSI cycle may have terminated
11365 	 * with Master Abort.
11366 	 */
11367 	tg3_full_lock(tp, 1);
11368 
11369 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11370 	err = tg3_init_hw(tp, true);
11371 
11372 	tg3_full_unlock(tp);
11373 
11374 	if (err)
11375 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11376 
11377 	return err;
11378 }
11379 
11380 static int tg3_request_firmware(struct tg3 *tp)
11381 {
11382 	const struct tg3_firmware_hdr *fw_hdr;
11383 
11384 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11385 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11386 			   tp->fw_needed);
11387 		return -ENOENT;
11388 	}
11389 
11390 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11391 
	/* The firmware blob starts with version numbers, followed by
	 * the start address and the _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
11396 
11397 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11398 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11399 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11400 			   tp->fw_len, tp->fw_needed);
11401 		release_firmware(tp->fw);
11402 		tp->fw = NULL;
11403 		return -EINVAL;
11404 	}
11405 
11406 	/* We no longer need firmware; we have it. */
11407 	tp->fw_needed = NULL;
11408 	return 0;
11409 }
11410 
11411 static u32 tg3_irq_count(struct tg3 *tp)
11412 {
11413 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11414 
11415 	if (irq_cnt > 1) {
11416 		/* We want as many rx rings enabled as there are cpus.
11417 		 * In multiqueue MSI-X mode, the first MSI-X vector
11418 		 * only deals with link interrupts, etc, so we add
11419 		 * one to the number of vectors we are requesting.
11420 		 */
11421 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11422 	}
11423 
11424 	return irq_cnt;
11425 }
11426 
11427 static bool tg3_enable_msix(struct tg3 *tp)
11428 {
11429 	int i, rc;
11430 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11431 
11432 	tp->txq_cnt = tp->txq_req;
11433 	tp->rxq_cnt = tp->rxq_req;
11434 	if (!tp->rxq_cnt)
11435 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11436 	if (tp->rxq_cnt > tp->rxq_max)
11437 		tp->rxq_cnt = tp->rxq_max;
11438 
11439 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11440 	 * scheduling of the TX rings can cause starvation of rings with
11441 	 * small packets when other rings have TSO or jumbo packets.
11442 	 */
11443 	if (!tp->txq_req)
11444 		tp->txq_cnt = 1;
11445 
11446 	tp->irq_cnt = tg3_irq_count(tp);
11447 
11448 	for (i = 0; i < tp->irq_max; i++) {
11449 		msix_ent[i].entry  = i;
11450 		msix_ent[i].vector = 0;
11451 	}
11452 
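	/* Ask for one vector per ring (plus one for link/misc in the
	 * multiqueue case); accept anything down to a single vector and
	 * shrink the ring counts to match below.
	 */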
11453 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11454 	if (rc < 0) {
11455 		return false;
11456 	} else if (rc < tp->irq_cnt) {
11457 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11458 			      tp->irq_cnt, rc);
11459 		tp->irq_cnt = rc;
11460 		tp->rxq_cnt = max(rc - 1, 1);
11461 		if (tp->txq_cnt)
11462 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11463 	}
11464 
11465 	for (i = 0; i < tp->irq_max; i++)
11466 		tp->napi[i].irq_vec = msix_ent[i].vector;
11467 
11468 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11469 		pci_disable_msix(tp->pdev);
11470 		return false;
11471 	}
11472 
11473 	if (tp->irq_cnt == 1)
11474 		return true;
11475 
11476 	tg3_flag_set(tp, ENABLE_RSS);
11477 
11478 	if (tp->txq_cnt > 1)
11479 		tg3_flag_set(tp, ENABLE_TSS);
11480 
11481 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11482 
11483 	return true;
11484 }
11485 
11486 static void tg3_ints_init(struct tg3 *tp)
11487 {
11488 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11489 	    !tg3_flag(tp, TAGGED_STATUS)) {
11490 		/* All MSI supporting chips should support tagged
11491 		 * status.  Assert that this is the case.
11492 		 */
11493 		netdev_warn(tp->dev,
11494 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11495 		goto defcfg;
11496 	}
11497 
11498 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11499 		tg3_flag_set(tp, USING_MSIX);
11500 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11501 		tg3_flag_set(tp, USING_MSI);
11502 
11503 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11504 		u32 msi_mode = tr32(MSGINT_MODE);
11505 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11506 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11507 		if (!tg3_flag(tp, 1SHOT_MSI))
11508 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11509 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11510 	}
11511 defcfg:
11512 	if (!tg3_flag(tp, USING_MSIX)) {
11513 		tp->irq_cnt = 1;
11514 		tp->napi[0].irq_vec = tp->pdev->irq;
11515 	}
11516 
11517 	if (tp->irq_cnt == 1) {
11518 		tp->txq_cnt = 1;
11519 		tp->rxq_cnt = 1;
11520 		netif_set_real_num_tx_queues(tp->dev, 1);
11521 		netif_set_real_num_rx_queues(tp->dev, 1);
11522 	}
11523 }
11524 
11525 static void tg3_ints_fini(struct tg3 *tp)
11526 {
11527 	if (tg3_flag(tp, USING_MSIX))
11528 		pci_disable_msix(tp->pdev);
11529 	else if (tg3_flag(tp, USING_MSI))
11530 		pci_disable_msi(tp->pdev);
11531 	tg3_flag_clear(tp, USING_MSI);
11532 	tg3_flag_clear(tp, USING_MSIX);
11533 	tg3_flag_clear(tp, ENABLE_RSS);
11534 	tg3_flag_clear(tp, ENABLE_TSS);
11535 }
11536 
11537 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11538 		     bool init)
11539 {
11540 	struct net_device *dev = tp->dev;
11541 	int i, err;
11542 
11543 	/*
11544 	 * Setup interrupts first so we know how
11545 	 * many NAPI resources to allocate
11546 	 */
11547 	tg3_ints_init(tp);
11548 
11549 	tg3_rss_check_indir_tbl(tp);
11550 
11551 	/* The placement of this call is tied
11552 	 * to the setup and use of Host TX descriptors.
11553 	 */
11554 	err = tg3_alloc_consistent(tp);
11555 	if (err)
11556 		goto out_ints_fini;
11557 
11558 	tg3_napi_init(tp);
11559 
11560 	tg3_napi_enable(tp);
11561 
11562 	for (i = 0; i < tp->irq_cnt; i++) {
11563 		err = tg3_request_irq(tp, i);
11564 		if (err) {
11565 			for (i--; i >= 0; i--) {
11566 				struct tg3_napi *tnapi = &tp->napi[i];
11567 
11568 				free_irq(tnapi->irq_vec, tnapi);
11569 			}
11570 			goto out_napi_fini;
11571 		}
11572 	}
11573 
11574 	tg3_full_lock(tp, 0);
11575 
11576 	if (init)
11577 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11578 
11579 	err = tg3_init_hw(tp, reset_phy);
11580 	if (err) {
11581 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11582 		tg3_free_rings(tp);
11583 	}
11584 
11585 	tg3_full_unlock(tp);
11586 
11587 	if (err)
11588 		goto out_free_irq;
11589 
11590 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11591 		err = tg3_test_msi(tp);
11592 
11593 		if (err) {
11594 			tg3_full_lock(tp, 0);
11595 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11596 			tg3_free_rings(tp);
11597 			tg3_full_unlock(tp);
11598 
11599 			goto out_napi_fini;
11600 		}
11601 
11602 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11603 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11604 
11605 			tw32(PCIE_TRANSACTION_CFG,
11606 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11607 		}
11608 	}
11609 
11610 	tg3_phy_start(tp);
11611 
11612 	tg3_hwmon_open(tp);
11613 
11614 	tg3_full_lock(tp, 0);
11615 
11616 	tg3_timer_start(tp);
11617 	tg3_flag_set(tp, INIT_COMPLETE);
11618 	tg3_enable_ints(tp);
11619 
11620 	tg3_ptp_resume(tp);
11621 
11622 	tg3_full_unlock(tp);
11623 
11624 	netif_tx_start_all_queues(dev);
11625 
11626 	/*
11627 	 * Reset loopback feature if it was turned on while the device was down
11628 	 * make sure that it's installed properly now.
11629 	 */
11630 	if (dev->features & NETIF_F_LOOPBACK)
11631 		tg3_set_loopback(dev, dev->features);
11632 
11633 	return 0;
11634 
11635 out_free_irq:
11636 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11637 		struct tg3_napi *tnapi = &tp->napi[i];
11638 		free_irq(tnapi->irq_vec, tnapi);
11639 	}
11640 
11641 out_napi_fini:
11642 	tg3_napi_disable(tp);
11643 	tg3_napi_fini(tp);
11644 	tg3_free_consistent(tp);
11645 
11646 out_ints_fini:
11647 	tg3_ints_fini(tp);
11648 
11649 	return err;
11650 }
11651 
11652 static void tg3_stop(struct tg3 *tp)
11653 {
11654 	int i;
11655 
11656 	tg3_reset_task_cancel(tp);
11657 	tg3_netif_stop(tp);
11658 
11659 	tg3_timer_stop(tp);
11660 
11661 	tg3_hwmon_close(tp);
11662 
11663 	tg3_phy_stop(tp);
11664 
11665 	tg3_full_lock(tp, 1);
11666 
11667 	tg3_disable_ints(tp);
11668 
11669 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11670 	tg3_free_rings(tp);
11671 	tg3_flag_clear(tp, INIT_COMPLETE);
11672 
11673 	tg3_full_unlock(tp);
11674 
11675 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11676 		struct tg3_napi *tnapi = &tp->napi[i];
11677 		free_irq(tnapi->irq_vec, tnapi);
11678 	}
11679 
11680 	tg3_ints_fini(tp);
11681 
11682 	tg3_napi_fini(tp);
11683 
11684 	tg3_free_consistent(tp);
11685 }
11686 
11687 static int tg3_open(struct net_device *dev)
11688 {
11689 	struct tg3 *tp = netdev_priv(dev);
11690 	int err;
11691 
11692 	if (tp->pcierr_recovery) {
11693 		netdev_err(dev, "Failed to open device. PCI error recovery "
11694 			   "in progress\n");
11695 		return -EAGAIN;
11696 	}
11697 
11698 	if (tp->fw_needed) {
11699 		err = tg3_request_firmware(tp);
11700 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11701 			if (err) {
11702 				netdev_warn(tp->dev, "EEE capability disabled\n");
11703 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11704 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11705 				netdev_warn(tp->dev, "EEE capability restored\n");
11706 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11707 			}
11708 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11709 			if (err)
11710 				return err;
11711 		} else if (err) {
11712 			netdev_warn(tp->dev, "TSO capability disabled\n");
11713 			tg3_flag_clear(tp, TSO_CAPABLE);
11714 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11715 			netdev_notice(tp->dev, "TSO capability restored\n");
11716 			tg3_flag_set(tp, TSO_CAPABLE);
11717 		}
11718 	}
11719 
11720 	tg3_carrier_off(tp);
11721 
11722 	err = tg3_power_up(tp);
11723 	if (err)
11724 		return err;
11725 
11726 	tg3_full_lock(tp, 0);
11727 
11728 	tg3_disable_ints(tp);
11729 	tg3_flag_clear(tp, INIT_COMPLETE);
11730 
11731 	tg3_full_unlock(tp);
11732 
11733 	err = tg3_start(tp,
11734 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11735 			true, true);
11736 	if (err) {
11737 		tg3_frob_aux_power(tp, false);
11738 		pci_set_power_state(tp->pdev, PCI_D3hot);
11739 	}
11740 
11741 	return err;
11742 }
11743 
11744 static int tg3_close(struct net_device *dev)
11745 {
11746 	struct tg3 *tp = netdev_priv(dev);
11747 
11748 	if (tp->pcierr_recovery) {
11749 		netdev_err(dev, "Failed to close device. PCI error recovery "
11750 			   "in progress\n");
11751 		return -EAGAIN;
11752 	}
11753 
11754 	tg3_stop(tp);
11755 
11756 	if (pci_device_is_present(tp->pdev)) {
11757 		tg3_power_down_prepare(tp);
11758 
11759 		tg3_carrier_off(tp);
11760 	}
11761 	return 0;
11762 }
11763 
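/* Hardware statistics are kept as {high, low} 32-bit pairs in host
 * memory; fold one pair into a single u64.
 */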
11764 static inline u64 get_stat64(tg3_stat64_t *val)
11765 {
11766        return ((u64)val->high << 32) | ((u64)val->low);
11767 }
11768 
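/* On 5700/5701 copper devices the CRC error count comes from the PHY's
 * test-register counter, accumulated into tp->phy_crc_errors; all other
 * devices report the MAC's FCS error counter.
 */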
11769 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11770 {
11771 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11772 
11773 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11774 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11775 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11776 		u32 val;
11777 
11778 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11779 			tg3_writephy(tp, MII_TG3_TEST1,
11780 				     val | MII_TG3_TEST1_CRC_EN);
11781 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11782 		} else
11783 			val = 0;
11784 
11785 		tp->phy_crc_errors += val;
11786 
11787 		return tp->phy_crc_errors;
11788 	}
11789 
11790 	return get_stat64(&hw_stats->rx_fcs_errors);
11791 }
11792 
11793 #define ESTAT_ADD(member) \
11794 	estats->member =	old_estats->member + \
11795 				get_stat64(&hw_stats->member)
11796 
11797 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11798 {
11799 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11800 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11801 
11802 	ESTAT_ADD(rx_octets);
11803 	ESTAT_ADD(rx_fragments);
11804 	ESTAT_ADD(rx_ucast_packets);
11805 	ESTAT_ADD(rx_mcast_packets);
11806 	ESTAT_ADD(rx_bcast_packets);
11807 	ESTAT_ADD(rx_fcs_errors);
11808 	ESTAT_ADD(rx_align_errors);
11809 	ESTAT_ADD(rx_xon_pause_rcvd);
11810 	ESTAT_ADD(rx_xoff_pause_rcvd);
11811 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11812 	ESTAT_ADD(rx_xoff_entered);
11813 	ESTAT_ADD(rx_frame_too_long_errors);
11814 	ESTAT_ADD(rx_jabbers);
11815 	ESTAT_ADD(rx_undersize_packets);
11816 	ESTAT_ADD(rx_in_length_errors);
11817 	ESTAT_ADD(rx_out_length_errors);
11818 	ESTAT_ADD(rx_64_or_less_octet_packets);
11819 	ESTAT_ADD(rx_65_to_127_octet_packets);
11820 	ESTAT_ADD(rx_128_to_255_octet_packets);
11821 	ESTAT_ADD(rx_256_to_511_octet_packets);
11822 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11823 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11824 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11825 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11826 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11827 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11828 
11829 	ESTAT_ADD(tx_octets);
11830 	ESTAT_ADD(tx_collisions);
11831 	ESTAT_ADD(tx_xon_sent);
11832 	ESTAT_ADD(tx_xoff_sent);
11833 	ESTAT_ADD(tx_flow_control);
11834 	ESTAT_ADD(tx_mac_errors);
11835 	ESTAT_ADD(tx_single_collisions);
11836 	ESTAT_ADD(tx_mult_collisions);
11837 	ESTAT_ADD(tx_deferred);
11838 	ESTAT_ADD(tx_excessive_collisions);
11839 	ESTAT_ADD(tx_late_collisions);
11840 	ESTAT_ADD(tx_collide_2times);
11841 	ESTAT_ADD(tx_collide_3times);
11842 	ESTAT_ADD(tx_collide_4times);
11843 	ESTAT_ADD(tx_collide_5times);
11844 	ESTAT_ADD(tx_collide_6times);
11845 	ESTAT_ADD(tx_collide_7times);
11846 	ESTAT_ADD(tx_collide_8times);
11847 	ESTAT_ADD(tx_collide_9times);
11848 	ESTAT_ADD(tx_collide_10times);
11849 	ESTAT_ADD(tx_collide_11times);
11850 	ESTAT_ADD(tx_collide_12times);
11851 	ESTAT_ADD(tx_collide_13times);
11852 	ESTAT_ADD(tx_collide_14times);
11853 	ESTAT_ADD(tx_collide_15times);
11854 	ESTAT_ADD(tx_ucast_packets);
11855 	ESTAT_ADD(tx_mcast_packets);
11856 	ESTAT_ADD(tx_bcast_packets);
11857 	ESTAT_ADD(tx_carrier_sense_errors);
11858 	ESTAT_ADD(tx_discards);
11859 	ESTAT_ADD(tx_errors);
11860 
11861 	ESTAT_ADD(dma_writeq_full);
11862 	ESTAT_ADD(dma_write_prioq_full);
11863 	ESTAT_ADD(rxbds_empty);
11864 	ESTAT_ADD(rx_discards);
11865 	ESTAT_ADD(rx_errors);
11866 	ESTAT_ADD(rx_threshold_hit);
11867 
11868 	ESTAT_ADD(dma_readq_full);
11869 	ESTAT_ADD(dma_read_prioq_full);
11870 	ESTAT_ADD(tx_comp_queue_full);
11871 
11872 	ESTAT_ADD(ring_set_send_prod_index);
11873 	ESTAT_ADD(ring_status_update);
11874 	ESTAT_ADD(nic_irqs);
11875 	ESTAT_ADD(nic_avoided_irqs);
11876 	ESTAT_ADD(nic_tx_threshold_hit);
11877 
11878 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11879 }
11880 
11881 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11882 {
11883 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11884 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11885 
11886 	stats->rx_packets = old_stats->rx_packets +
11887 		get_stat64(&hw_stats->rx_ucast_packets) +
11888 		get_stat64(&hw_stats->rx_mcast_packets) +
11889 		get_stat64(&hw_stats->rx_bcast_packets);
11890 
11891 	stats->tx_packets = old_stats->tx_packets +
11892 		get_stat64(&hw_stats->tx_ucast_packets) +
11893 		get_stat64(&hw_stats->tx_mcast_packets) +
11894 		get_stat64(&hw_stats->tx_bcast_packets);
11895 
11896 	stats->rx_bytes = old_stats->rx_bytes +
11897 		get_stat64(&hw_stats->rx_octets);
11898 	stats->tx_bytes = old_stats->tx_bytes +
11899 		get_stat64(&hw_stats->tx_octets);
11900 
11901 	stats->rx_errors = old_stats->rx_errors +
11902 		get_stat64(&hw_stats->rx_errors);
11903 	stats->tx_errors = old_stats->tx_errors +
11904 		get_stat64(&hw_stats->tx_errors) +
11905 		get_stat64(&hw_stats->tx_mac_errors) +
11906 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11907 		get_stat64(&hw_stats->tx_discards);
11908 
11909 	stats->multicast = old_stats->multicast +
11910 		get_stat64(&hw_stats->rx_mcast_packets);
11911 	stats->collisions = old_stats->collisions +
11912 		get_stat64(&hw_stats->tx_collisions);
11913 
11914 	stats->rx_length_errors = old_stats->rx_length_errors +
11915 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11916 		get_stat64(&hw_stats->rx_undersize_packets);
11917 
11918 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11919 		get_stat64(&hw_stats->rx_align_errors);
11920 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11921 		get_stat64(&hw_stats->tx_discards);
11922 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11923 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11924 
11925 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11926 		tg3_calc_crc_errors(tp);
11927 
11928 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11929 		get_stat64(&hw_stats->rx_discards);
11930 
11931 	stats->rx_dropped = tp->rx_dropped;
11932 	stats->tx_dropped = tp->tx_dropped;
11933 }
11934 
11935 static int tg3_get_regs_len(struct net_device *dev)
11936 {
11937 	return TG3_REG_BLK_SIZE;
11938 }
11939 
11940 static void tg3_get_regs(struct net_device *dev,
11941 		struct ethtool_regs *regs, void *_p)
11942 {
11943 	struct tg3 *tp = netdev_priv(dev);
11944 
11945 	regs->version = 0;
11946 
11947 	memset(_p, 0, TG3_REG_BLK_SIZE);
11948 
11949 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11950 		return;
11951 
11952 	tg3_full_lock(tp, 0);
11953 
11954 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11955 
11956 	tg3_full_unlock(tp);
11957 }
11958 
11959 static int tg3_get_eeprom_len(struct net_device *dev)
11960 {
11961 	struct tg3 *tp = netdev_priv(dev);
11962 
11963 	return tp->nvram_size;
11964 }
11965 
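/* Backs "ethtool -e <dev>".  Reads go through the NVRAM interface one
 * 32-bit word at a time; unaligned head and tail bytes are handled
 * separately around the aligned bulk loop.
 */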
11966 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11967 {
11968 	struct tg3 *tp = netdev_priv(dev);
11969 	int ret, cpmu_restore = 0;
11970 	u8  *pd;
11971 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11972 	__be32 val;
11973 
11974 	if (tg3_flag(tp, NO_NVRAM))
11975 		return -EINVAL;
11976 
11977 	offset = eeprom->offset;
11978 	len = eeprom->len;
11979 	eeprom->len = 0;
11980 
11981 	eeprom->magic = TG3_EEPROM_MAGIC;
11982 
	/* Override the clock, link-aware and link-idle modes */
11984 	if (tg3_flag(tp, CPMU_PRESENT)) {
11985 		cpmu_val = tr32(TG3_CPMU_CTRL);
11986 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11987 				CPMU_CTRL_LINK_IDLE_MODE)) {
11988 			tw32(TG3_CPMU_CTRL, cpmu_val &
11989 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
11990 					     CPMU_CTRL_LINK_IDLE_MODE));
11991 			cpmu_restore = 1;
11992 		}
11993 	}
11994 	tg3_override_clk(tp);
11995 
11996 	if (offset & 3) {
11997 		/* adjustments to start on required 4 byte boundary */
11998 		b_offset = offset & 3;
11999 		b_count = 4 - b_offset;
12000 		if (b_count > len) {
12001 			/* i.e. offset=1 len=2 */
12002 			b_count = len;
12003 		}
12004 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12005 		if (ret)
12006 			goto eeprom_done;
12007 		memcpy(data, ((char *)&val) + b_offset, b_count);
12008 		len -= b_count;
12009 		offset += b_count;
12010 		eeprom->len += b_count;
12011 	}
12012 
12013 	/* read bytes up to the last 4 byte boundary */
12014 	pd = &data[eeprom->len];
12015 	for (i = 0; i < (len - (len & 3)); i += 4) {
12016 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12017 		if (ret) {
12018 			if (i)
12019 				i -= 4;
12020 			eeprom->len += i;
12021 			goto eeprom_done;
12022 		}
12023 		memcpy(pd + i, &val, 4);
12024 		if (need_resched()) {
12025 			if (signal_pending(current)) {
12026 				eeprom->len += i;
12027 				ret = -EINTR;
12028 				goto eeprom_done;
12029 			}
12030 			cond_resched();
12031 		}
12032 	}
12033 	eeprom->len += i;
12034 
12035 	if (len & 3) {
12036 		/* read last bytes not ending on 4 byte boundary */
12037 		pd = &data[eeprom->len];
12038 		b_count = len & 3;
12039 		b_offset = offset + len - b_count;
12040 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12041 		if (ret)
12042 			goto eeprom_done;
12043 		memcpy(pd, &val, b_count);
12044 		eeprom->len += b_count;
12045 	}
12046 	ret = 0;
12047 
12048 eeprom_done:
	/* Restore the clock, link-aware and link-idle modes */
12050 	tg3_restore_clk(tp);
12051 	if (cpmu_restore)
12052 		tw32(TG3_CPMU_CTRL, cpmu_val);
12053 
12054 	return ret;
12055 }
12056 
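/* Backs "ethtool -E <dev>".  NVRAM writes must cover whole 4-byte
 * aligned words, so unaligned or odd-length requests are widened by
 * reading the boundary word(s) and merging the caller's data in.
 */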
12057 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12058 {
12059 	struct tg3 *tp = netdev_priv(dev);
12060 	int ret;
12061 	u32 offset, len, b_offset, odd_len;
12062 	u8 *buf;
12063 	__be32 start = 0, end;
12064 
12065 	if (tg3_flag(tp, NO_NVRAM) ||
12066 	    eeprom->magic != TG3_EEPROM_MAGIC)
12067 		return -EINVAL;
12068 
12069 	offset = eeprom->offset;
12070 	len = eeprom->len;
12071 
12072 	if ((b_offset = (offset & 3))) {
12073 		/* adjustments to start on required 4 byte boundary */
12074 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12075 		if (ret)
12076 			return ret;
12077 		len += b_offset;
12078 		offset &= ~3;
12079 		if (len < 4)
12080 			len = 4;
12081 	}
12082 
12083 	odd_len = 0;
12084 	if (len & 3) {
12085 		/* adjustments to end on required 4 byte boundary */
12086 		odd_len = 1;
12087 		len = (len + 3) & ~3;
12088 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12089 		if (ret)
12090 			return ret;
12091 	}
12092 
12093 	buf = data;
12094 	if (b_offset || odd_len) {
12095 		buf = kmalloc(len, GFP_KERNEL);
12096 		if (!buf)
12097 			return -ENOMEM;
12098 		if (b_offset)
12099 			memcpy(buf, &start, 4);
12100 		if (odd_len)
12101 			memcpy(buf+len-4, &end, 4);
12102 		memcpy(buf + b_offset, data, eeprom->len);
12103 	}
12104 
12105 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12106 
12107 	if (buf != data)
12108 		kfree(buf);
12109 
12110 	return ret;
12111 }
12112 
12113 static int tg3_get_link_ksettings(struct net_device *dev,
12114 				  struct ethtool_link_ksettings *cmd)
12115 {
12116 	struct tg3 *tp = netdev_priv(dev);
12117 	u32 supported, advertising;
12118 
12119 	if (tg3_flag(tp, USE_PHYLIB)) {
12120 		struct phy_device *phydev;
12121 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12122 			return -EAGAIN;
12123 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12124 		phy_ethtool_ksettings_get(phydev, cmd);
12125 
12126 		return 0;
12127 	}
12128 
12129 	supported = (SUPPORTED_Autoneg);
12130 
12131 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12132 		supported |= (SUPPORTED_1000baseT_Half |
12133 			      SUPPORTED_1000baseT_Full);
12134 
12135 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12136 		supported |= (SUPPORTED_100baseT_Half |
12137 			      SUPPORTED_100baseT_Full |
12138 			      SUPPORTED_10baseT_Half |
12139 			      SUPPORTED_10baseT_Full |
12140 			      SUPPORTED_TP);
12141 		cmd->base.port = PORT_TP;
12142 	} else {
12143 		supported |= SUPPORTED_FIBRE;
12144 		cmd->base.port = PORT_FIBRE;
12145 	}
12146 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12147 						supported);
12148 
12149 	advertising = tp->link_config.advertising;
12150 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12151 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12152 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12153 				advertising |= ADVERTISED_Pause;
12154 			} else {
12155 				advertising |= ADVERTISED_Pause |
12156 					ADVERTISED_Asym_Pause;
12157 			}
12158 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12159 			advertising |= ADVERTISED_Asym_Pause;
12160 		}
12161 	}
12162 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12163 						advertising);
12164 
12165 	if (netif_running(dev) && tp->link_up) {
12166 		cmd->base.speed = tp->link_config.active_speed;
12167 		cmd->base.duplex = tp->link_config.active_duplex;
12168 		ethtool_convert_legacy_u32_to_link_mode(
12169 			cmd->link_modes.lp_advertising,
12170 			tp->link_config.rmt_adv);
12171 
12172 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12173 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12174 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12175 			else
12176 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12177 		}
12178 	} else {
12179 		cmd->base.speed = SPEED_UNKNOWN;
12180 		cmd->base.duplex = DUPLEX_UNKNOWN;
12181 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12182 	}
12183 	cmd->base.phy_address = tp->phy_addr;
12184 	cmd->base.autoneg = tp->link_config.autoneg;
12185 	return 0;
12186 }
12187 
12188 static int tg3_set_link_ksettings(struct net_device *dev,
12189 				  const struct ethtool_link_ksettings *cmd)
12190 {
12191 	struct tg3 *tp = netdev_priv(dev);
12192 	u32 speed = cmd->base.speed;
12193 	u32 advertising;
12194 
12195 	if (tg3_flag(tp, USE_PHYLIB)) {
12196 		struct phy_device *phydev;
12197 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12198 			return -EAGAIN;
12199 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12200 		return phy_ethtool_ksettings_set(phydev, cmd);
12201 	}
12202 
12203 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12204 	    cmd->base.autoneg != AUTONEG_DISABLE)
12205 		return -EINVAL;
12206 
12207 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12208 	    cmd->base.duplex != DUPLEX_FULL &&
12209 	    cmd->base.duplex != DUPLEX_HALF)
12210 		return -EINVAL;
12211 
12212 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12213 						cmd->link_modes.advertising);
12214 
12215 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12216 		u32 mask = ADVERTISED_Autoneg |
12217 			   ADVERTISED_Pause |
12218 			   ADVERTISED_Asym_Pause;
12219 
12220 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12221 			mask |= ADVERTISED_1000baseT_Half |
12222 				ADVERTISED_1000baseT_Full;
12223 
12224 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12225 			mask |= ADVERTISED_100baseT_Half |
12226 				ADVERTISED_100baseT_Full |
12227 				ADVERTISED_10baseT_Half |
12228 				ADVERTISED_10baseT_Full |
12229 				ADVERTISED_TP;
12230 		else
12231 			mask |= ADVERTISED_FIBRE;
12232 
12233 		if (advertising & ~mask)
12234 			return -EINVAL;
12235 
12236 		mask &= (ADVERTISED_1000baseT_Half |
12237 			 ADVERTISED_1000baseT_Full |
12238 			 ADVERTISED_100baseT_Half |
12239 			 ADVERTISED_100baseT_Full |
12240 			 ADVERTISED_10baseT_Half |
12241 			 ADVERTISED_10baseT_Full);
12242 
12243 		advertising &= mask;
12244 	} else {
12245 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12246 			if (speed != SPEED_1000)
12247 				return -EINVAL;
12248 
12249 			if (cmd->base.duplex != DUPLEX_FULL)
12250 				return -EINVAL;
12251 		} else {
12252 			if (speed != SPEED_100 &&
12253 			    speed != SPEED_10)
12254 				return -EINVAL;
12255 		}
12256 	}
12257 
12258 	tg3_full_lock(tp, 0);
12259 
12260 	tp->link_config.autoneg = cmd->base.autoneg;
12261 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12262 		tp->link_config.advertising = (advertising |
12263 					      ADVERTISED_Autoneg);
12264 		tp->link_config.speed = SPEED_UNKNOWN;
12265 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12266 	} else {
12267 		tp->link_config.advertising = 0;
12268 		tp->link_config.speed = speed;
12269 		tp->link_config.duplex = cmd->base.duplex;
12270 	}
12271 
12272 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12273 
12274 	tg3_warn_mgmt_link_flap(tp);
12275 
12276 	if (netif_running(dev))
12277 		tg3_setup_phy(tp, true);
12278 
12279 	tg3_full_unlock(tp);
12280 
12281 	return 0;
12282 }
12283 
12284 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12285 {
12286 	struct tg3 *tp = netdev_priv(dev);
12287 
12288 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12289 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12290 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12291 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12292 }
12293 
12294 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12295 {
12296 	struct tg3 *tp = netdev_priv(dev);
12297 
12298 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12299 		wol->supported = WAKE_MAGIC;
12300 	else
12301 		wol->supported = 0;
12302 	wol->wolopts = 0;
12303 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12304 		wol->wolopts = WAKE_MAGIC;
12305 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12306 }
12307 
12308 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12309 {
12310 	struct tg3 *tp = netdev_priv(dev);
12311 	struct device *dp = &tp->pdev->dev;
12312 
12313 	if (wol->wolopts & ~WAKE_MAGIC)
12314 		return -EINVAL;
12315 	if ((wol->wolopts & WAKE_MAGIC) &&
12316 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12317 		return -EINVAL;
12318 
12319 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12320 
12321 	if (device_may_wakeup(dp))
12322 		tg3_flag_set(tp, WOL_ENABLE);
12323 	else
12324 		tg3_flag_clear(tp, WOL_ENABLE);
12325 
12326 	return 0;
12327 }
12328 
12329 static u32 tg3_get_msglevel(struct net_device *dev)
12330 {
12331 	struct tg3 *tp = netdev_priv(dev);
12332 	return tp->msg_enable;
12333 }
12334 
12335 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12336 {
12337 	struct tg3 *tp = netdev_priv(dev);
12338 	tp->msg_enable = value;
12339 }
12340 
12341 static int tg3_nway_reset(struct net_device *dev)
12342 {
12343 	struct tg3 *tp = netdev_priv(dev);
12344 	int r;
12345 
12346 	if (!netif_running(dev))
12347 		return -EAGAIN;
12348 
12349 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12350 		return -EINVAL;
12351 
12352 	tg3_warn_mgmt_link_flap(tp);
12353 
12354 	if (tg3_flag(tp, USE_PHYLIB)) {
12355 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12356 			return -EAGAIN;
12357 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12358 	} else {
12359 		u32 bmcr;
12360 
12361 		spin_lock_bh(&tp->lock);
12362 		r = -EINVAL;
		/* The result of this first read is discarded; only the
		 * second, checked read below is used.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12365 		    ((bmcr & BMCR_ANENABLE) ||
12366 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12367 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12368 						   BMCR_ANENABLE);
12369 			r = 0;
12370 		}
12371 		spin_unlock_bh(&tp->lock);
12372 	}
12373 
12374 	return r;
12375 }
12376 
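/* Backs "ethtool -g" (and "ethtool -G" below): the standard and jumbo
 * RX rings are sized independently, while the TX pending count is
 * applied to every TX ring.
 */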
12377 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12378 {
12379 	struct tg3 *tp = netdev_priv(dev);
12380 
12381 	ering->rx_max_pending = tp->rx_std_ring_mask;
12382 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12383 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12384 	else
12385 		ering->rx_jumbo_max_pending = 0;
12386 
12387 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12388 
12389 	ering->rx_pending = tp->rx_pending;
12390 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12391 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12392 	else
12393 		ering->rx_jumbo_pending = 0;
12394 
12395 	ering->tx_pending = tp->napi[0].tx_pending;
12396 }
12397 
12398 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12399 {
12400 	struct tg3 *tp = netdev_priv(dev);
12401 	int i, irq_sync = 0, err = 0;
12402 
12403 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12404 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12405 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12406 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12407 	    (tg3_flag(tp, TSO_BUG) &&
12408 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12409 		return -EINVAL;
12410 
12411 	if (netif_running(dev)) {
12412 		tg3_phy_stop(tp);
12413 		tg3_netif_stop(tp);
12414 		irq_sync = 1;
12415 	}
12416 
12417 	tg3_full_lock(tp, irq_sync);
12418 
12419 	tp->rx_pending = ering->rx_pending;
12420 
12421 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12422 	    tp->rx_pending > 63)
12423 		tp->rx_pending = 63;
12424 
12425 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12426 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12427 
12428 	for (i = 0; i < tp->irq_max; i++)
12429 		tp->napi[i].tx_pending = ering->tx_pending;
12430 
12431 	if (netif_running(dev)) {
12432 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12433 		err = tg3_restart_hw(tp, false);
12434 		if (!err)
12435 			tg3_netif_start(tp);
12436 	}
12437 
12438 	tg3_full_unlock(tp);
12439 
12440 	if (irq_sync && !err)
12441 		tg3_phy_start(tp);
12442 
12443 	return err;
12444 }
12445 
12446 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12447 {
12448 	struct tg3 *tp = netdev_priv(dev);
12449 
12450 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12451 
12452 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12453 		epause->rx_pause = 1;
12454 	else
12455 		epause->rx_pause = 0;
12456 
12457 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12458 		epause->tx_pause = 1;
12459 	else
12460 		epause->tx_pause = 0;
12461 }
12462 
12463 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12464 {
12465 	struct tg3 *tp = netdev_priv(dev);
12466 	int err = 0;
12467 
12468 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12469 		tg3_warn_mgmt_link_flap(tp);
12470 
12471 	if (tg3_flag(tp, USE_PHYLIB)) {
12472 		u32 newadv;
12473 		struct phy_device *phydev;
12474 
12475 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12476 
12477 		if (!(phydev->supported & SUPPORTED_Pause) ||
12478 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12479 		     (epause->rx_pause != epause->tx_pause)))
12480 			return -EINVAL;
12481 
12482 		tp->link_config.flowctrl = 0;
12483 		if (epause->rx_pause) {
12484 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12485 
12486 			if (epause->tx_pause) {
12487 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12488 				newadv = ADVERTISED_Pause;
12489 			} else
12490 				newadv = ADVERTISED_Pause |
12491 					 ADVERTISED_Asym_Pause;
12492 		} else if (epause->tx_pause) {
12493 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12494 			newadv = ADVERTISED_Asym_Pause;
12495 		} else
12496 			newadv = 0;
12497 
12498 		if (epause->autoneg)
12499 			tg3_flag_set(tp, PAUSE_AUTONEG);
12500 		else
12501 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12502 
12503 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12504 			u32 oldadv = phydev->advertising &
12505 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12506 			if (oldadv != newadv) {
12507 				phydev->advertising &=
12508 					~(ADVERTISED_Pause |
12509 					  ADVERTISED_Asym_Pause);
12510 				phydev->advertising |= newadv;
12511 				if (phydev->autoneg) {
12512 					/*
12513 					 * Always renegotiate the link to
12514 					 * inform our link partner of our
12515 					 * flow control settings, even if the
12516 					 * flow control is forced.  Let
12517 					 * tg3_adjust_link() do the final
12518 					 * flow control setup.
12519 					 */
12520 					return phy_start_aneg(phydev);
12521 				}
12522 			}
12523 
12524 			if (!epause->autoneg)
12525 				tg3_setup_flow_control(tp, 0, 0);
12526 		} else {
12527 			tp->link_config.advertising &=
12528 					~(ADVERTISED_Pause |
12529 					  ADVERTISED_Asym_Pause);
12530 			tp->link_config.advertising |= newadv;
12531 		}
12532 	} else {
12533 		int irq_sync = 0;
12534 
12535 		if (netif_running(dev)) {
12536 			tg3_netif_stop(tp);
12537 			irq_sync = 1;
12538 		}
12539 
12540 		tg3_full_lock(tp, irq_sync);
12541 
12542 		if (epause->autoneg)
12543 			tg3_flag_set(tp, PAUSE_AUTONEG);
12544 		else
12545 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12546 		if (epause->rx_pause)
12547 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12548 		else
12549 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12550 		if (epause->tx_pause)
12551 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12552 		else
12553 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12554 
12555 		if (netif_running(dev)) {
12556 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12557 			err = tg3_restart_hw(tp, false);
12558 			if (!err)
12559 				tg3_netif_start(tp);
12560 		}
12561 
12562 		tg3_full_unlock(tp);
12563 	}
12564 
12565 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12566 
12567 	return err;
12568 }
12569 
12570 static int tg3_get_sset_count(struct net_device *dev, int sset)
12571 {
12572 	switch (sset) {
12573 	case ETH_SS_TEST:
12574 		return TG3_NUM_TEST;
12575 	case ETH_SS_STATS:
12576 		return TG3_NUM_STATS;
12577 	default:
12578 		return -EOPNOTSUPP;
12579 	}
12580 }
12581 
12582 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12583 			 u32 *rules __always_unused)
12584 {
12585 	struct tg3 *tp = netdev_priv(dev);
12586 
12587 	if (!tg3_flag(tp, SUPPORT_MSIX))
12588 		return -EOPNOTSUPP;
12589 
12590 	switch (info->cmd) {
12591 	case ETHTOOL_GRXRINGS:
12592 		if (netif_running(tp->dev))
12593 			info->data = tp->rxq_cnt;
12594 		else {
12595 			info->data = num_online_cpus();
12596 			if (info->data > TG3_RSS_MAX_NUM_QS)
12597 				info->data = TG3_RSS_MAX_NUM_QS;
12598 		}
12599 
12600 		return 0;
12601 
12602 	default:
12603 		return -EOPNOTSUPP;
12604 	}
12605 }
12606 
12607 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12608 {
12609 	u32 size = 0;
12610 	struct tg3 *tp = netdev_priv(dev);
12611 
12612 	if (tg3_flag(tp, SUPPORT_MSIX))
12613 		size = TG3_RSS_INDIR_TBL_SIZE;
12614 
12615 	return size;
12616 }
12617 
12618 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12619 {
12620 	struct tg3 *tp = netdev_priv(dev);
12621 	int i;
12622 
12623 	if (hfunc)
12624 		*hfunc = ETH_RSS_HASH_TOP;
12625 	if (!indir)
12626 		return 0;
12627 
12628 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12629 		indir[i] = tp->rss_ind_tbl[i];
12630 
12631 	return 0;
12632 }
12633 
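/* Backs "ethtool -X <dev>".  Only the indirection table may be
 * changed; the hash key and hash function (Toeplitz) are fixed.
 */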
12634 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12635 			const u8 hfunc)
12636 {
12637 	struct tg3 *tp = netdev_priv(dev);
12638 	size_t i;
12639 
12640 	/* We require at least one supported parameter to be changed and no
12641 	 * change in any of the unsupported parameters
12642 	 */
12643 	if (key ||
12644 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12645 		return -EOPNOTSUPP;
12646 
12647 	if (!indir)
12648 		return 0;
12649 
12650 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12651 		tp->rss_ind_tbl[i] = indir[i];
12652 
12653 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12654 		return 0;
12655 
12656 	/* It is legal to write the indirection
12657 	 * table while the device is running.
12658 	 */
12659 	tg3_full_lock(tp, 0);
12660 	tg3_rss_write_indir_tbl(tp);
12661 	tg3_full_unlock(tp);
12662 
12663 	return 0;
12664 }
12665 
12666 static void tg3_get_channels(struct net_device *dev,
12667 			     struct ethtool_channels *channel)
12668 {
12669 	struct tg3 *tp = netdev_priv(dev);
12670 	u32 deflt_qs = netif_get_num_default_rss_queues();
12671 
12672 	channel->max_rx = tp->rxq_max;
12673 	channel->max_tx = tp->txq_max;
12674 
12675 	if (netif_running(dev)) {
12676 		channel->rx_count = tp->rxq_cnt;
12677 		channel->tx_count = tp->txq_cnt;
12678 	} else {
12679 		if (tp->rxq_req)
12680 			channel->rx_count = tp->rxq_req;
12681 		else
12682 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12683 
12684 		if (tp->txq_req)
12685 			channel->tx_count = tp->txq_req;
12686 		else
12687 			channel->tx_count = min(deflt_qs, tp->txq_max);
12688 	}
12689 }
12690 
12691 static int tg3_set_channels(struct net_device *dev,
12692 			    struct ethtool_channels *channel)
12693 {
12694 	struct tg3 *tp = netdev_priv(dev);
12695 
12696 	if (!tg3_flag(tp, SUPPORT_MSIX))
12697 		return -EOPNOTSUPP;
12698 
12699 	if (channel->rx_count > tp->rxq_max ||
12700 	    channel->tx_count > tp->txq_max)
12701 		return -EINVAL;
12702 
12703 	tp->rxq_req = channel->rx_count;
12704 	tp->txq_req = channel->tx_count;
12705 
12706 	if (!netif_running(dev))
12707 		return 0;
12708 
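	/* A channel change on a running device takes effect through a
	 * full stop/start cycle.
	 */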
12709 	tg3_stop(tp);
12710 
12711 	tg3_carrier_off(tp);
12712 
12713 	tg3_start(tp, true, false, false);
12714 
12715 	return 0;
12716 }
12717 
12718 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12719 {
12720 	switch (stringset) {
12721 	case ETH_SS_STATS:
12722 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12723 		break;
12724 	case ETH_SS_TEST:
12725 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12726 		break;
12727 	default:
12728 		WARN_ON(1);	/* we need a WARN() */
12729 		break;
12730 	}
12731 }
12732 
12733 static int tg3_set_phys_id(struct net_device *dev,
12734 			    enum ethtool_phys_id_state state)
12735 {
12736 	struct tg3 *tp = netdev_priv(dev);
12737 
12738 	if (!netif_running(tp->dev))
12739 		return -EAGAIN;
12740 
12741 	switch (state) {
12742 	case ETHTOOL_ID_ACTIVE:
12743 		return 1;	/* cycle on/off once per second */
12744 
12745 	case ETHTOOL_ID_ON:
12746 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12747 		     LED_CTRL_1000MBPS_ON |
12748 		     LED_CTRL_100MBPS_ON |
12749 		     LED_CTRL_10MBPS_ON |
12750 		     LED_CTRL_TRAFFIC_OVERRIDE |
12751 		     LED_CTRL_TRAFFIC_BLINK |
12752 		     LED_CTRL_TRAFFIC_LED);
12753 		break;
12754 
12755 	case ETHTOOL_ID_OFF:
12756 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12757 		     LED_CTRL_TRAFFIC_OVERRIDE);
12758 		break;
12759 
12760 	case ETHTOOL_ID_INACTIVE:
12761 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12762 		break;
12763 	}
12764 
12765 	return 0;
12766 }
12767 
12768 static void tg3_get_ethtool_stats(struct net_device *dev,
12769 				   struct ethtool_stats *estats, u64 *tmp_stats)
12770 {
12771 	struct tg3 *tp = netdev_priv(dev);
12772 
12773 	if (tp->hw_stats)
12774 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12775 	else
12776 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12777 }
12778 
12779 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12780 {
12781 	int i;
12782 	__be32 *buf;
12783 	u32 offset = 0, len = 0;
12784 	u32 magic, val;
12785 
12786 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12787 		return NULL;
12788 
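	/* Prefer an extended VPD block located via the NVRAM directory;
	 * fall back to the fixed legacy VPD offset if none is found.
	 */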
12789 	if (magic == TG3_EEPROM_MAGIC) {
12790 		for (offset = TG3_NVM_DIR_START;
12791 		     offset < TG3_NVM_DIR_END;
12792 		     offset += TG3_NVM_DIRENT_SIZE) {
12793 			if (tg3_nvram_read(tp, offset, &val))
12794 				return NULL;
12795 
12796 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12797 			    TG3_NVM_DIRTYPE_EXTVPD)
12798 				break;
12799 		}
12800 
12801 		if (offset != TG3_NVM_DIR_END) {
12802 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12803 			if (tg3_nvram_read(tp, offset + 4, &offset))
12804 				return NULL;
12805 
12806 			offset = tg3_nvram_logical_addr(tp, offset);
12807 		}
12808 	}
12809 
12810 	if (!offset || !len) {
12811 		offset = TG3_NVM_VPD_OFF;
12812 		len = TG3_NVM_VPD_LEN;
12813 	}
12814 
12815 	buf = kmalloc(len, GFP_KERNEL);
12816 	if (buf == NULL)
12817 		return NULL;
12818 
12819 	if (magic == TG3_EEPROM_MAGIC) {
12820 		for (i = 0; i < len; i += 4) {
12821 			/* The data is in little-endian format in NVRAM.
12822 			 * Use the big-endian read routines to preserve
12823 			 * the byte order as it exists in NVRAM.
12824 			 */
12825 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12826 				goto error;
12827 		}
12828 	} else {
12829 		u8 *ptr;
12830 		ssize_t cnt;
12831 		unsigned int pos = 0;
12832 
12833 		ptr = (u8 *)&buf[0];
12834 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12835 			cnt = pci_read_vpd(tp->pdev, pos,
12836 					   len - pos, ptr);
12837 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12838 				cnt = 0;
12839 			else if (cnt < 0)
12840 				goto error;
12841 		}
12842 		if (pos != len)
12843 			goto error;
12844 	}
12845 
12846 	*vpdlen = len;
12847 
12848 	return buf;
12849 
12850 error:
12851 	kfree(buf);
12852 	return NULL;
12853 }
12854 
12855 #define NVRAM_TEST_SIZE 0x100
12856 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12857 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12858 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12859 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12860 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12861 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12862 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12863 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12864 
12865 static int tg3_test_nvram(struct tg3 *tp)
12866 {
12867 	u32 csum, magic, len;
12868 	__be32 *buf;
12869 	int i, j, k, err = 0, size;
12870 
12871 	if (tg3_flag(tp, NO_NVRAM))
12872 		return 0;
12873 
12874 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12875 		return -EIO;
12876 
12877 	if (magic == TG3_EEPROM_MAGIC)
12878 		size = NVRAM_TEST_SIZE;
12879 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12880 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12881 		    TG3_EEPROM_SB_FORMAT_1) {
12882 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12883 			case TG3_EEPROM_SB_REVISION_0:
12884 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12885 				break;
12886 			case TG3_EEPROM_SB_REVISION_2:
12887 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12888 				break;
12889 			case TG3_EEPROM_SB_REVISION_3:
12890 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12891 				break;
12892 			case TG3_EEPROM_SB_REVISION_4:
12893 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12894 				break;
12895 			case TG3_EEPROM_SB_REVISION_5:
12896 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12897 				break;
12898 			case TG3_EEPROM_SB_REVISION_6:
12899 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12900 				break;
12901 			default:
12902 				return -EIO;
12903 			}
12904 		} else
12905 			return 0;
12906 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12907 		size = NVRAM_SELFBOOT_HW_SIZE;
12908 	else
12909 		return -EIO;
12910 
12911 	buf = kmalloc(size, GFP_KERNEL);
12912 	if (buf == NULL)
12913 		return -ENOMEM;
12914 
12915 	err = -EIO;
12916 	for (i = 0, j = 0; i < size; i += 4, j++) {
12917 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12918 		if (err)
12919 			break;
12920 	}
12921 	if (i < size)
12922 		goto out;
12923 
12924 	/* Selfboot format */
12925 	magic = be32_to_cpu(buf[0]);
12926 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12927 	    TG3_EEPROM_MAGIC_FW) {
12928 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12929 
12930 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12931 		    TG3_EEPROM_SB_REVISION_2) {
12932 			/* For rev 2, the csum doesn't include the MBA. */
12933 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12934 				csum8 += buf8[i];
12935 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12936 				csum8 += buf8[i];
12937 		} else {
12938 			for (i = 0; i < size; i++)
12939 				csum8 += buf8[i];
12940 		}
12941 
12942 		if (csum8 == 0) {
12943 			err = 0;
12944 			goto out;
12945 		}
12946 
12947 		err = -EIO;
12948 		goto out;
12949 	}
12950 
12951 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12952 	    TG3_EEPROM_MAGIC_HW) {
12953 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12954 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12955 		u8 *buf8 = (u8 *) buf;
12956 
12957 		/* Separate the parity bits and the data bytes.  */
12958 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12959 			if ((i == 0) || (i == 8)) {
12960 				int l;
12961 				u8 msk;
12962 
12963 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12964 					parity[k++] = buf8[i] & msk;
12965 				i++;
12966 			} else if (i == 16) {
12967 				int l;
12968 				u8 msk;
12969 
12970 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12971 					parity[k++] = buf8[i] & msk;
12972 				i++;
12973 
12974 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12975 					parity[k++] = buf8[i] & msk;
12976 				i++;
12977 			}
12978 			data[j++] = buf8[i];
12979 		}
12980 
12981 		err = -EIO;
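		/* Each data byte combined with its parity bit must
		 * have odd parity: an even-weight byte needs the
		 * parity bit set, an odd-weight byte needs it clear.
		 */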
12982 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12983 			u8 hw8 = hweight8(data[i]);
12984 
12985 			if ((hw8 & 0x1) && parity[i])
12986 				goto out;
12987 			else if (!(hw8 & 0x1) && !parity[i])
12988 				goto out;
12989 		}
12990 		err = 0;
12991 		goto out;
12992 	}
12993 
12994 	err = -EIO;
12995 
12996 	/* Bootstrap checksum at offset 0x10 */
12997 	csum = calc_crc((unsigned char *) buf, 0x10);
12998 	if (csum != le32_to_cpu(buf[0x10/4]))
12999 		goto out;
13000 
13001 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13002 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13003 	if (csum != le32_to_cpu(buf[0xfc/4]))
13004 		goto out;
13005 
13006 	kfree(buf);
13007 
13008 	buf = tg3_vpd_readblock(tp, &len);
13009 	if (!buf)
13010 		return -ENOMEM;
13011 
13012 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13013 	if (i > 0) {
13014 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13015 		if (j < 0)
13016 			goto out;
13017 
13018 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13019 			goto out;
13020 
13021 		i += PCI_VPD_LRDT_TAG_SIZE;
13022 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13023 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13024 		if (j > 0) {
13025 			u8 csum8 = 0;
13026 
13027 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13028 
13029 			for (i = 0; i <= j; i++)
13030 				csum8 += ((u8 *)buf)[i];
13031 
13032 			if (csum8)
13033 				goto out;
13034 		}
13035 	}
13036 
13037 	err = 0;
13038 
13039 out:
13040 	kfree(buf);
13041 	return err;
13042 }
13043 
13044 #define TG3_SERDES_TIMEOUT_SEC	2
13045 #define TG3_COPPER_TIMEOUT_SEC	6
13046 
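/* Wait for the link to come up, polling once a second.  SerDes links
 * get 2 seconds, copper links 6.  Returns 0 if the link is up, -EIO
 * on timeout, -ENODEV if the interface is not running.
 */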
13047 static int tg3_test_link(struct tg3 *tp)
13048 {
13049 	int i, max;
13050 
13051 	if (!netif_running(tp->dev))
13052 		return -ENODEV;
13053 
13054 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13055 		max = TG3_SERDES_TIMEOUT_SEC;
13056 	else
13057 		max = TG3_COPPER_TIMEOUT_SEC;
13058 
13059 	for (i = 0; i < max; i++) {
13060 		if (tp->link_up)
13061 			return 0;
13062 
13063 		if (msleep_interruptible(1000))
13064 			break;
13065 	}
13066 
13067 	return -EIO;
13068 }
13069 
13070 /* Only test the commonly used registers */
13071 static int tg3_test_registers(struct tg3 *tp)
13072 {
13073 	int i, is_5705, is_5750;
13074 	u32 offset, read_mask, write_mask, val, save_val, read_val;
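	/* Each entry of reg_tbl describes one register: its offset,
	 * flags restricting which chips the entry applies to, a mask
	 * of the read-only bits and a mask of the read/write bits.
	 */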
13075 	static struct {
13076 		u16 offset;
13077 		u16 flags;
13078 #define TG3_FL_5705	0x1
13079 #define TG3_FL_NOT_5705	0x2
13080 #define TG3_FL_NOT_5788	0x4
13081 #define TG3_FL_NOT_5750	0x8
13082 		u32 read_mask;
13083 		u32 write_mask;
13084 	} reg_tbl[] = {
13085 		/* MAC Control Registers */
13086 		{ MAC_MODE, TG3_FL_NOT_5705,
13087 			0x00000000, 0x00ef6f8c },
13088 		{ MAC_MODE, TG3_FL_5705,
13089 			0x00000000, 0x01ef6b8c },
13090 		{ MAC_STATUS, TG3_FL_NOT_5705,
13091 			0x03800107, 0x00000000 },
13092 		{ MAC_STATUS, TG3_FL_5705,
13093 			0x03800100, 0x00000000 },
13094 		{ MAC_ADDR_0_HIGH, 0x0000,
13095 			0x00000000, 0x0000ffff },
13096 		{ MAC_ADDR_0_LOW, 0x0000,
13097 			0x00000000, 0xffffffff },
13098 		{ MAC_RX_MTU_SIZE, 0x0000,
13099 			0x00000000, 0x0000ffff },
13100 		{ MAC_TX_MODE, 0x0000,
13101 			0x00000000, 0x00000070 },
13102 		{ MAC_TX_LENGTHS, 0x0000,
13103 			0x00000000, 0x00003fff },
13104 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13105 			0x00000000, 0x000007fc },
13106 		{ MAC_RX_MODE, TG3_FL_5705,
13107 			0x00000000, 0x000007dc },
13108 		{ MAC_HASH_REG_0, 0x0000,
13109 			0x00000000, 0xffffffff },
13110 		{ MAC_HASH_REG_1, 0x0000,
13111 			0x00000000, 0xffffffff },
13112 		{ MAC_HASH_REG_2, 0x0000,
13113 			0x00000000, 0xffffffff },
13114 		{ MAC_HASH_REG_3, 0x0000,
13115 			0x00000000, 0xffffffff },
13116 
13117 		/* Receive Data and Receive BD Initiator Control Registers. */
13118 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13119 			0x00000000, 0xffffffff },
13120 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13121 			0x00000000, 0xffffffff },
13122 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13123 			0x00000000, 0x00000003 },
13124 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13125 			0x00000000, 0xffffffff },
13126 		{ RCVDBDI_STD_BD+0, 0x0000,
13127 			0x00000000, 0xffffffff },
13128 		{ RCVDBDI_STD_BD+4, 0x0000,
13129 			0x00000000, 0xffffffff },
13130 		{ RCVDBDI_STD_BD+8, 0x0000,
13131 			0x00000000, 0xffff0002 },
13132 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13133 			0x00000000, 0xffffffff },
13134 
13135 		/* Receive BD Initiator Control Registers. */
13136 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13137 			0x00000000, 0xffffffff },
13138 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13139 			0x00000000, 0x000003ff },
13140 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13141 			0x00000000, 0xffffffff },
13142 
13143 		/* Host Coalescing Control Registers. */
13144 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13145 			0x00000000, 0x00000004 },
13146 		{ HOSTCC_MODE, TG3_FL_5705,
13147 			0x00000000, 0x000000f6 },
13148 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13149 			0x00000000, 0xffffffff },
13150 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13151 			0x00000000, 0x000003ff },
13152 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13153 			0x00000000, 0xffffffff },
13154 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13155 			0x00000000, 0x000003ff },
13156 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13157 			0x00000000, 0xffffffff },
13158 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13159 			0x00000000, 0x000000ff },
13160 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13161 			0x00000000, 0xffffffff },
13162 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13163 			0x00000000, 0x000000ff },
13164 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13165 			0x00000000, 0xffffffff },
13166 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13167 			0x00000000, 0xffffffff },
13168 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13169 			0x00000000, 0xffffffff },
13170 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13171 			0x00000000, 0x000000ff },
13172 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13173 			0x00000000, 0xffffffff },
13174 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13175 			0x00000000, 0x000000ff },
13176 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13177 			0x00000000, 0xffffffff },
13178 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13179 			0x00000000, 0xffffffff },
13180 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13181 			0x00000000, 0xffffffff },
13182 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13183 			0x00000000, 0xffffffff },
13184 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13185 			0x00000000, 0xffffffff },
13186 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13187 			0xffffffff, 0x00000000 },
13188 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13189 			0xffffffff, 0x00000000 },
13190 
13191 		/* Buffer Manager Control Registers. */
13192 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13193 			0x00000000, 0x007fff80 },
13194 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13195 			0x00000000, 0x007fffff },
13196 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13197 			0x00000000, 0x0000003f },
13198 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13199 			0x00000000, 0x000001ff },
13200 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13201 			0x00000000, 0x000001ff },
13202 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13203 			0xffffffff, 0x00000000 },
13204 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13205 			0xffffffff, 0x00000000 },
13206 
13207 		/* Mailbox Registers */
13208 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13209 			0x00000000, 0x000001ff },
13210 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13211 			0x00000000, 0x000001ff },
13212 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13213 			0x00000000, 0x000007ff },
13214 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13215 			0x00000000, 0x000001ff },
13216 
13217 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13218 	};
13219 
13220 	is_5705 = is_5750 = 0;
13221 	if (tg3_flag(tp, 5705_PLUS)) {
13222 		is_5705 = 1;
13223 		if (tg3_flag(tp, 5750_PLUS))
13224 			is_5750 = 1;
13225 	}
13226 
13227 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13228 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13229 			continue;
13230 
13231 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13232 			continue;
13233 
13234 		if (tg3_flag(tp, IS_5788) &&
13235 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13236 			continue;
13237 
13238 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13239 			continue;
13240 
13241 		offset = (u32) reg_tbl[i].offset;
13242 		read_mask = reg_tbl[i].read_mask;
13243 		write_mask = reg_tbl[i].write_mask;
13244 
13245 		/* Save the original register content */
13246 		save_val = tr32(offset);
13247 
13248 		/* Determine the read-only value. */
13249 		read_val = save_val & read_mask;
13250 
13251 		/* Write zero to the register, then make sure the read-only bits
13252 		 * are not changed and the read/write bits are all zeros.
13253 		 */
13254 		tw32(offset, 0);
13255 
13256 		val = tr32(offset);
13257 
13258 		/* Test the read-only and read/write bits. */
13259 		if (((val & read_mask) != read_val) || (val & write_mask))
13260 			goto out;
13261 
13262 		/* Write ones to all the bits defined by RdMask and WrMask, then
13263 		 * make sure the read-only bits are not changed and the
13264 		 * read/write bits are all ones.
13265 		 */
13266 		tw32(offset, read_mask | write_mask);
13267 
13268 		val = tr32(offset);
13269 
13270 		/* Test the read-only bits. */
13271 		if ((val & read_mask) != read_val)
13272 			goto out;
13273 
13274 		/* Test the read/write bits. */
13275 		if ((val & write_mask) != write_mask)
13276 			goto out;
13277 
13278 		tw32(offset, save_val);
13279 	}
13280 
13281 	return 0;
13282 
13283 out:
13284 	if (netif_msg_hw(tp))
13285 		netdev_err(tp->dev,
13286 			   "Register test failed at offset %x\n", offset);
13287 	tw32(offset, save_val);
13288 	return -EIO;
13289 }
13290 
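/* Pattern-test a block of on-chip memory: write each test pattern to
 * every word in [offset, offset + len) and verify the readback.
 */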
13291 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13292 {
13293 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13294 	int i;
13295 	u32 j;
13296 
13297 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13298 		for (j = 0; j < len; j += 4) {
13299 			u32 val;
13300 
13301 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13302 			tg3_read_mem(tp, offset + j, &val);
13303 			if (val != test_pattern[i])
13304 				return -EIO;
13305 		}
13306 	}
13307 	return 0;
13308 }
13309 
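/* Pick the table of internal memory regions that matches this ASIC
 * revision and pattern-test each region in turn.
 */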
13310 static int tg3_test_memory(struct tg3 *tp)
13311 {
13312 	static struct mem_entry {
13313 		u32 offset;
13314 		u32 len;
13315 	} mem_tbl_570x[] = {
13316 		{ 0x00000000, 0x00b50},
13317 		{ 0x00002000, 0x1c000},
13318 		{ 0xffffffff, 0x00000}
13319 	}, mem_tbl_5705[] = {
13320 		{ 0x00000100, 0x0000c},
13321 		{ 0x00000200, 0x00008},
13322 		{ 0x00004000, 0x00800},
13323 		{ 0x00006000, 0x01000},
13324 		{ 0x00008000, 0x02000},
13325 		{ 0x00010000, 0x0e000},
13326 		{ 0xffffffff, 0x00000}
13327 	}, mem_tbl_5755[] = {
13328 		{ 0x00000200, 0x00008},
13329 		{ 0x00004000, 0x00800},
13330 		{ 0x00006000, 0x00800},
13331 		{ 0x00008000, 0x02000},
13332 		{ 0x00010000, 0x0c000},
13333 		{ 0xffffffff, 0x00000}
13334 	}, mem_tbl_5906[] = {
13335 		{ 0x00000200, 0x00008},
13336 		{ 0x00004000, 0x00400},
13337 		{ 0x00006000, 0x00400},
13338 		{ 0x00008000, 0x01000},
13339 		{ 0x00010000, 0x01000},
13340 		{ 0xffffffff, 0x00000}
13341 	}, mem_tbl_5717[] = {
13342 		{ 0x00000200, 0x00008},
13343 		{ 0x00010000, 0x0a000},
13344 		{ 0x00020000, 0x13c00},
13345 		{ 0xffffffff, 0x00000}
13346 	}, mem_tbl_57765[] = {
13347 		{ 0x00000200, 0x00008},
13348 		{ 0x00004000, 0x00800},
13349 		{ 0x00006000, 0x09800},
13350 		{ 0x00010000, 0x0a000},
13351 		{ 0xffffffff, 0x00000}
13352 	};
13353 	struct mem_entry *mem_tbl;
13354 	int err = 0;
13355 	int i;
13356 
13357 	if (tg3_flag(tp, 5717_PLUS))
13358 		mem_tbl = mem_tbl_5717;
13359 	else if (tg3_flag(tp, 57765_CLASS) ||
13360 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13361 		mem_tbl = mem_tbl_57765;
13362 	else if (tg3_flag(tp, 5755_PLUS))
13363 		mem_tbl = mem_tbl_5755;
13364 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13365 		mem_tbl = mem_tbl_5906;
13366 	else if (tg3_flag(tp, 5705_PLUS))
13367 		mem_tbl = mem_tbl_5705;
13368 	else
13369 		mem_tbl = mem_tbl_570x;
13370 
13371 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13372 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13373 		if (err)
13374 			break;
13375 	}
13376 
13377 	return err;
13378 }
13379 
13380 #define TG3_TSO_MSS		500
13381 
13382 #define TG3_TSO_IP_HDR_LEN	20
13383 #define TG3_TSO_TCP_HDR_LEN	20
13384 #define TG3_TSO_TCP_OPT_LEN	12
13385 
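/* Canned frame body for the TSO loopback test: an IPv4 EtherType, a
 * 20-byte IP header and a 32-byte TCP header (20 bytes plus 12 bytes
 * of options).  The IP total length, and for hardware TSO the TCP
 * checksum, are patched up at run time.
 */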
13386 static const u8 tg3_tso_header[] = {
13387 0x08, 0x00,
13388 0x45, 0x00, 0x00, 0x00,
13389 0x00, 0x00, 0x40, 0x00,
13390 0x40, 0x06, 0x00, 0x00,
13391 0x0a, 0x00, 0x00, 0x01,
13392 0x0a, 0x00, 0x00, 0x02,
13393 0x0d, 0x00, 0xe0, 0x00,
13394 0x00, 0x00, 0x01, 0x00,
13395 0x00, 0x00, 0x02, 0x00,
13396 0x80, 0x10, 0x10, 0x00,
13397 0x14, 0x09, 0x00, 0x00,
13398 0x01, 0x01, 0x08, 0x0a,
13399 0x11, 0x11, 0x11, 0x11,
13400 0x11, 0x11, 0x11, 0x11,
13401 };
13402 
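/* Send one test frame (or a TSO burst when @tso_loopback is set) and
 * verify that it arrives on the receive return ring with the expected
 * length and payload.  The caller must already have put the MAC or
 * PHY into loopback mode.
 */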
13403 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13404 {
13405 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13406 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13407 	u32 budget;
13408 	struct sk_buff *skb;
13409 	u8 *tx_data, *rx_data;
13410 	dma_addr_t map;
13411 	int num_pkts, tx_len, rx_len, i, err;
13412 	struct tg3_rx_buffer_desc *desc;
13413 	struct tg3_napi *tnapi, *rnapi;
13414 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13415 
13416 	tnapi = &tp->napi[0];
13417 	rnapi = &tp->napi[0];
13418 	if (tp->irq_cnt > 1) {
13419 		if (tg3_flag(tp, ENABLE_RSS))
13420 			rnapi = &tp->napi[1];
13421 		if (tg3_flag(tp, ENABLE_TSS))
13422 			tnapi = &tp->napi[1];
13423 	}
13424 	coal_now = tnapi->coal_now | rnapi->coal_now;
13425 
13426 	err = -EIO;
13427 
13428 	tx_len = pktsz;
13429 	skb = netdev_alloc_skb(tp->dev, tx_len);
13430 	if (!skb)
13431 		return -ENOMEM;
13432 
13433 	tx_data = skb_put(skb, tx_len);
13434 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13435 	memset(tx_data + ETH_ALEN, 0x0, 8);
13436 
13437 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13438 
13439 	if (tso_loopback) {
13440 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13441 
13442 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13443 			      TG3_TSO_TCP_OPT_LEN;
13444 
13445 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13446 		       sizeof(tg3_tso_header));
13447 		mss = TG3_TSO_MSS;
13448 
13449 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13450 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13451 
13452 		/* Set the total length field in the IP header */
13453 		iph->tot_len = htons((u16)(mss + hdr_len));
13454 
13455 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13456 			      TXD_FLAG_CPU_POST_DMA);
13457 
13458 		if (tg3_flag(tp, HW_TSO_1) ||
13459 		    tg3_flag(tp, HW_TSO_2) ||
13460 		    tg3_flag(tp, HW_TSO_3)) {
13461 			struct tcphdr *th;
13462 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13463 			th = (struct tcphdr *)&tx_data[val];
13464 			th->check = 0;
13465 		} else
13466 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13467 
13468 		if (tg3_flag(tp, HW_TSO_3)) {
13469 			mss |= (hdr_len & 0xc) << 12;
13470 			if (hdr_len & 0x10)
13471 				base_flags |= 0x00000010;
13472 			base_flags |= (hdr_len & 0x3e0) << 5;
13473 		} else if (tg3_flag(tp, HW_TSO_2))
13474 			mss |= hdr_len << 9;
13475 		else if (tg3_flag(tp, HW_TSO_1) ||
13476 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13477 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13478 		} else {
13479 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13480 		}
13481 
13482 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13483 	} else {
13484 		num_pkts = 1;
13485 		data_off = ETH_HLEN;
13486 
13487 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13488 		    tx_len > VLAN_ETH_FRAME_LEN)
13489 			base_flags |= TXD_FLAG_JMB_PKT;
13490 	}
13491 
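	/* Fill the payload with an incrementing byte pattern that the
	 * receive side below checks byte-for-byte.
	 */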
13492 	for (i = data_off; i < tx_len; i++)
13493 		tx_data[i] = (u8) (i & 0xff);
13494 
13495 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13496 	if (pci_dma_mapping_error(tp->pdev, map)) {
13497 		dev_kfree_skb(skb);
13498 		return -EIO;
13499 	}
13500 
13501 	val = tnapi->tx_prod;
13502 	tnapi->tx_buffers[val].skb = skb;
13503 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13504 
13505 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13506 	       rnapi->coal_now);
13507 
13508 	udelay(10);
13509 
13510 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13511 
13512 	budget = tg3_tx_avail(tnapi);
13513 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13514 			    base_flags | TXD_FLAG_END, mss, 0)) {
13515 		tnapi->tx_buffers[val].skb = NULL;
13516 		dev_kfree_skb(skb);
13517 		return -EIO;
13518 	}
13519 
13520 	tnapi->tx_prod++;
13521 
13522 	/* Sync BD data before updating mailbox */
13523 	wmb();
13524 
13525 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13526 	tr32_mailbox(tnapi->prodmbox);
13527 
13528 	udelay(10);
13529 
13530 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13531 	for (i = 0; i < 35; i++) {
13532 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13533 		       coal_now);
13534 
13535 		udelay(10);
13536 
13537 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13538 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13539 		if ((tx_idx == tnapi->tx_prod) &&
13540 		    (rx_idx == (rx_start_idx + num_pkts)))
13541 			break;
13542 	}
13543 
13544 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13545 	dev_kfree_skb(skb);
13546 
13547 	if (tx_idx != tnapi->tx_prod)
13548 		goto out;
13549 
13550 	if (rx_idx != rx_start_idx + num_pkts)
13551 		goto out;
13552 
13553 	val = data_off;
13554 	while (rx_idx != rx_start_idx) {
13555 		desc = &rnapi->rx_rcb[rx_start_idx++];
13556 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13557 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13558 
13559 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13560 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13561 			goto out;
13562 
13563 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13564 			 - ETH_FCS_LEN;
13565 
13566 		if (!tso_loopback) {
13567 			if (rx_len != tx_len)
13568 				goto out;
13569 
13570 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13571 				if (opaque_key != RXD_OPAQUE_RING_STD)
13572 					goto out;
13573 			} else {
13574 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13575 					goto out;
13576 			}
13577 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13578 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13579 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13580 			goto out;
13581 		}
13582 
13583 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13584 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13585 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13586 					     mapping);
13587 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13588 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13589 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13590 					     mapping);
13591 		} else
13592 			goto out;
13593 
13594 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13595 					    PCI_DMA_FROMDEVICE);
13596 
13597 		rx_data += TG3_RX_OFFSET(tp);
13598 		for (i = data_off; i < rx_len; i++, val++) {
13599 			if (*(rx_data + i) != (u8) (val & 0xff))
13600 				goto out;
13601 		}
13602 	}
13603 
13604 	err = 0;
13605 
13606 	/* tg3_free_rings will unmap and free the rx_data */
13607 out:
13608 	return err;
13609 }
13610 
13611 #define TG3_STD_LOOPBACK_FAILED		1
13612 #define TG3_JMB_LOOPBACK_FAILED		2
13613 #define TG3_TSO_LOOPBACK_FAILED		4
13614 #define TG3_LOOPBACK_FAILED \
13615 	(TG3_STD_LOOPBACK_FAILED | \
13616 	 TG3_JMB_LOOPBACK_FAILED | \
13617 	 TG3_TSO_LOOPBACK_FAILED)
13618 
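/* Run the MAC, PHY and (optionally) external loopback variants of the
 * packet test, accumulating per-mode failure bits in @data.  EEE is
 * disabled for the duration so that low-power idle cannot interfere
 * with the test frames.
 */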
13619 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13620 {
13621 	int err = -EIO;
13622 	u32 eee_cap;
13623 	u32 jmb_pkt_sz = 9000;
13624 
13625 	if (tp->dma_limit)
13626 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13627 
13628 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13629 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13630 
13631 	if (!netif_running(tp->dev)) {
13632 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13633 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13634 		if (do_extlpbk)
13635 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13636 		goto done;
13637 	}
13638 
13639 	err = tg3_reset_hw(tp, true);
13640 	if (err) {
13641 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13642 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13643 		if (do_extlpbk)
13644 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645 		goto done;
13646 	}
13647 
13648 	if (tg3_flag(tp, ENABLE_RSS)) {
13649 		int i;
13650 
13651 		/* Reroute all rx packets to the 1st queue */
13652 		for (i = MAC_RSS_INDIR_TBL_0;
13653 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13654 			tw32(i, 0x0);
13655 	}
13656 
	/* HW erratum - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by this
	 * erratum.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
13662 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13663 	    !tg3_flag(tp, CPMU_PRESENT)) {
13664 		tg3_mac_loopback(tp, true);
13665 
13666 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13667 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13668 
13669 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13670 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13671 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13672 
13673 		tg3_mac_loopback(tp, false);
13674 	}
13675 
13676 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13677 	    !tg3_flag(tp, USE_PHYLIB)) {
13678 		int i;
13679 
13680 		tg3_phy_lpbk_set(tp, 0, false);
13681 
13682 		/* Wait for link */
13683 		for (i = 0; i < 100; i++) {
13684 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13685 				break;
13686 			mdelay(1);
13687 		}
13688 
13689 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13690 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13691 		if (tg3_flag(tp, TSO_CAPABLE) &&
13692 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13693 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13694 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13695 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13696 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13697 
13698 		if (do_extlpbk) {
13699 			tg3_phy_lpbk_set(tp, 0, true);
13700 
13701 			/* All link indications report up, but the hardware
13702 			 * isn't really ready for about 20 msec.  Double it
13703 			 * to be sure.
13704 			 */
13705 			mdelay(40);
13706 
13707 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13708 				data[TG3_EXT_LOOPB_TEST] |=
13709 							TG3_STD_LOOPBACK_FAILED;
13710 			if (tg3_flag(tp, TSO_CAPABLE) &&
13711 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13712 				data[TG3_EXT_LOOPB_TEST] |=
13713 							TG3_TSO_LOOPBACK_FAILED;
13714 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13715 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13716 				data[TG3_EXT_LOOPB_TEST] |=
13717 							TG3_JMB_LOOPBACK_FAILED;
13718 		}
13719 
13720 		/* Re-enable gphy autopowerdown. */
13721 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13722 			tg3_phy_toggle_apd(tp, true);
13723 	}
13724 
13725 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13726 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13727 
13728 done:
13729 	tp->phy_flags |= eee_cap;
13730 
13731 	return err;
13732 }
13733 
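/* ethtool self-test entry point.  The online tests cover NVRAM and
 * link state; offline tests additionally halt the chip to run the
 * register, memory, loopback and interrupt tests, then restart the
 * hardware.
 */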
13734 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13735 			  u64 *data)
13736 {
13737 	struct tg3 *tp = netdev_priv(dev);
13738 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13739 
13740 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13741 		if (tg3_power_up(tp)) {
13742 			etest->flags |= ETH_TEST_FL_FAILED;
13743 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13744 			return;
13745 		}
13746 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13747 	}
13748 
13749 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13750 
13751 	if (tg3_test_nvram(tp) != 0) {
13752 		etest->flags |= ETH_TEST_FL_FAILED;
13753 		data[TG3_NVRAM_TEST] = 1;
13754 	}
13755 	if (!doextlpbk && tg3_test_link(tp)) {
13756 		etest->flags |= ETH_TEST_FL_FAILED;
13757 		data[TG3_LINK_TEST] = 1;
13758 	}
13759 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13760 		int err, err2 = 0, irq_sync = 0;
13761 
13762 		if (netif_running(dev)) {
13763 			tg3_phy_stop(tp);
13764 			tg3_netif_stop(tp);
13765 			irq_sync = 1;
13766 		}
13767 
13768 		tg3_full_lock(tp, irq_sync);
13769 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13770 		err = tg3_nvram_lock(tp);
13771 		tg3_halt_cpu(tp, RX_CPU_BASE);
13772 		if (!tg3_flag(tp, 5705_PLUS))
13773 			tg3_halt_cpu(tp, TX_CPU_BASE);
13774 		if (!err)
13775 			tg3_nvram_unlock(tp);
13776 
13777 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13778 			tg3_phy_reset(tp);
13779 
13780 		if (tg3_test_registers(tp) != 0) {
13781 			etest->flags |= ETH_TEST_FL_FAILED;
13782 			data[TG3_REGISTER_TEST] = 1;
13783 		}
13784 
13785 		if (tg3_test_memory(tp) != 0) {
13786 			etest->flags |= ETH_TEST_FL_FAILED;
13787 			data[TG3_MEMORY_TEST] = 1;
13788 		}
13789 
13790 		if (doextlpbk)
13791 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13792 
13793 		if (tg3_test_loopback(tp, data, doextlpbk))
13794 			etest->flags |= ETH_TEST_FL_FAILED;
13795 
13796 		tg3_full_unlock(tp);
13797 
13798 		if (tg3_test_interrupt(tp) != 0) {
13799 			etest->flags |= ETH_TEST_FL_FAILED;
13800 			data[TG3_INTERRUPT_TEST] = 1;
13801 		}
13802 
13803 		tg3_full_lock(tp, 0);
13804 
13805 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13806 		if (netif_running(dev)) {
13807 			tg3_flag_set(tp, INIT_COMPLETE);
13808 			err2 = tg3_restart_hw(tp, true);
13809 			if (!err2)
13810 				tg3_netif_start(tp);
13811 		}
13812 
13813 		tg3_full_unlock(tp);
13814 
13815 		if (irq_sync && !err2)
13816 			tg3_phy_start(tp);
13817 	}
13818 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13819 		tg3_power_down_prepare(tp);
}
13822 
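/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, program
 * the receive PTP filter and enable or disable transmit timestamping
 * accordingly.
 */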
13823 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13824 {
13825 	struct tg3 *tp = netdev_priv(dev);
13826 	struct hwtstamp_config stmpconf;
13827 
13828 	if (!tg3_flag(tp, PTP_CAPABLE))
13829 		return -EOPNOTSUPP;
13830 
13831 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13832 		return -EFAULT;
13833 
13834 	if (stmpconf.flags)
13835 		return -EINVAL;
13836 
13837 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13838 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13839 		return -ERANGE;
13840 
13841 	switch (stmpconf.rx_filter) {
13842 	case HWTSTAMP_FILTER_NONE:
13843 		tp->rxptpctl = 0;
13844 		break;
13845 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13846 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13847 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13848 		break;
13849 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13850 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13851 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13852 		break;
13853 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13854 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13855 			       TG3_RX_PTP_CTL_DELAY_REQ;
13856 		break;
13857 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13858 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13859 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13860 		break;
13861 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13862 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13863 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13864 		break;
13865 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13866 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13867 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13868 		break;
13869 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13870 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13871 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13872 		break;
13873 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13874 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13875 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13876 		break;
13877 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13878 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13879 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13880 		break;
13881 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13882 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13883 			       TG3_RX_PTP_CTL_DELAY_REQ;
13884 		break;
13885 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13886 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13887 			       TG3_RX_PTP_CTL_DELAY_REQ;
13888 		break;
13889 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13890 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13891 			       TG3_RX_PTP_CTL_DELAY_REQ;
13892 		break;
13893 	default:
13894 		return -ERANGE;
13895 	}
13896 
13897 	if (netif_running(dev) && tp->rxptpctl)
13898 		tw32(TG3_RX_PTP_CTL,
13899 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13900 
13901 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13902 		tg3_flag_set(tp, TX_TSTAMP_EN);
13903 	else
13904 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13905 
13906 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13907 		-EFAULT : 0;
13908 }
13909 
13910 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13911 {
13912 	struct tg3 *tp = netdev_priv(dev);
13913 	struct hwtstamp_config stmpconf;
13914 
13915 	if (!tg3_flag(tp, PTP_CAPABLE))
13916 		return -EOPNOTSUPP;
13917 
13918 	stmpconf.flags = 0;
13919 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13920 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13921 
13922 	switch (tp->rxptpctl) {
13923 	case 0:
13924 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13925 		break;
13926 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13927 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13928 		break;
13929 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13930 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13931 		break;
13932 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13933 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13934 		break;
13935 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13936 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13937 		break;
13938 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13939 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13940 		break;
13941 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13942 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13943 		break;
13944 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13945 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13946 		break;
13947 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13948 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13949 		break;
13950 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13951 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13952 		break;
13953 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13954 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13955 		break;
13956 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13957 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13958 		break;
13959 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13960 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13961 		break;
13962 	default:
13963 		WARN_ON_ONCE(1);
13964 		return -ERANGE;
13965 	}
13966 
13967 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13968 		-EFAULT : 0;
13969 }
13970 
13971 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13972 {
13973 	struct mii_ioctl_data *data = if_mii(ifr);
13974 	struct tg3 *tp = netdev_priv(dev);
13975 	int err;
13976 
13977 	if (tg3_flag(tp, USE_PHYLIB)) {
13978 		struct phy_device *phydev;
13979 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13980 			return -EAGAIN;
13981 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13982 		return phy_mii_ioctl(phydev, ifr, cmd);
13983 	}
13984 
13985 	switch (cmd) {
13986 	case SIOCGMIIPHY:
13987 		data->phy_id = tp->phy_addr;
13988 
		/* fall through */
13990 	case SIOCGMIIREG: {
13991 		u32 mii_regval;
13992 
13993 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13994 			break;			/* We have no PHY */
13995 
13996 		if (!netif_running(dev))
13997 			return -EAGAIN;
13998 
13999 		spin_lock_bh(&tp->lock);
14000 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14001 				    data->reg_num & 0x1f, &mii_regval);
14002 		spin_unlock_bh(&tp->lock);
14003 
14004 		data->val_out = mii_regval;
14005 
14006 		return err;
14007 	}
14008 
14009 	case SIOCSMIIREG:
14010 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14011 			break;			/* We have no PHY */
14012 
14013 		if (!netif_running(dev))
14014 			return -EAGAIN;
14015 
14016 		spin_lock_bh(&tp->lock);
14017 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14018 				     data->reg_num & 0x1f, data->val_in);
14019 		spin_unlock_bh(&tp->lock);
14020 
14021 		return err;
14022 
14023 	case SIOCSHWTSTAMP:
14024 		return tg3_hwtstamp_set(dev, ifr);
14025 
14026 	case SIOCGHWTSTAMP:
14027 		return tg3_hwtstamp_get(dev, ifr);
14028 
14029 	default:
14030 		/* do nothing */
14031 		break;
14032 	}
14033 	return -EOPNOTSUPP;
14034 }
14035 
14036 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14037 {
14038 	struct tg3 *tp = netdev_priv(dev);
14039 
14040 	memcpy(ec, &tp->coal, sizeof(*ec));
14041 	return 0;
14042 }
14043 
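/* Validate and apply ethtool coalescing parameters.  On 5705 and
 * newer parts the IRQ-context tick limits and the statistics block
 * limits remain zero, so nonzero requests for those fields are
 * rejected as out of range.
 */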
14044 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14045 {
14046 	struct tg3 *tp = netdev_priv(dev);
14047 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14048 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14049 
14050 	if (!tg3_flag(tp, 5705_PLUS)) {
14051 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14052 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14053 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14054 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14055 	}
14056 
14057 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14058 	    (!ec->rx_coalesce_usecs) ||
14059 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14060 	    (!ec->tx_coalesce_usecs) ||
14061 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14062 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14063 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14064 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14065 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14066 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14067 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14068 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14069 		return -EINVAL;
14070 
14071 	/* Only copy relevant parameters, ignore all others. */
14072 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14073 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14074 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14075 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14076 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14077 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14078 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14079 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14080 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14081 
14082 	if (netif_running(dev)) {
14083 		tg3_full_lock(tp, 0);
14084 		__tg3_set_coalesce(tp, &tp->coal);
14085 		tg3_full_unlock(tp);
14086 	}
14087 	return 0;
14088 }
14089 
14090 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14091 {
14092 	struct tg3 *tp = netdev_priv(dev);
14093 
14094 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14095 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14096 		return -EOPNOTSUPP;
14097 	}
14098 
14099 	if (edata->advertised != tp->eee.advertised) {
14100 		netdev_warn(tp->dev,
14101 			    "Direct manipulation of EEE advertisement is not supported\n");
14102 		return -EINVAL;
14103 	}
14104 
14105 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximum supported Tx LPI timer is %#x\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14109 		return -EINVAL;
14110 	}
14111 
14112 	tp->eee = *edata;
14113 
14114 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14115 	tg3_warn_mgmt_link_flap(tp);
14116 
14117 	if (netif_running(tp->dev)) {
14118 		tg3_full_lock(tp, 0);
14119 		tg3_setup_eee(tp);
14120 		tg3_phy_reset(tp);
14121 		tg3_full_unlock(tp);
14122 	}
14123 
14124 	return 0;
14125 }
14126 
14127 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14128 {
14129 	struct tg3 *tp = netdev_priv(dev);
14130 
14131 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14132 		netdev_warn(tp->dev,
14133 			    "Board does not support EEE!\n");
14134 		return -EOPNOTSUPP;
14135 	}
14136 
14137 	*edata = tp->eee;
14138 	return 0;
14139 }
14140 
14141 static const struct ethtool_ops tg3_ethtool_ops = {
14142 	.get_drvinfo		= tg3_get_drvinfo,
14143 	.get_regs_len		= tg3_get_regs_len,
14144 	.get_regs		= tg3_get_regs,
14145 	.get_wol		= tg3_get_wol,
14146 	.set_wol		= tg3_set_wol,
14147 	.get_msglevel		= tg3_get_msglevel,
14148 	.set_msglevel		= tg3_set_msglevel,
14149 	.nway_reset		= tg3_nway_reset,
14150 	.get_link		= ethtool_op_get_link,
14151 	.get_eeprom_len		= tg3_get_eeprom_len,
14152 	.get_eeprom		= tg3_get_eeprom,
14153 	.set_eeprom		= tg3_set_eeprom,
14154 	.get_ringparam		= tg3_get_ringparam,
14155 	.set_ringparam		= tg3_set_ringparam,
14156 	.get_pauseparam		= tg3_get_pauseparam,
14157 	.set_pauseparam		= tg3_set_pauseparam,
14158 	.self_test		= tg3_self_test,
14159 	.get_strings		= tg3_get_strings,
14160 	.set_phys_id		= tg3_set_phys_id,
14161 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14162 	.get_coalesce		= tg3_get_coalesce,
14163 	.set_coalesce		= tg3_set_coalesce,
14164 	.get_sset_count		= tg3_get_sset_count,
14165 	.get_rxnfc		= tg3_get_rxnfc,
14166 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14167 	.get_rxfh		= tg3_get_rxfh,
14168 	.set_rxfh		= tg3_set_rxfh,
14169 	.get_channels		= tg3_get_channels,
14170 	.set_channels		= tg3_set_channels,
14171 	.get_ts_info		= tg3_get_ts_info,
14172 	.get_eee		= tg3_get_eee,
14173 	.set_eee		= tg3_set_eee,
14174 	.get_link_ksettings	= tg3_get_link_ksettings,
14175 	.set_link_ksettings	= tg3_set_link_ksettings,
14176 };
14177 
14178 static void tg3_get_stats64(struct net_device *dev,
14179 			    struct rtnl_link_stats64 *stats)
14180 {
14181 	struct tg3 *tp = netdev_priv(dev);
14182 
14183 	spin_lock_bh(&tp->lock);
14184 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14185 		*stats = tp->net_stats_prev;
14186 		spin_unlock_bh(&tp->lock);
14187 		return;
14188 	}
14189 
14190 	tg3_get_nstats(tp, stats);
14191 	spin_unlock_bh(&tp->lock);
14192 }
14193 
14194 static void tg3_set_rx_mode(struct net_device *dev)
14195 {
14196 	struct tg3 *tp = netdev_priv(dev);
14197 
14198 	if (!netif_running(dev))
14199 		return;
14200 
14201 	tg3_full_lock(tp, 0);
14202 	__tg3_set_rx_mode(dev);
14203 	tg3_full_unlock(tp);
14204 }
14205 
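/* Record the new MTU and adjust the jumbo/TSO flags.  5780-class
 * chips cannot do TSO on jumbo frames, so TSO capability is toggled
 * along with the MTU there; other chips simply enable or disable the
 * jumbo ring.
 */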
14206 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14207 			       int new_mtu)
14208 {
14209 	dev->mtu = new_mtu;
14210 
14211 	if (new_mtu > ETH_DATA_LEN) {
14212 		if (tg3_flag(tp, 5780_CLASS)) {
14213 			netdev_update_features(dev);
14214 			tg3_flag_clear(tp, TSO_CAPABLE);
14215 		} else {
14216 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14217 		}
14218 	} else {
14219 		if (tg3_flag(tp, 5780_CLASS)) {
14220 			tg3_flag_set(tp, TSO_CAPABLE);
14221 			netdev_update_features(dev);
14222 		}
14223 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14224 	}
14225 }
14226 
14227 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14228 {
14229 	struct tg3 *tp = netdev_priv(dev);
14230 	int err;
14231 	bool reset_phy = false;
14232 
14233 	if (!netif_running(dev)) {
		/* The new MTU will simply take effect the next
		 * time the device is brought up.
		 */
14237 		tg3_set_mtu(dev, tp, new_mtu);
14238 		return 0;
14239 	}
14240 
14241 	tg3_phy_stop(tp);
14242 
14243 	tg3_netif_stop(tp);
14244 
14245 	tg3_set_mtu(dev, tp, new_mtu);
14246 
14247 	tg3_full_lock(tp, 1);
14248 
14249 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14250 
14251 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14252 	 * breaks all requests to 256 bytes.
14253 	 */
14254 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14255 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14256 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14257 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14258 		reset_phy = true;
14259 
14260 	err = tg3_restart_hw(tp, reset_phy);
14261 
14262 	if (!err)
14263 		tg3_netif_start(tp);
14264 
14265 	tg3_full_unlock(tp);
14266 
14267 	if (!err)
14268 		tg3_phy_start(tp);
14269 
14270 	return err;
14271 }
14272 
14273 static const struct net_device_ops tg3_netdev_ops = {
14274 	.ndo_open		= tg3_open,
14275 	.ndo_stop		= tg3_close,
14276 	.ndo_start_xmit		= tg3_start_xmit,
14277 	.ndo_get_stats64	= tg3_get_stats64,
14278 	.ndo_validate_addr	= eth_validate_addr,
14279 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14280 	.ndo_set_mac_address	= tg3_set_mac_addr,
14281 	.ndo_do_ioctl		= tg3_ioctl,
14282 	.ndo_tx_timeout		= tg3_tx_timeout,
14283 	.ndo_change_mtu		= tg3_change_mtu,
14284 	.ndo_fix_features	= tg3_fix_features,
14285 	.ndo_set_features	= tg3_set_features,
14286 #ifdef CONFIG_NET_POLL_CONTROLLER
14287 	.ndo_poll_controller	= tg3_poll_controller,
14288 #endif
14289 };
14290 
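/* Size a serial EEPROM by reading at doubling offsets until the magic
 * signature reappears, meaning the addressing has wrapped around.
 */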
14291 static void tg3_get_eeprom_size(struct tg3 *tp)
14292 {
14293 	u32 cursize, val, magic;
14294 
14295 	tp->nvram_size = EEPROM_CHIP_SIZE;
14296 
14297 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14298 		return;
14299 
14300 	if ((magic != TG3_EEPROM_MAGIC) &&
14301 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14302 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14303 		return;
14304 
14305 	/*
14306 	 * Size the chip by reading offsets at increasing powers of two.
14307 	 * When we encounter our validation signature, we know the addressing
14308 	 * has wrapped around, and thus have our chip size.
14309 	 */
14310 	cursize = 0x10;
14311 
14312 	while (cursize < tp->nvram_size) {
14313 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14314 			return;
14315 
14316 		if (val == magic)
14317 			break;
14318 
14319 		cursize <<= 1;
14320 	}
14321 
14322 	tp->nvram_size = cursize;
14323 }
14324 
14325 static void tg3_get_nvram_size(struct tg3 *tp)
14326 {
14327 	u32 val;
14328 
14329 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14330 		return;
14331 
14332 	/* Selfboot format */
14333 	if (val != TG3_EEPROM_MAGIC) {
14334 		tg3_get_eeprom_size(tp);
14335 		return;
14336 	}
14337 
14338 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14339 		if (val != 0) {
14340 			/* This is confusing.  We want to operate on the
14341 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14342 			 * call will read from NVRAM and byteswap the data
14343 			 * according to the byteswapping settings for all
14344 			 * other register accesses.  This ensures the data we
14345 			 * want will always reside in the lower 16-bits.
14346 			 * However, the data in NVRAM is in LE format, which
14347 			 * means the data from the NVRAM read will always be
14348 			 * opposite the endianness of the CPU.  The 16-bit
14349 			 * byteswap then brings the data to CPU endianness.
14350 			 */
14351 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14352 			return;
14353 		}
14354 	}
14355 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14356 }
14357 
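/* The tg3_get_*_nvram_info() routines below decode the NVRAM_CFG1
 * strapping to identify the attached flash or EEPROM device and
 * record its vendor, page size, buffering and, where known, size.
 */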
14358 static void tg3_get_nvram_info(struct tg3 *tp)
14359 {
14360 	u32 nvcfg1;
14361 
14362 	nvcfg1 = tr32(NVRAM_CFG1);
14363 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14364 		tg3_flag_set(tp, FLASH);
14365 	} else {
14366 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14367 		tw32(NVRAM_CFG1, nvcfg1);
14368 	}
14369 
14370 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14371 	    tg3_flag(tp, 5780_CLASS)) {
14372 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14373 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14374 			tp->nvram_jedecnum = JEDEC_ATMEL;
14375 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14376 			tg3_flag_set(tp, NVRAM_BUFFERED);
14377 			break;
14378 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14379 			tp->nvram_jedecnum = JEDEC_ATMEL;
14380 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14381 			break;
14382 		case FLASH_VENDOR_ATMEL_EEPROM:
14383 			tp->nvram_jedecnum = JEDEC_ATMEL;
14384 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14385 			tg3_flag_set(tp, NVRAM_BUFFERED);
14386 			break;
14387 		case FLASH_VENDOR_ST:
14388 			tp->nvram_jedecnum = JEDEC_ST;
14389 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14390 			tg3_flag_set(tp, NVRAM_BUFFERED);
14391 			break;
14392 		case FLASH_VENDOR_SAIFUN:
14393 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14394 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14395 			break;
14396 		case FLASH_VENDOR_SST_SMALL:
14397 		case FLASH_VENDOR_SST_LARGE:
14398 			tp->nvram_jedecnum = JEDEC_SST;
14399 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14400 			break;
14401 		}
14402 	} else {
14403 		tp->nvram_jedecnum = JEDEC_ATMEL;
14404 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14405 		tg3_flag_set(tp, NVRAM_BUFFERED);
14406 	}
14407 }
14408 
14409 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14410 {
14411 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14412 	case FLASH_5752PAGE_SIZE_256:
14413 		tp->nvram_pagesize = 256;
14414 		break;
14415 	case FLASH_5752PAGE_SIZE_512:
14416 		tp->nvram_pagesize = 512;
14417 		break;
14418 	case FLASH_5752PAGE_SIZE_1K:
14419 		tp->nvram_pagesize = 1024;
14420 		break;
14421 	case FLASH_5752PAGE_SIZE_2K:
14422 		tp->nvram_pagesize = 2048;
14423 		break;
14424 	case FLASH_5752PAGE_SIZE_4K:
14425 		tp->nvram_pagesize = 4096;
14426 		break;
14427 	case FLASH_5752PAGE_SIZE_264:
14428 		tp->nvram_pagesize = 264;
14429 		break;
14430 	case FLASH_5752PAGE_SIZE_528:
14431 		tp->nvram_pagesize = 528;
14432 		break;
14433 	}
14434 }
14435 
14436 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14437 {
14438 	u32 nvcfg1;
14439 
14440 	nvcfg1 = tr32(NVRAM_CFG1);
14441 
14442 	/* NVRAM protection for TPM */
14443 	if (nvcfg1 & (1 << 27))
14444 		tg3_flag_set(tp, PROTECTED_NVRAM);
14445 
14446 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14447 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14448 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14449 		tp->nvram_jedecnum = JEDEC_ATMEL;
14450 		tg3_flag_set(tp, NVRAM_BUFFERED);
14451 		break;
14452 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14453 		tp->nvram_jedecnum = JEDEC_ATMEL;
14454 		tg3_flag_set(tp, NVRAM_BUFFERED);
14455 		tg3_flag_set(tp, FLASH);
14456 		break;
14457 	case FLASH_5752VENDOR_ST_M45PE10:
14458 	case FLASH_5752VENDOR_ST_M45PE20:
14459 	case FLASH_5752VENDOR_ST_M45PE40:
14460 		tp->nvram_jedecnum = JEDEC_ST;
14461 		tg3_flag_set(tp, NVRAM_BUFFERED);
14462 		tg3_flag_set(tp, FLASH);
14463 		break;
14464 	}
14465 
14466 	if (tg3_flag(tp, FLASH)) {
14467 		tg3_nvram_get_pagesize(tp, nvcfg1);
14468 	} else {
14469 		/* For eeprom, set pagesize to maximum eeprom size */
14470 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14471 
14472 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14473 		tw32(NVRAM_CFG1, nvcfg1);
14474 	}
14475 }
14476 
14477 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14478 {
14479 	u32 nvcfg1, protect = 0;
14480 
14481 	nvcfg1 = tr32(NVRAM_CFG1);
14482 
14483 	/* NVRAM protection for TPM */
14484 	if (nvcfg1 & (1 << 27)) {
14485 		tg3_flag_set(tp, PROTECTED_NVRAM);
14486 		protect = 1;
14487 	}
14488 
14489 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14490 	switch (nvcfg1) {
14491 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14492 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14493 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14494 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14495 		tp->nvram_jedecnum = JEDEC_ATMEL;
14496 		tg3_flag_set(tp, NVRAM_BUFFERED);
14497 		tg3_flag_set(tp, FLASH);
14498 		tp->nvram_pagesize = 264;
14499 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14500 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14501 			tp->nvram_size = (protect ? 0x3e200 :
14502 					  TG3_NVRAM_SIZE_512KB);
14503 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14504 			tp->nvram_size = (protect ? 0x1f200 :
14505 					  TG3_NVRAM_SIZE_256KB);
14506 		else
14507 			tp->nvram_size = (protect ? 0x1f200 :
14508 					  TG3_NVRAM_SIZE_128KB);
14509 		break;
14510 	case FLASH_5752VENDOR_ST_M45PE10:
14511 	case FLASH_5752VENDOR_ST_M45PE20:
14512 	case FLASH_5752VENDOR_ST_M45PE40:
14513 		tp->nvram_jedecnum = JEDEC_ST;
14514 		tg3_flag_set(tp, NVRAM_BUFFERED);
14515 		tg3_flag_set(tp, FLASH);
14516 		tp->nvram_pagesize = 256;
14517 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14518 			tp->nvram_size = (protect ?
14519 					  TG3_NVRAM_SIZE_64KB :
14520 					  TG3_NVRAM_SIZE_128KB);
14521 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14522 			tp->nvram_size = (protect ?
14523 					  TG3_NVRAM_SIZE_64KB :
14524 					  TG3_NVRAM_SIZE_256KB);
14525 		else
14526 			tp->nvram_size = (protect ?
14527 					  TG3_NVRAM_SIZE_128KB :
14528 					  TG3_NVRAM_SIZE_512KB);
14529 		break;
14530 	}
14531 }
14532 
14533 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14534 {
14535 	u32 nvcfg1;
14536 
14537 	nvcfg1 = tr32(NVRAM_CFG1);
14538 
14539 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14540 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14541 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14542 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14543 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14544 		tp->nvram_jedecnum = JEDEC_ATMEL;
14545 		tg3_flag_set(tp, NVRAM_BUFFERED);
14546 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14547 
14548 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14549 		tw32(NVRAM_CFG1, nvcfg1);
14550 		break;
14551 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14552 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14553 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14554 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14555 		tp->nvram_jedecnum = JEDEC_ATMEL;
14556 		tg3_flag_set(tp, NVRAM_BUFFERED);
14557 		tg3_flag_set(tp, FLASH);
14558 		tp->nvram_pagesize = 264;
14559 		break;
14560 	case FLASH_5752VENDOR_ST_M45PE10:
14561 	case FLASH_5752VENDOR_ST_M45PE20:
14562 	case FLASH_5752VENDOR_ST_M45PE40:
14563 		tp->nvram_jedecnum = JEDEC_ST;
14564 		tg3_flag_set(tp, NVRAM_BUFFERED);
14565 		tg3_flag_set(tp, FLASH);
14566 		tp->nvram_pagesize = 256;
14567 		break;
14568 	}
14569 }
14570 
14571 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14572 {
14573 	u32 nvcfg1, protect = 0;
14574 
14575 	nvcfg1 = tr32(NVRAM_CFG1);
14576 
14577 	/* NVRAM protection for TPM */
14578 	if (nvcfg1 & (1 << 27)) {
14579 		tg3_flag_set(tp, PROTECTED_NVRAM);
14580 		protect = 1;
14581 	}
14582 
14583 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14584 	switch (nvcfg1) {
14585 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14586 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14587 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14588 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14589 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14590 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14591 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14592 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14593 		tp->nvram_jedecnum = JEDEC_ATMEL;
14594 		tg3_flag_set(tp, NVRAM_BUFFERED);
14595 		tg3_flag_set(tp, FLASH);
14596 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14597 		tp->nvram_pagesize = 256;
14598 		break;
14599 	case FLASH_5761VENDOR_ST_A_M45PE20:
14600 	case FLASH_5761VENDOR_ST_A_M45PE40:
14601 	case FLASH_5761VENDOR_ST_A_M45PE80:
14602 	case FLASH_5761VENDOR_ST_A_M45PE16:
14603 	case FLASH_5761VENDOR_ST_M_M45PE20:
14604 	case FLASH_5761VENDOR_ST_M_M45PE40:
14605 	case FLASH_5761VENDOR_ST_M_M45PE80:
14606 	case FLASH_5761VENDOR_ST_M_M45PE16:
14607 		tp->nvram_jedecnum = JEDEC_ST;
14608 		tg3_flag_set(tp, NVRAM_BUFFERED);
14609 		tg3_flag_set(tp, FLASH);
14610 		tp->nvram_pagesize = 256;
14611 		break;
14612 	}
14613 
14614 	if (protect) {
14615 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14616 	} else {
14617 		switch (nvcfg1) {
14618 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14619 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14620 		case FLASH_5761VENDOR_ST_A_M45PE16:
14621 		case FLASH_5761VENDOR_ST_M_M45PE16:
14622 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14623 			break;
14624 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14625 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14626 		case FLASH_5761VENDOR_ST_A_M45PE80:
14627 		case FLASH_5761VENDOR_ST_M_M45PE80:
14628 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14629 			break;
14630 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14631 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14632 		case FLASH_5761VENDOR_ST_A_M45PE40:
14633 		case FLASH_5761VENDOR_ST_M_M45PE40:
14634 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14635 			break;
14636 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14637 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14638 		case FLASH_5761VENDOR_ST_A_M45PE20:
14639 		case FLASH_5761VENDOR_ST_M_M45PE20:
14640 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14641 			break;
14642 		}
14643 	}
14644 }
14645 
14646 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14647 {
14648 	tp->nvram_jedecnum = JEDEC_ATMEL;
14649 	tg3_flag_set(tp, NVRAM_BUFFERED);
14650 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14651 }
14652 
14653 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14654 {
14655 	u32 nvcfg1;
14656 
14657 	nvcfg1 = tr32(NVRAM_CFG1);
14658 
14659 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14660 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14661 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14662 		tp->nvram_jedecnum = JEDEC_ATMEL;
14663 		tg3_flag_set(tp, NVRAM_BUFFERED);
14664 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14665 
14666 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14667 		tw32(NVRAM_CFG1, nvcfg1);
14668 		return;
14669 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14670 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14671 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14672 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14673 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14674 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14675 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14676 		tp->nvram_jedecnum = JEDEC_ATMEL;
14677 		tg3_flag_set(tp, NVRAM_BUFFERED);
14678 		tg3_flag_set(tp, FLASH);
14679 
14680 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14681 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14682 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14683 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14684 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14685 			break;
14686 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14687 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14688 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14689 			break;
14690 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14691 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14692 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14693 			break;
14694 		}
14695 		break;
14696 	case FLASH_5752VENDOR_ST_M45PE10:
14697 	case FLASH_5752VENDOR_ST_M45PE20:
14698 	case FLASH_5752VENDOR_ST_M45PE40:
14699 		tp->nvram_jedecnum = JEDEC_ST;
14700 		tg3_flag_set(tp, NVRAM_BUFFERED);
14701 		tg3_flag_set(tp, FLASH);
14702 
14703 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704 		case FLASH_5752VENDOR_ST_M45PE10:
14705 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14706 			break;
14707 		case FLASH_5752VENDOR_ST_M45PE20:
14708 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14709 			break;
14710 		case FLASH_5752VENDOR_ST_M45PE40:
14711 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14712 			break;
14713 		}
14714 		break;
14715 	default:
14716 		tg3_flag_set(tp, NO_NVRAM);
14717 		return;
14718 	}
14719 
14720 	tg3_nvram_get_pagesize(tp, nvcfg1);
14721 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14722 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14723 }
14724 
14726 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14727 {
14728 	u32 nvcfg1;
14729 
14730 	nvcfg1 = tr32(NVRAM_CFG1);
14731 
14732 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14733 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14734 	case FLASH_5717VENDOR_MICRO_EEPROM:
14735 		tp->nvram_jedecnum = JEDEC_ATMEL;
14736 		tg3_flag_set(tp, NVRAM_BUFFERED);
14737 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14738 
14739 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14740 		tw32(NVRAM_CFG1, nvcfg1);
14741 		return;
14742 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14743 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14744 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14745 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14746 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14747 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14748 	case FLASH_5717VENDOR_ATMEL_45USPT:
14749 		tp->nvram_jedecnum = JEDEC_ATMEL;
14750 		tg3_flag_set(tp, NVRAM_BUFFERED);
14751 		tg3_flag_set(tp, FLASH);
14752 
14753 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14754 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14755 			/* Detect size with tg3_nvram_get_size() */
14756 			break;
14757 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14758 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14759 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14760 			break;
14761 		default:
14762 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14763 			break;
14764 		}
14765 		break;
14766 	case FLASH_5717VENDOR_ST_M_M25PE10:
14767 	case FLASH_5717VENDOR_ST_A_M25PE10:
14768 	case FLASH_5717VENDOR_ST_M_M45PE10:
14769 	case FLASH_5717VENDOR_ST_A_M45PE10:
14770 	case FLASH_5717VENDOR_ST_M_M25PE20:
14771 	case FLASH_5717VENDOR_ST_A_M25PE20:
14772 	case FLASH_5717VENDOR_ST_M_M45PE20:
14773 	case FLASH_5717VENDOR_ST_A_M45PE20:
14774 	case FLASH_5717VENDOR_ST_25USPT:
14775 	case FLASH_5717VENDOR_ST_45USPT:
14776 		tp->nvram_jedecnum = JEDEC_ST;
14777 		tg3_flag_set(tp, NVRAM_BUFFERED);
14778 		tg3_flag_set(tp, FLASH);
14779 
14780 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14781 		case FLASH_5717VENDOR_ST_M_M25PE20:
14782 		case FLASH_5717VENDOR_ST_M_M45PE20:
14783 			/* Detect size with tg3_nvram_get_size() */
14784 			break;
14785 		case FLASH_5717VENDOR_ST_A_M25PE20:
14786 		case FLASH_5717VENDOR_ST_A_M45PE20:
14787 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14788 			break;
14789 		default:
14790 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14791 			break;
14792 		}
14793 		break;
14794 	default:
14795 		tg3_flag_set(tp, NO_NVRAM);
14796 		return;
14797 	}
14798 
14799 	tg3_nvram_get_pagesize(tp, nvcfg1);
14800 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14801 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14802 }
14803 
14804 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14805 {
14806 	u32 nvcfg1, nvmpinstrp, nv_status;
14807 
14808 	nvcfg1 = tr32(NVRAM_CFG1);
14809 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14810 
14811 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14812 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14813 			tg3_flag_set(tp, NO_NVRAM);
14814 			return;
14815 		}
14816 
14817 		switch (nvmpinstrp) {
14818 		case FLASH_5762_MX25L_100:
14819 		case FLASH_5762_MX25L_200:
14820 		case FLASH_5762_MX25L_400:
14821 		case FLASH_5762_MX25L_800:
14822 		case FLASH_5762_MX25L_160_320:
14823 			tp->nvram_pagesize = 4096;
14824 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14825 			tg3_flag_set(tp, NVRAM_BUFFERED);
14826 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14827 			tg3_flag_set(tp, FLASH);
14828 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
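			/* The autosense device-ID field appears to encode
			 * log2 of the flash size in MB, so shifting up by
			 * AUTOSENSE_SIZE_IN_MB converts it to bytes.
			 */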
			tp->nvram_size =
				(1 << ((nv_status >> AUTOSENSE_DEVID) &
				       AUTOSENSE_DEVID_MASK))
				<< AUTOSENSE_SIZE_IN_MB;
14833 			return;
14834 
14835 		case FLASH_5762_EEPROM_HD:
14836 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14837 			break;
14838 		case FLASH_5762_EEPROM_LD:
14839 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14840 			break;
14841 		case FLASH_5720VENDOR_M_ST_M45PE20:
14842 			/* This pinstrap supports multiple sizes, so force it
14843 			 * to read the actual size from location 0xf0.
14844 			 */
14845 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14846 			break;
14847 		}
14848 	}
14849 
14850 	switch (nvmpinstrp) {
14851 	case FLASH_5720_EEPROM_HD:
14852 	case FLASH_5720_EEPROM_LD:
14853 		tp->nvram_jedecnum = JEDEC_ATMEL;
14854 		tg3_flag_set(tp, NVRAM_BUFFERED);
14855 
14856 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14857 		tw32(NVRAM_CFG1, nvcfg1);
14858 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14859 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14860 		else
14861 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14862 		return;
14863 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14864 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14865 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14866 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14867 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14868 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14869 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14870 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14871 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14872 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14873 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14874 	case FLASH_5720VENDOR_ATMEL_45USPT:
14875 		tp->nvram_jedecnum = JEDEC_ATMEL;
14876 		tg3_flag_set(tp, NVRAM_BUFFERED);
14877 		tg3_flag_set(tp, FLASH);
14878 
14879 		switch (nvmpinstrp) {
14880 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14881 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14882 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14883 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14884 			break;
14885 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14886 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14887 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14888 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14889 			break;
14890 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14891 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14892 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14893 			break;
14894 		default:
14895 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14896 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14897 			break;
14898 		}
14899 		break;
14900 	case FLASH_5720VENDOR_M_ST_M25PE10:
14901 	case FLASH_5720VENDOR_M_ST_M45PE10:
14902 	case FLASH_5720VENDOR_A_ST_M25PE10:
14903 	case FLASH_5720VENDOR_A_ST_M45PE10:
14904 	case FLASH_5720VENDOR_M_ST_M25PE20:
14905 	case FLASH_5720VENDOR_M_ST_M45PE20:
14906 	case FLASH_5720VENDOR_A_ST_M25PE20:
14907 	case FLASH_5720VENDOR_A_ST_M45PE20:
14908 	case FLASH_5720VENDOR_M_ST_M25PE40:
14909 	case FLASH_5720VENDOR_M_ST_M45PE40:
14910 	case FLASH_5720VENDOR_A_ST_M25PE40:
14911 	case FLASH_5720VENDOR_A_ST_M45PE40:
14912 	case FLASH_5720VENDOR_M_ST_M25PE80:
14913 	case FLASH_5720VENDOR_M_ST_M45PE80:
14914 	case FLASH_5720VENDOR_A_ST_M25PE80:
14915 	case FLASH_5720VENDOR_A_ST_M45PE80:
14916 	case FLASH_5720VENDOR_ST_25USPT:
14917 	case FLASH_5720VENDOR_ST_45USPT:
14918 		tp->nvram_jedecnum = JEDEC_ST;
14919 		tg3_flag_set(tp, NVRAM_BUFFERED);
14920 		tg3_flag_set(tp, FLASH);
14921 
14922 		switch (nvmpinstrp) {
14923 		case FLASH_5720VENDOR_M_ST_M25PE20:
14924 		case FLASH_5720VENDOR_M_ST_M45PE20:
14925 		case FLASH_5720VENDOR_A_ST_M25PE20:
14926 		case FLASH_5720VENDOR_A_ST_M45PE20:
14927 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14928 			break;
14929 		case FLASH_5720VENDOR_M_ST_M25PE40:
14930 		case FLASH_5720VENDOR_M_ST_M45PE40:
14931 		case FLASH_5720VENDOR_A_ST_M25PE40:
14932 		case FLASH_5720VENDOR_A_ST_M45PE40:
14933 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14934 			break;
14935 		case FLASH_5720VENDOR_M_ST_M25PE80:
14936 		case FLASH_5720VENDOR_M_ST_M45PE80:
14937 		case FLASH_5720VENDOR_A_ST_M25PE80:
14938 		case FLASH_5720VENDOR_A_ST_M45PE80:
14939 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14940 			break;
14941 		default:
14942 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14943 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14944 			break;
14945 		}
14946 		break;
14947 	default:
14948 		tg3_flag_set(tp, NO_NVRAM);
14949 		return;
14950 	}
14951 
14952 	tg3_nvram_get_pagesize(tp, nvcfg1);
14953 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14954 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14955 
14956 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14957 		u32 val;
14958 
14959 		if (tg3_nvram_read(tp, 0, &val))
14960 			return;
14961 
14962 		if (val != TG3_EEPROM_MAGIC &&
14963 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14964 			tg3_flag_set(tp, NO_NVRAM);
14965 	}
14966 }
14967 
14968 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14969 static void tg3_nvram_init(struct tg3 *tp)
14970 {
14971 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14973 		tg3_flag_clear(tp, NVRAM);
14974 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14975 		tg3_flag_set(tp, NO_NVRAM);
14976 		return;
14977 	}
14978 
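	/* Reset the EEPROM address state machine and program the
	 * default clock period before touching the part.
	 */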
14979 	tw32_f(GRC_EEPROM_ADDR,
14980 	     (EEPROM_ADDR_FSM_RESET |
14981 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14982 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14983 
14984 	msleep(1);
14985 
14986 	/* Enable seeprom accesses. */
14987 	tw32_f(GRC_LOCAL_CTRL,
14988 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14989 	udelay(100);
14990 
14991 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14992 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14993 		tg3_flag_set(tp, NVRAM);
14994 
14995 		if (tg3_nvram_lock(tp)) {
14996 			netdev_warn(tp->dev,
14997 				    "Cannot get nvram lock, %s failed\n",
14998 				    __func__);
14999 			return;
15000 		}
15001 		tg3_enable_nvram_access(tp);
15002 
15003 		tp->nvram_size = 0;
15004 
15005 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15006 			tg3_get_5752_nvram_info(tp);
15007 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15008 			tg3_get_5755_nvram_info(tp);
15009 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15010 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15011 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15012 			tg3_get_5787_nvram_info(tp);
15013 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15014 			tg3_get_5761_nvram_info(tp);
15015 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15016 			tg3_get_5906_nvram_info(tp);
15017 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15018 			 tg3_flag(tp, 57765_CLASS))
15019 			tg3_get_57780_nvram_info(tp);
15020 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15021 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15022 			tg3_get_5717_nvram_info(tp);
15023 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15024 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15025 			tg3_get_5720_nvram_info(tp);
15026 		else
15027 			tg3_get_nvram_info(tp);
15028 
15029 		if (tp->nvram_size == 0)
15030 			tg3_get_nvram_size(tp);
15031 
15032 		tg3_disable_nvram_access(tp);
15033 		tg3_nvram_unlock(tp);
15034 
15035 	} else {
15036 		tg3_flag_clear(tp, NVRAM);
15037 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15038 
15039 		tg3_get_eeprom_size(tp);
15040 	}
15041 }
15042 
15043 struct subsys_tbl_ent {
15044 	u16 subsys_vendor, subsys_devid;
15045 	u32 phy_id;
15046 };
15047 
15048 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15049 	/* Broadcom boards. */
15050 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15051 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15052 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15053 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15054 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15055 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15056 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15057 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15058 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15059 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15060 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15061 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15062 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15063 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15064 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15065 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15066 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15068 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15070 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15072 
15073 	/* 3com boards. */
15074 	{ TG3PCI_SUBVENDOR_ID_3COM,
15075 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15076 	{ TG3PCI_SUBVENDOR_ID_3COM,
15077 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15078 	{ TG3PCI_SUBVENDOR_ID_3COM,
15079 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15080 	{ TG3PCI_SUBVENDOR_ID_3COM,
15081 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15082 	{ TG3PCI_SUBVENDOR_ID_3COM,
15083 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15084 
15085 	/* DELL boards. */
15086 	{ TG3PCI_SUBVENDOR_ID_DELL,
15087 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15088 	{ TG3PCI_SUBVENDOR_ID_DELL,
15089 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15090 	{ TG3PCI_SUBVENDOR_ID_DELL,
15091 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15092 	{ TG3PCI_SUBVENDOR_ID_DELL,
15093 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15094 
15095 	/* Compaq boards. */
15096 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15097 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15098 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15099 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15100 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15101 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15102 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15103 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15104 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15105 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15106 
15107 	/* IBM boards. */
15108 	{ TG3PCI_SUBVENDOR_ID_IBM,
15109 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15110 };
15111 
15112 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15113 {
15114 	int i;
15115 
15116 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15117 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15118 		     tp->pdev->subsystem_vendor) &&
15119 		    (subsys_id_to_phy_id[i].subsys_devid ==
15120 		     tp->pdev->subsystem_device))
15121 			return &subsys_id_to_phy_id[i];
15122 	}
15123 	return NULL;
15124 }
15125 
15126 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15127 {
15128 	u32 val;
15129 
15130 	tp->phy_id = TG3_PHY_ID_INVALID;
15131 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15132 
	/* Assume an onboard, WOL-capable device by default.  */
15134 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15135 	tg3_flag_set(tp, WOL_CAP);
15136 
15137 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15138 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15139 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15140 			tg3_flag_set(tp, IS_NIC);
15141 		}
15142 		val = tr32(VCPU_CFGSHDW);
15143 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15144 			tg3_flag_set(tp, ASPM_WORKAROUND);
15145 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15146 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15147 			tg3_flag_set(tp, WOL_ENABLE);
15148 			device_set_wakeup_enable(&tp->pdev->dev, true);
15149 		}
15150 		goto done;
15151 	}
15152 
15153 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15154 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15155 		u32 nic_cfg, led_cfg;
15156 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15157 		u32 nic_phy_id, ver, eeprom_phy_id;
15158 		int eeprom_phy_serdes = 0;
15159 
15160 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15161 		tp->nic_sram_data_cfg = nic_cfg;
15162 
15163 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15164 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15165 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15166 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15167 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15168 		    (ver > 0) && (ver < 0x100))
15169 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15170 
15171 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15172 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15173 
15174 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15175 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15176 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15177 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15178 
15179 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15180 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15181 			eeprom_phy_serdes = 1;
15182 
15183 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15184 		if (nic_phy_id != 0) {
15185 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15186 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15187 
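			/* Rebuild the driver's 32-bit PHY ID from the two
			 * 16-bit halves stored in NIC SRAM.
			 */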
15188 			eeprom_phy_id  = (id1 >> 16) << 10;
15189 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15190 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15191 		} else
15192 			eeprom_phy_id = 0;
15193 
15194 		tp->phy_id = eeprom_phy_id;
15195 		if (eeprom_phy_serdes) {
15196 			if (!tg3_flag(tp, 5705_PLUS))
15197 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15198 			else
15199 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15200 		}
15201 
15202 		if (tg3_flag(tp, 5750_PLUS))
15203 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15204 				    SHASTA_EXT_LED_MODE_MASK);
15205 		else
15206 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15207 
15208 		switch (led_cfg) {
15209 		default:
15210 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15211 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15212 			break;
15213 
15214 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15215 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15216 			break;
15217 
15218 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15219 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15220 
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read, as happens with some older 5700/5701
			 * bootcode.
			 */
15224 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15225 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15226 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15227 
15228 			break;
15229 
15230 		case SHASTA_EXT_LED_SHARED:
15231 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15232 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15233 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15234 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15235 						 LED_CTRL_MODE_PHY_2);
15236 
15237 			if (tg3_flag(tp, 5717_PLUS) ||
15238 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15239 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15240 						LED_CTRL_BLINK_RATE_MASK;
15241 
15242 			break;
15243 
15244 		case SHASTA_EXT_LED_MAC:
15245 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15246 			break;
15247 
15248 		case SHASTA_EXT_LED_COMBO:
15249 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15250 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15251 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15252 						 LED_CTRL_MODE_PHY_2);
15253 			break;
15254 
15255 		}
15256 
15257 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15258 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15259 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15260 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15261 
15262 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15263 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15264 
15265 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15266 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15267 			if ((tp->pdev->subsystem_vendor ==
15268 			     PCI_VENDOR_ID_ARIMA) &&
15269 			    (tp->pdev->subsystem_device == 0x205a ||
15270 			     tp->pdev->subsystem_device == 0x2063))
15271 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15272 		} else {
15273 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15274 			tg3_flag_set(tp, IS_NIC);
15275 		}
15276 
15277 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15278 			tg3_flag_set(tp, ENABLE_ASF);
15279 			if (tg3_flag(tp, 5750_PLUS))
15280 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15281 		}
15282 
15283 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15284 		    tg3_flag(tp, 5750_PLUS))
15285 			tg3_flag_set(tp, ENABLE_APE);
15286 
15287 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15288 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15289 			tg3_flag_clear(tp, WOL_CAP);
15290 
15291 		if (tg3_flag(tp, WOL_CAP) &&
15292 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15293 			tg3_flag_set(tp, WOL_ENABLE);
15294 			device_set_wakeup_enable(&tp->pdev->dev, true);
15295 		}
15296 
15297 		if (cfg2 & (1 << 17))
15298 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15299 
		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
15302 		if (cfg2 & (1 << 18))
15303 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15304 
15305 		if ((tg3_flag(tp, 57765_PLUS) ||
15306 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15307 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15308 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15309 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15310 
15311 		if (tg3_flag(tp, PCI_EXPRESS)) {
15312 			u32 cfg3;
15313 
15314 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15315 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15316 			    !tg3_flag(tp, 57765_PLUS) &&
15317 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15318 				tg3_flag_set(tp, ASPM_WORKAROUND);
15319 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15320 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15321 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15322 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15323 		}
15324 
15325 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15326 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15327 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15328 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15329 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15330 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15331 
15332 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15333 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15334 	}
15335 done:
15336 	if (tg3_flag(tp, WOL_CAP))
15337 		device_set_wakeup_enable(&tp->pdev->dev,
15338 					 tg3_flag(tp, WOL_ENABLE));
15339 	else
15340 		device_set_wakeup_capable(&tp->pdev->dev, false);
15341 }
15342 
15343 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15344 {
15345 	int i, err;
15346 	u32 val2, off = offset * 8;
15347 
15348 	err = tg3_nvram_lock(tp);
15349 	if (err)
15350 		return err;
15351 
15352 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15353 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15354 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15355 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15356 	udelay(10);
15357 
15358 	for (i = 0; i < 100; i++) {
15359 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15360 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15361 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15362 			break;
15363 		}
15364 		udelay(10);
15365 	}
15366 
15367 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15368 
15369 	tg3_nvram_unlock(tp);
15370 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15371 		return 0;
15372 
15373 	return -EBUSY;
15374 }
15375 
15376 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15377 {
15378 	int i;
15379 	u32 val;
15380 
15381 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15382 	tw32(OTP_CTRL, cmd);
15383 
15384 	/* Wait for up to 1 ms for command to execute. */
15385 	for (i = 0; i < 100; i++) {
15386 		val = tr32(OTP_STATUS);
15387 		if (val & OTP_STATUS_CMD_DONE)
15388 			break;
15389 		udelay(10);
15390 	}
15391 
15392 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15393 }
15394 
15395 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15396  * configuration is a 32-bit value that straddles the alignment boundary.
15397  * We do two 32-bit reads and then shift and merge the results.
15398  */
15399 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15400 {
15401 	u32 bhalf_otp, thalf_otp;
15402 
15403 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15404 
15405 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15406 		return 0;
15407 
15408 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15409 
15410 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15411 		return 0;
15412 
15413 	thalf_otp = tr32(OTP_READ_DATA);
15414 
15415 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15416 
15417 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15418 		return 0;
15419 
15420 	bhalf_otp = tr32(OTP_READ_DATA);
15421 
15422 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15423 }
15424 
15425 static void tg3_phy_init_link_config(struct tg3 *tp)
15426 {
15427 	u32 adv = ADVERTISED_Autoneg;
15428 
15429 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15430 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15431 			adv |= ADVERTISED_1000baseT_Half;
15432 		adv |= ADVERTISED_1000baseT_Full;
15433 	}
15434 
15435 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15436 		adv |= ADVERTISED_100baseT_Half |
15437 		       ADVERTISED_100baseT_Full |
15438 		       ADVERTISED_10baseT_Half |
15439 		       ADVERTISED_10baseT_Full |
15440 		       ADVERTISED_TP;
15441 	else
15442 		adv |= ADVERTISED_FIBRE;
15443 
15444 	tp->link_config.advertising = adv;
15445 	tp->link_config.speed = SPEED_UNKNOWN;
15446 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15447 	tp->link_config.autoneg = AUTONEG_ENABLE;
15448 	tp->link_config.active_speed = SPEED_UNKNOWN;
15449 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15450 
15451 	tp->old_link = -1;
15452 }
15453 
15454 static int tg3_phy_probe(struct tg3 *tp)
15455 {
15456 	u32 hw_phy_id_1, hw_phy_id_2;
15457 	u32 hw_phy_id, hw_phy_id_masked;
15458 	int err;
15459 
15460 	/* flow control autonegotiation is default behavior */
15461 	tg3_flag_set(tp, PAUSE_AUTONEG);
15462 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15463 
15464 	if (tg3_flag(tp, ENABLE_APE)) {
15465 		switch (tp->pci_fn) {
15466 		case 0:
15467 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15468 			break;
15469 		case 1:
15470 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15471 			break;
15472 		case 2:
15473 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15474 			break;
15475 		case 3:
15476 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15477 			break;
15478 		}
15479 	}
15480 
15481 	if (!tg3_flag(tp, ENABLE_ASF) &&
15482 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15483 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15484 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15485 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15486 
15487 	if (tg3_flag(tp, USE_PHYLIB))
15488 		return tg3_phy_init(tp);
15489 
15490 	/* Reading the PHY ID register can conflict with ASF
15491 	 * firmware access to the PHY hardware.
15492 	 */
15493 	err = 0;
15494 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15495 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15496 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID or, failing
		 * that, the value found in the eeprom area.
		 */
15502 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15503 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15504 
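		/* Fold the two 16-bit MII ID registers into the driver's
		 * internal 32-bit PHY ID layout.
		 */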
15505 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15506 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15507 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15508 
15509 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15510 	}
15511 
15512 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15513 		tp->phy_id = hw_phy_id;
15514 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15515 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15516 		else
15517 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15518 	} else {
15519 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing; the PHY ID was already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
15523 		} else {
15524 			struct subsys_tbl_ent *p;
15525 
15526 			/* No eeprom signature?  Try the hardcoded
15527 			 * subsys device table.
15528 			 */
15529 			p = tg3_lookup_by_subsys(tp);
15530 			if (p) {
15531 				tp->phy_id = p->phy_id;
15532 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
15540 				return -ENODEV;
15541 			}
15542 
15543 			if (!tp->phy_id ||
15544 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15545 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15546 		}
15547 	}
15548 
15549 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15550 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15551 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15552 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15553 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15554 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15555 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15556 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15557 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15558 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15559 
15560 		tp->eee.supported = SUPPORTED_100baseT_Full |
15561 				    SUPPORTED_1000baseT_Full;
15562 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15563 				     ADVERTISED_1000baseT_Full;
15564 		tp->eee.eee_enabled = 1;
15565 		tp->eee.tx_lpi_enabled = 1;
15566 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15567 	}
15568 
15569 	tg3_phy_init_link_config(tp);
15570 
15571 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15572 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15573 	    !tg3_flag(tp, ENABLE_APE) &&
15574 	    !tg3_flag(tp, ENABLE_ASF)) {
15575 		u32 bmsr, dummy;
15576 
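		/* The BMSR link-status bit is latched, so read it twice
		 * to get the current state.
		 */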
15577 		tg3_readphy(tp, MII_BMSR, &bmsr);
15578 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15579 		    (bmsr & BMSR_LSTATUS))
15580 			goto skip_phy_reset;
15581 
15582 		err = tg3_phy_reset(tp);
15583 		if (err)
15584 			return err;
15585 
15586 		tg3_phy_set_wirespeed(tp);
15587 
15588 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15589 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15590 					    tp->link_config.flowctrl);
15591 
15592 			tg3_writephy(tp, MII_BMCR,
15593 				     BMCR_ANENABLE | BMCR_ANRESTART);
15594 		}
15595 	}
15596 
15597 skip_phy_reset:
15598 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15599 		err = tg3_init_5401phy_dsp(tp);
15600 		if (err)
15601 			return err;
15602 
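		/* The DSP setup is run a second time, presumably as a
		 * workaround for flaky 5401 PHYs; only the second
		 * attempt's result is kept.
		 */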
15603 		err = tg3_init_5401phy_dsp(tp);
15604 	}
15605 
15606 	return err;
15607 }
15608 
15609 static void tg3_read_vpd(struct tg3 *tp)
15610 {
15611 	u8 *vpd_data;
15612 	unsigned int block_end, rosize, len;
15613 	u32 vpdlen;
15614 	int j, i = 0;
15615 
15616 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15617 	if (!vpd_data)
15618 		goto out_no_vpd;
15619 
15620 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15621 	if (i < 0)
15622 		goto out_not_found;
15623 
15624 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15625 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15626 	i += PCI_VPD_LRDT_TAG_SIZE;
15627 
15628 	if (block_end > vpdlen)
15629 		goto out_not_found;
15630 
15631 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15632 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15633 	if (j > 0) {
15634 		len = pci_vpd_info_field_size(&vpd_data[j]);
15635 
15636 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
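		/* "1028" is Dell's PCI vendor ID rendered in ASCII; the
		 * VENDOR0 firmware-version keyword is only expected on
		 * Dell boards.
		 */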
15637 		if (j + len > block_end || len != 4 ||
15638 		    memcmp(&vpd_data[j], "1028", 4))
15639 			goto partno;
15640 
15641 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15642 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15643 		if (j < 0)
15644 			goto partno;
15645 
15646 		len = pci_vpd_info_field_size(&vpd_data[j]);
15647 
15648 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15649 		if (j + len > block_end)
15650 			goto partno;
15651 
15652 		if (len >= sizeof(tp->fw_ver))
15653 			len = sizeof(tp->fw_ver) - 1;
15654 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15655 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15656 			 &vpd_data[j]);
15657 	}
15658 
15659 partno:
15660 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15661 				      PCI_VPD_RO_KEYWORD_PARTNO);
15662 	if (i < 0)
15663 		goto out_not_found;
15664 
15665 	len = pci_vpd_info_field_size(&vpd_data[i]);
15666 
15667 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15668 	if (len > TG3_BPN_SIZE ||
15669 	    (len + i) > vpdlen)
15670 		goto out_not_found;
15671 
15672 	memcpy(tp->board_part_number, &vpd_data[i], len);
15673 
15674 out_not_found:
15675 	kfree(vpd_data);
15676 	if (tp->board_part_number[0])
15677 		return;
15678 
15679 out_no_vpd:
15680 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15681 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15682 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15683 			strcpy(tp->board_part_number, "BCM5717");
15684 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15685 			strcpy(tp->board_part_number, "BCM5718");
15686 		else
15687 			goto nomatch;
15688 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15689 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15690 			strcpy(tp->board_part_number, "BCM57780");
15691 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15692 			strcpy(tp->board_part_number, "BCM57760");
15693 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15694 			strcpy(tp->board_part_number, "BCM57790");
15695 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15696 			strcpy(tp->board_part_number, "BCM57788");
15697 		else
15698 			goto nomatch;
15699 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15700 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15701 			strcpy(tp->board_part_number, "BCM57761");
15702 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15703 			strcpy(tp->board_part_number, "BCM57765");
15704 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15705 			strcpy(tp->board_part_number, "BCM57781");
15706 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15707 			strcpy(tp->board_part_number, "BCM57785");
15708 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15709 			strcpy(tp->board_part_number, "BCM57791");
15710 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15711 			strcpy(tp->board_part_number, "BCM57795");
15712 		else
15713 			goto nomatch;
15714 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15715 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15716 			strcpy(tp->board_part_number, "BCM57762");
15717 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15718 			strcpy(tp->board_part_number, "BCM57766");
15719 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15720 			strcpy(tp->board_part_number, "BCM57782");
15721 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15722 			strcpy(tp->board_part_number, "BCM57786");
15723 		else
15724 			goto nomatch;
15725 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15726 		strcpy(tp->board_part_number, "BCM95906");
15727 	} else {
15728 nomatch:
15729 		strcpy(tp->board_part_number, "none");
15730 	}
15731 }
15732 
15733 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15734 {
15735 	u32 val;
15736 
15737 	if (tg3_nvram_read(tp, offset, &val) ||
15738 	    (val & 0xfc000000) != 0x0c000000 ||
15739 	    tg3_nvram_read(tp, offset + 4, &val) ||
15740 	    val != 0)
15741 		return 0;
15742 
15743 	return 1;
15744 }
15745 
15746 static void tg3_read_bc_ver(struct tg3 *tp)
15747 {
15748 	u32 val, offset, start, ver_offset;
15749 	int i, dst_off;
15750 	bool newver = false;
15751 
15752 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15753 	    tg3_nvram_read(tp, 0x4, &start))
15754 		return;
15755 
15756 	offset = tg3_nvram_logical_addr(tp, offset);
15757 
15758 	if (tg3_nvram_read(tp, offset, &val))
15759 		return;
15760 
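	/* A first word with 0x0c000000 in its top bits and a zero second
	 * word appears to mark the newer bootcode image layout, which
	 * stores a printable version string instead of major/minor fields.
	 */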
15761 	if ((val & 0xfc000000) == 0x0c000000) {
15762 		if (tg3_nvram_read(tp, offset + 4, &val))
15763 			return;
15764 
15765 		if (val == 0)
15766 			newver = true;
15767 	}
15768 
15769 	dst_off = strlen(tp->fw_ver);
15770 
15771 	if (newver) {
15772 		if (TG3_VER_SIZE - dst_off < 16 ||
15773 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15774 			return;
15775 
15776 		offset = offset + ver_offset - start;
15777 		for (i = 0; i < 16; i += 4) {
15778 			__be32 v;
15779 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15780 				return;
15781 
15782 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15783 		}
15784 	} else {
15785 		u32 major, minor;
15786 
15787 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15788 			return;
15789 
15790 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15791 			TG3_NVM_BCVER_MAJSFT;
15792 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15793 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15794 			 "v%d.%02d", major, minor);
15795 	}
15796 }
15797 
15798 static void tg3_read_hwsb_ver(struct tg3 *tp)
15799 {
15800 	u32 val, major, minor;
15801 
15802 	/* Use native endian representation */
15803 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15804 		return;
15805 
15806 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15807 		TG3_NVM_HWSB_CFG1_MAJSFT;
15808 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15809 		TG3_NVM_HWSB_CFG1_MINSFT;
15810 
	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15812 }
15813 
15814 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15815 {
15816 	u32 offset, major, minor, build;
15817 
15818 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15819 
15820 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15821 		return;
15822 
15823 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15824 	case TG3_EEPROM_SB_REVISION_0:
15825 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15826 		break;
15827 	case TG3_EEPROM_SB_REVISION_2:
15828 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15829 		break;
15830 	case TG3_EEPROM_SB_REVISION_3:
15831 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15832 		break;
15833 	case TG3_EEPROM_SB_REVISION_4:
15834 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15835 		break;
15836 	case TG3_EEPROM_SB_REVISION_5:
15837 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15838 		break;
15839 	case TG3_EEPROM_SB_REVISION_6:
15840 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15841 		break;
15842 	default:
15843 		return;
15844 	}
15845 
15846 	if (tg3_nvram_read(tp, offset, &val))
15847 		return;
15848 
15849 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15850 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15851 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15852 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15853 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15854 
15855 	if (minor > 99 || build > 26)
15856 		return;
15857 
15858 	offset = strlen(tp->fw_ver);
15859 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15860 		 " v%d.%02d", major, minor);
15861 
15862 	if (build > 0) {
15863 		offset = strlen(tp->fw_ver);
15864 		if (offset < TG3_VER_SIZE - 1)
15865 			tp->fw_ver[offset] = 'a' + build - 1;
15866 	}
15867 }
15868 
15869 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15870 {
15871 	u32 val, offset, start;
15872 	int i, vlen;
15873 
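	/* Walk the NVRAM directory looking for the ASF initialization
	 * image entry.
	 */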
15874 	for (offset = TG3_NVM_DIR_START;
15875 	     offset < TG3_NVM_DIR_END;
15876 	     offset += TG3_NVM_DIRENT_SIZE) {
15877 		if (tg3_nvram_read(tp, offset, &val))
15878 			return;
15879 
15880 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15881 			break;
15882 	}
15883 
15884 	if (offset == TG3_NVM_DIR_END)
15885 		return;
15886 
15887 	if (!tg3_flag(tp, 5705_PLUS))
15888 		start = 0x08000000;
15889 	else if (tg3_nvram_read(tp, offset - 4, &start))
15890 		return;
15891 
15892 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15893 	    !tg3_fw_img_is_valid(tp, offset) ||
15894 	    tg3_nvram_read(tp, offset + 8, &val))
15895 		return;
15896 
15897 	offset += val - start;
15898 
15899 	vlen = strlen(tp->fw_ver);
15900 
15901 	tp->fw_ver[vlen++] = ',';
15902 	tp->fw_ver[vlen++] = ' ';
15903 
15904 	for (i = 0; i < 4; i++) {
15905 		__be32 v;
15906 		if (tg3_nvram_read_be32(tp, offset, &v))
15907 			return;
15908 
15909 		offset += sizeof(v);
15910 
15911 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15912 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15913 			break;
15914 		}
15915 
15916 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15917 		vlen += sizeof(v);
15918 	}
15919 }
15920 
15921 static void tg3_probe_ncsi(struct tg3 *tp)
15922 {
15923 	u32 apedata;
15924 
15925 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15926 	if (apedata != APE_SEG_SIG_MAGIC)
15927 		return;
15928 
15929 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15930 	if (!(apedata & APE_FW_STATUS_READY))
15931 		return;
15932 
15933 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15934 		tg3_flag_set(tp, APE_HAS_NCSI);
15935 }
15936 
15937 static void tg3_read_dash_ver(struct tg3 *tp)
15938 {
15939 	int vlen;
15940 	u32 apedata;
15941 	char *fwtype;
15942 
15943 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15944 
15945 	if (tg3_flag(tp, APE_HAS_NCSI))
15946 		fwtype = "NCSI";
15947 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15948 		fwtype = "SMASH";
15949 	else
15950 		fwtype = "DASH";
15951 
15952 	vlen = strlen(tp->fw_ver);
15953 
15954 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15955 		 fwtype,
15956 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15957 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15958 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15959 		 (apedata & APE_FW_VERSION_BLDMSK));
15960 }
15961 
15962 static void tg3_read_otp_ver(struct tg3 *tp)
15963 {
15964 	u32 val, val2;
15965 
15966 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15967 		return;
15968 
15969 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15970 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15971 	    TG3_OTP_MAGIC0_VALID(val)) {
15972 		u64 val64 = (u64) val << 32 | val2;
15973 		u32 ver = 0;
15974 		int i, vlen;
15975 
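		/* Scan the version bytes from the low end and keep the
		 * last non-zero byte as the OTP version number.
		 */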
15976 		for (i = 0; i < 7; i++) {
15977 			if ((val64 & 0xff) == 0)
15978 				break;
15979 			ver = val64 & 0xff;
15980 			val64 >>= 8;
15981 		}
15982 		vlen = strlen(tp->fw_ver);
15983 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15984 	}
15985 }
15986 
15987 static void tg3_read_fw_ver(struct tg3 *tp)
15988 {
15989 	u32 val;
15990 	bool vpd_vers = false;
15991 
15992 	if (tp->fw_ver[0] != 0)
15993 		vpd_vers = true;
15994 
15995 	if (tg3_flag(tp, NO_NVRAM)) {
15996 		strcat(tp->fw_ver, "sb");
15997 		tg3_read_otp_ver(tp);
15998 		return;
15999 	}
16000 
16001 	if (tg3_nvram_read(tp, 0, &val))
16002 		return;
16003 
16004 	if (val == TG3_EEPROM_MAGIC)
16005 		tg3_read_bc_ver(tp);
16006 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16007 		tg3_read_sb_ver(tp, val);
16008 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16009 		tg3_read_hwsb_ver(tp);
16010 
16011 	if (tg3_flag(tp, ENABLE_ASF)) {
16012 		if (tg3_flag(tp, ENABLE_APE)) {
16013 			tg3_probe_ncsi(tp);
16014 			if (!vpd_vers)
16015 				tg3_read_dash_ver(tp);
16016 		} else if (!vpd_vers) {
16017 			tg3_read_mgmtfw_ver(tp);
16018 		}
16019 	}
16020 
16021 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16022 }
16023 
16024 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16025 {
16026 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16027 		return TG3_RX_RET_MAX_SIZE_5717;
16028 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16029 		return TG3_RX_RET_MAX_SIZE_5700;
16030 	else
16031 		return TG3_RX_RET_MAX_SIZE_5705;
16032 }
16033 
16034 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16035 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16036 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16037 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16038 	{ },
16039 };
16040 
16041 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16042 {
16043 	struct pci_dev *peer;
16044 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16045 
16046 	for (func = 0; func < 8; func++) {
16047 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16048 		if (peer && peer != tp->pdev)
16049 			break;
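		/* pci_get_slot() took a reference; drop it before trying
		 * the next function (pci_dev_put() tolerates NULL).
		 */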
16050 		pci_dev_put(peer);
16051 	}
	/* The 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
16055 	if (!peer) {
16056 		peer = tp->pdev;
16057 		return peer;
16058 	}
16059 
	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
16064 	pci_dev_put(peer);
16065 
16066 	return peer;
16067 }
16068 
16069 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16070 {
16071 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
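	/* Newer devices report ASIC_REV_USE_PROD_ID_REG here and expose
	 * the real chip revision through a product ID register instead.
	 */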
16072 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16073 		u32 reg;
16074 
16075 		/* All devices that use the alternate
16076 		 * ASIC REV location have a CPMU.
16077 		 */
16078 		tg3_flag_set(tp, CPMU_PRESENT);
16079 
16080 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16081 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16082 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16083 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16084 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16085 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16086 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16087 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16088 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16089 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16090 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16091 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16092 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16093 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16094 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16095 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16096 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16097 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16098 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16099 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16100 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16101 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16102 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16103 		else
16104 			reg = TG3PCI_PRODID_ASICREV;
16105 
16106 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16107 	}
16108 
16109 	/* Wrong chip ID in 5752 A0. This code can be removed later
16110 	 * as A0 is not in production.
16111 	 */
16112 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16113 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16114 
16115 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16116 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16117 
16118 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16119 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16120 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16121 		tg3_flag_set(tp, 5717_PLUS);
16122 
16123 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16124 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16125 		tg3_flag_set(tp, 57765_CLASS);
16126 
16127 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16128 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16129 		tg3_flag_set(tp, 57765_PLUS);
16130 
16131 	/* Intentionally exclude ASIC_REV_5906 */
16132 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16133 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16134 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16135 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16136 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16137 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16138 	    tg3_flag(tp, 57765_PLUS))
16139 		tg3_flag_set(tp, 5755_PLUS);
16140 
16141 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16142 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16143 		tg3_flag_set(tp, 5780_CLASS);
16144 
16145 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16146 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16147 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16148 	    tg3_flag(tp, 5755_PLUS) ||
16149 	    tg3_flag(tp, 5780_CLASS))
16150 		tg3_flag_set(tp, 5750_PLUS);
16151 
16152 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16153 	    tg3_flag(tp, 5750_PLUS))
16154 		tg3_flag_set(tp, 5705_PLUS);
16155 }
16156 
16157 static bool tg3_10_100_only_device(struct tg3 *tp,
16158 				   const struct pci_device_id *ent)
16159 {
16160 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16161 
16162 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16163 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16164 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16165 		return true;
16166 
16167 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16168 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16169 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16170 				return true;
16171 		} else {
16172 			return true;
16173 		}
16174 	}
16175 
16176 	return false;
16177 }
16178 
16179 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16180 {
16181 	u32 misc_ctrl_reg;
16182 	u32 pci_state_reg, grc_misc_cfg;
16183 	u32 val;
16184 	u16 pci_cmd;
16185 	int err;
16186 
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never gets
	 * used.  This seems to suggest that the workaround is
	 * insufficient.
	 */
16194 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16195 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16196 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16197 
16198 	/* Important! -- Make sure register accesses are byteswapped
16199 	 * correctly.  Also, for those chips that require it, make
16200 	 * sure that indirect register accesses are enabled before
16201 	 * the first operation.
16202 	 */
16203 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16204 			      &misc_ctrl_reg);
16205 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16206 			       MISC_HOST_CTRL_CHIPREV);
16207 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16208 			       tp->misc_host_ctrl);
16209 
16210 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16211 
16212 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16213 	 * we need to disable memory and use config. cycles
16214 	 * only to access all registers. The 5702/03 chips
16215 	 * can mistakenly decode the special cycles from the
16216 	 * ICH chipsets as memory write cycles, causing corruption
16217 	 * of register and memory space. Only certain ICH bridges
16218 	 * will drive special cycles with non-zero data during the
16219 	 * address phase which can fall within the 5703's address
16220 	 * range. This is not an ICH bug as the PCI spec allows
16221 	 * non-zero address during special cycles. However, only
16222 	 * these ICH bridges are known to drive non-zero addresses
16223 	 * during special cycles.
16224 	 *
16225 	 * Since special cycles do not cross PCI bridges, we only
16226 	 * enable this workaround if the 5703 is on the secondary
16227 	 * bus of these ICH bridges.
16228 	 */
16229 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16230 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16231 		static struct tg3_dev_id {
16232 			u32	vendor;
16233 			u32	device;
16234 			u32	rev;
16235 		} ich_chipsets[] = {
16236 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16237 			  PCI_ANY_ID },
16238 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16239 			  PCI_ANY_ID },
16240 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16241 			  0xa },
16242 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16243 			  PCI_ANY_ID },
16244 			{ },
16245 		};
16246 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16247 		struct pci_dev *bridge = NULL;
16248 
16249 		while (pci_id->vendor != 0) {
16250 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16251 						bridge);
16252 			if (!bridge) {
16253 				pci_id++;
16254 				continue;
16255 			}
16256 			if (pci_id->rev != PCI_ANY_ID) {
16257 				if (bridge->revision > pci_id->rev)
16258 					continue;
16259 			}
16260 			if (bridge->subordinate &&
16261 			    (bridge->subordinate->number ==
16262 			     tp->pdev->bus->number)) {
16263 				tg3_flag_set(tp, ICH_WORKAROUND);
16264 				pci_dev_put(bridge);
16265 				break;
16266 			}
16267 		}
16268 	}
16269 
16270 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16271 		static struct tg3_dev_id {
16272 			u32	vendor;
16273 			u32	device;
16274 		} bridge_chipsets[] = {
16275 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16276 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16277 			{ },
16278 		};
16279 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16280 		struct pci_dev *bridge = NULL;
16281 
16282 		while (pci_id->vendor != 0) {
16283 			bridge = pci_get_device(pci_id->vendor,
16284 						pci_id->device,
16285 						bridge);
16286 			if (!bridge) {
16287 				pci_id++;
16288 				continue;
16289 			}
16290 			if (bridge->subordinate &&
16291 			    (bridge->subordinate->number <=
16292 			     tp->pdev->bus->number) &&
16293 			    (bridge->subordinate->busn_res.end >=
16294 			     tp->pdev->bus->number)) {
16295 				tg3_flag_set(tp, 5701_DMA_BUG);
16296 				pci_dev_put(bridge);
16297 				break;
16298 			}
16299 		}
16300 	}
16301 
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit.  This bridge may have additional
	 * 57xx devices behind it in some 4-port NIC designs, for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
16308 	if (tg3_flag(tp, 5780_CLASS)) {
16309 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16310 		tp->msi_cap = tp->pdev->msi_cap;
16311 	} else {
16312 		struct pci_dev *bridge = NULL;
16313 
16314 		do {
16315 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16316 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16317 						bridge);
16318 			if (bridge && bridge->subordinate &&
16319 			    (bridge->subordinate->number <=
16320 			     tp->pdev->bus->number) &&
16321 			    (bridge->subordinate->busn_res.end >=
16322 			     tp->pdev->bus->number)) {
16323 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16324 				pci_dev_put(bridge);
16325 				break;
16326 			}
16327 		} while (bridge);
16328 	}
16329 
16330 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16331 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16332 		tp->pdev_peer = tg3_find_peer(tp);
16333 
16334 	/* Determine TSO capabilities */
16335 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16336 		; /* Do nothing. HW bug. */
16337 	else if (tg3_flag(tp, 57765_PLUS))
16338 		tg3_flag_set(tp, HW_TSO_3);
16339 	else if (tg3_flag(tp, 5755_PLUS) ||
16340 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16341 		tg3_flag_set(tp, HW_TSO_2);
16342 	else if (tg3_flag(tp, 5750_PLUS)) {
16343 		tg3_flag_set(tp, HW_TSO_1);
16344 		tg3_flag_set(tp, TSO_BUG);
16345 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16346 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16347 			tg3_flag_clear(tp, TSO_BUG);
16348 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16349 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16350 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16351 		tg3_flag_set(tp, FW_TSO);
16352 		tg3_flag_set(tp, TSO_BUG);
16353 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16354 			tp->fw_needed = FIRMWARE_TG3TSO5;
16355 		else
16356 			tp->fw_needed = FIRMWARE_TG3TSO;
16357 	}
16358 
16359 	/* Selectively allow TSO based on operating conditions */
16360 	if (tg3_flag(tp, HW_TSO_1) ||
16361 	    tg3_flag(tp, HW_TSO_2) ||
16362 	    tg3_flag(tp, HW_TSO_3) ||
16363 	    tg3_flag(tp, FW_TSO)) {
16364 		/* For firmware TSO, assume ASF is disabled.
16365 		 * We'll disable TSO later if we discover ASF
16366 		 * is enabled in tg3_get_eeprom_hw_cfg().
16367 		 */
16368 		tg3_flag_set(tp, TSO_CAPABLE);
16369 	} else {
16370 		tg3_flag_clear(tp, TSO_CAPABLE);
16371 		tg3_flag_clear(tp, TSO_BUG);
16372 		tp->fw_needed = NULL;
16373 	}
16374 
16375 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16376 		tp->fw_needed = FIRMWARE_TG3;
16377 
16378 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16379 		tp->fw_needed = FIRMWARE_TG357766;
16380 
16381 	tp->irq_max = 1;
16382 
16383 	if (tg3_flag(tp, 5750_PLUS)) {
16384 		tg3_flag_set(tp, SUPPORT_MSI);
16385 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16386 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16387 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16388 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16389 		     tp->pdev_peer == tp->pdev))
16390 			tg3_flag_clear(tp, SUPPORT_MSI);
16391 
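		/* On these chips the MSI can operate in one-shot mode:
		 * the interrupt auto-disarms after firing and is
		 * re-enabled by the interrupt handler.
		 */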
16392 		if (tg3_flag(tp, 5755_PLUS) ||
16393 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16394 			tg3_flag_set(tp, 1SHOT_MSI);
16395 		}
16396 
16397 		if (tg3_flag(tp, 57765_PLUS)) {
16398 			tg3_flag_set(tp, SUPPORT_MSIX);
16399 			tp->irq_max = TG3_IRQ_MAX_VECS;
16400 		}
16401 	}
16402 
16403 	tp->txq_max = 1;
16404 	tp->rxq_max = 1;
16405 	if (tp->irq_max > 1) {
16406 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16407 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16408 
16409 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16410 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16411 			tp->txq_max = tp->irq_max - 1;
16412 	}
16413 
16414 	if (tg3_flag(tp, 5755_PLUS) ||
16415 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16416 		tg3_flag_set(tp, SHORT_DMA_BUG);
16417 
16418 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16419 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16420 
16421 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16422 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16423 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16424 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16425 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16426 
16427 	if (tg3_flag(tp, 57765_PLUS) &&
16428 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16429 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16430 
16431 	if (!tg3_flag(tp, 5705_PLUS) ||
16432 	    tg3_flag(tp, 5780_CLASS) ||
16433 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16434 		tg3_flag_set(tp, JUMBO_CAPABLE);
16435 
16436 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16437 			      &pci_state_reg);
16438 
16439 	if (pci_is_pcie(tp->pdev)) {
16440 		u16 lnkctl;
16441 
16442 		tg3_flag_set(tp, PCI_EXPRESS);
16443 
16444 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16445 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16446 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16447 				tg3_flag_clear(tp, HW_TSO_2);
16448 				tg3_flag_clear(tp, TSO_CAPABLE);
16449 			}
16450 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16451 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16452 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16453 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16454 				tg3_flag_set(tp, CLKREQ_BUG);
16455 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16456 			tg3_flag_set(tp, L1PLLPD_EN);
16457 		}
16458 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16459 		/* BCM5785 devices are effectively PCIe devices, and should
16460 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16461 		 * section.
16462 		 */
16463 		tg3_flag_set(tp, PCI_EXPRESS);
16464 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16465 		   tg3_flag(tp, 5780_CLASS)) {
16466 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16467 		if (!tp->pcix_cap) {
16468 			dev_err(&tp->pdev->dev,
16469 				"Cannot find PCI-X capability, aborting\n");
16470 			return -EIO;
16471 		}
16472 
16473 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16474 			tg3_flag_set(tp, PCIX_MODE);
16475 	}
16476 
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering of mailbox register writes done by the host
	 * controller can cause serious problems.  We read back after
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
16483 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16484 	    !tg3_flag(tp, PCI_EXPRESS))
16485 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16486 
16487 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16488 			     &tp->pci_cacheline_sz);
16489 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16490 			     &tp->pci_lat_timer);
16491 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16492 	    tp->pci_lat_timer < 64) {
16493 		tp->pci_lat_timer = 64;
16494 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16495 				      tp->pci_lat_timer);
16496 	}
16497 
	/* Important! -- The PCI-X hw workaround must be decided
	 * before the first MMIO register access.
	 */
16501 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16502 		/* 5700 BX chips need to have their TX producer index
16503 		 * mailboxes written twice to workaround a bug.
16504 		 */
16505 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16506 
		/* If we are in PCI-X mode, enable the register write
		 * workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip register writes except those to the
		 * mailbox registers.
		 */
16512 		if (tg3_flag(tp, PCIX_MODE)) {
16513 			u32 pm_reg;
16514 
16515 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16516 
			/* The chip can have its power management PCI config
16518 			 * space registers clobbered due to this bug.
16519 			 * So explicitly force the chip into D0 here.
16520 			 */
16521 			pci_read_config_dword(tp->pdev,
16522 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16523 					      &pm_reg);
16524 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16525 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16526 			pci_write_config_dword(tp->pdev,
16527 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16528 					       pm_reg);
16529 
16530 			/* Also, force SERR#/PERR# in PCI command. */
16531 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16532 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16533 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16534 		}
16535 	}
16536 
16537 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16538 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16539 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16540 		tg3_flag_set(tp, PCI_32BIT);
16541 
16542 	/* Chip-specific fixup from Broadcom driver */
16543 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16544 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16545 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16546 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16547 	}
16548 
16549 	/* Default fast path register access methods */
16550 	tp->read32 = tg3_read32;
16551 	tp->write32 = tg3_write32;
16552 	tp->read32_mbox = tg3_read32;
16553 	tp->write32_mbox = tg3_write32;
16554 	tp->write32_tx_mbox = tg3_write32;
16555 	tp->write32_rx_mbox = tg3_write32;
16556 
16557 	/* Various workaround register access methods */
16558 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16559 		tp->write32 = tg3_write_indirect_reg32;
16560 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16561 		 (tg3_flag(tp, PCI_EXPRESS) &&
16562 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16563 		/*
16564 		 * Back to back register writes can cause problems on these
16565 		 * chips, the workaround is to read back all reg writes
16566 		 * except those to mailbox regs.
16567 		 *
16568 		 * See tg3_write_indirect_reg32().
16569 		 */
16570 		tp->write32 = tg3_write_flush_reg32;
16571 	}
16572 
16573 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16574 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16575 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16576 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16577 	}
16578 
16579 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16580 		tp->read32 = tg3_read_indirect_reg32;
16581 		tp->write32 = tg3_write_indirect_reg32;
16582 		tp->read32_mbox = tg3_read_indirect_mbox;
16583 		tp->write32_mbox = tg3_write_indirect_mbox;
16584 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16585 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16586 
16587 		iounmap(tp->regs);
16588 		tp->regs = NULL;
16589 
16590 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16591 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16592 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16593 	}
16594 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16595 		tp->read32_mbox = tg3_read32_mbox_5906;
16596 		tp->write32_mbox = tg3_write32_mbox_5906;
16597 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16598 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16599 	}
16600 
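	/* When register writes go through the indirect method, or on
	 * 5700/5701 in PCI-X mode, SRAM must be accessed through the
	 * PCI config space window rather than direct MMIO.
	 */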
16601 	if (tp->write32 == tg3_write_indirect_reg32 ||
16602 	    (tg3_flag(tp, PCIX_MODE) &&
16603 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16604 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16605 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16606 
16607 	/* The memory arbiter has to be enabled in order for SRAM accesses
16608 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16609 	 * sure it is enabled, but other entities such as system netboot
16610 	 * code might disable it.
16611 	 */
16612 	val = tr32(MEMARB_MODE);
16613 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16614 
16615 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16616 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16617 	    tg3_flag(tp, 5780_CLASS)) {
16618 		if (tg3_flag(tp, PCIX_MODE)) {
16619 			pci_read_config_dword(tp->pdev,
16620 					      tp->pcix_cap + PCI_X_STATUS,
16621 					      &val);
16622 			tp->pci_fn = val & 0x7;
16623 		}
16624 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16625 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16626 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16627 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16628 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16629 			val = tr32(TG3_CPMU_STATUS);
16630 
16631 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16632 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16633 		else
16634 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16635 				     TG3_CPMU_STATUS_FSHFT_5719;
16636 	}
16637 
16638 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16639 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16640 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16641 	}
16642 
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the EEPROM_WRITE_PROT flag is set, GPIO1 is used for
	 * eeprom write protect, which also implies that this is a LOM
	 * where GPIOs are not used to switch power.
	 */
16651 	tg3_get_eeprom_hw_cfg(tp);
16652 
16653 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16654 		tg3_flag_clear(tp, TSO_CAPABLE);
16655 		tg3_flag_clear(tp, TSO_BUG);
16656 		tp->fw_needed = NULL;
16657 	}
16658 
16659 	if (tg3_flag(tp, ENABLE_APE)) {
16660 		/* Allow reads and writes to the
16661 		 * APE register and memory space.
16662 		 */
16663 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16664 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16665 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16666 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16667 				       pci_state_reg);
16668 
16669 		tg3_ape_lock_init(tp);
16670 		tp->ape_hb_interval =
16671 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16672 	}
16673 
16674 	/* Set up tp->grc_local_ctrl before calling
16675 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16676 	 * will bring 5700's external PHY out of reset.
16677 	 * It is also used as eeprom write protect on LOMs.
16678 	 */
16679 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16680 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16681 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16682 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16683 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16684 	/* Unused GPIO3 must be driven as output on 5752 because there
16685 	 * are no pull-up resistors on unused GPIO pins.
16686 	 */
16687 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16688 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16689 
16690 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16691 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16692 	    tg3_flag(tp, 57765_CLASS))
16693 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16694 
16695 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16696 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16697 		/* Turn off the debug UART. */
16698 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16699 		if (tg3_flag(tp, IS_NIC))
16700 			/* Keep VMain power. */
16701 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16702 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16703 	}
16704 
16705 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16706 		tp->grc_local_ctrl |=
16707 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16708 
16709 	/* Switch out of Vaux if it is a NIC */
16710 	tg3_pwrsrc_switch_to_vmain(tp);
16711 
16712 	/* Derive initial jumbo mode from MTU assigned in
16713 	 * ether_setup() via the alloc_etherdev() call
16714 	 */
16715 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16716 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16717 
16718 	/* Determine WakeOnLan speed to use. */
16719 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16720 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16721 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16722 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16723 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16724 	} else {
16725 		tg3_flag_set(tp, WOL_SPEED_100MB);
16726 	}
16727 
16728 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16729 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16730 
	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16732 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16733 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16734 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16735 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16736 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16737 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16738 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16739 
16740 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16741 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16742 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16743 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16744 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16745 
16746 	if (tg3_flag(tp, 5705_PLUS) &&
16747 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16748 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16749 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16750 	    !tg3_flag(tp, 57765_PLUS)) {
16751 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16752 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16753 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16754 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16755 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16756 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16757 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16758 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16759 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16760 		} else
16761 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16762 	}
16763 
16764 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16765 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16766 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16767 		if (tp->phy_otp == 0)
16768 			tp->phy_otp = TG3_OTP_DEFAULT;
16769 	}
16770 
16771 	if (tg3_flag(tp, CPMU_PRESENT))
16772 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16773 	else
16774 		tp->mi_mode = MAC_MI_MODE_BASE;
16775 
16776 	tp->coalesce_mode = 0;
16777 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16778 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16779 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16780 
16781 	/* Set these bits to enable statistics workaround. */
16782 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16783 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16784 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16785 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16786 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16787 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16788 	}
16789 
16790 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16791 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16792 		tg3_flag_set(tp, USE_PHYLIB);
16793 
16794 	err = tg3_mdio_init(tp);
16795 	if (err)
16796 		return err;
16797 
16798 	/* Initialize data/descriptor byte/word swapping. */
16799 	val = tr32(GRC_MODE);
16800 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16801 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16802 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16803 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16804 			GRC_MODE_B2HRX_ENABLE |
16805 			GRC_MODE_HTX2B_ENABLE |
16806 			GRC_MODE_HOST_STACKUP);
16807 	else
16808 		val &= GRC_MODE_HOST_STACKUP;
16809 
16810 	tw32(GRC_MODE, val | tp->grc_mode);
16811 
16812 	tg3_switch_clocks(tp);
16813 
16814 	/* Clear this out for sanity. */
16815 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16816 
16817 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16818 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16819 
16820 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16821 			      &pci_state_reg);
16822 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16823 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16824 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16825 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16826 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16827 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16828 			void __iomem *sram_base;
16829 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * readback value is bad, force enable the PCI-X
			 * workaround.
			 */
16834 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16835 
16836 			writel(0x00000000, sram_base);
16837 			writel(0x00000000, sram_base + 4);
16838 			writel(0xffffffff, sram_base + 4);
16839 			if (readl(sram_base) != 0x00000000)
16840 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16841 		}
16842 	}
16843 
16844 	udelay(50);
16845 	tg3_nvram_init(tp);
16846 
16847 	/* If the device has an NVRAM, no need to load patch firmware */
16848 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16849 	    !tg3_flag(tp, NO_NVRAM))
16850 		tp->fw_needed = NULL;
16851 
16852 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16853 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16854 
16855 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16856 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16857 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16858 		tg3_flag_set(tp, IS_5788);
16859 
16860 	if (!tg3_flag(tp, IS_5788) &&
16861 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16862 		tg3_flag_set(tp, TAGGED_STATUS);
16863 	if (tg3_flag(tp, TAGGED_STATUS)) {
16864 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16865 				      HOSTCC_MODE_CLRTICK_TXBD);
16866 
16867 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16868 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16869 				       tp->misc_host_ctrl);
16870 	}
16871 
16872 	/* Preserve the APE MAC_MODE bits */
16873 	if (tg3_flag(tp, ENABLE_APE))
16874 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16875 	else
16876 		tp->mac_mode = 0;
16877 
16878 	if (tg3_10_100_only_device(tp, ent))
16879 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16880 
16881 	err = tg3_phy_probe(tp);
16882 	if (err) {
16883 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16884 		/* ... but do not return immediately ... */
16885 		tg3_mdio_fini(tp);
16886 	}
16887 
16888 	tg3_read_vpd(tp);
16889 	tg3_read_fw_ver(tp);
16890 
16891 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16892 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16893 	} else {
16894 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16895 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16896 		else
16897 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16898 	}
16899 
16900 	/* 5700 {AX,BX} chips have a broken status block link
16901 	 * change bit implementation, so we must use the
16902 	 * status register in those cases.
16903 	 */
16904 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16905 		tg3_flag_set(tp, USE_LINKCHG_REG);
16906 	else
16907 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16908 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
16913 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16914 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16915 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16916 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16917 		tg3_flag_set(tp, USE_LINKCHG_REG);
16918 	}
16919 
16920 	/* For all SERDES we poll the MAC status register. */
16921 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16922 		tg3_flag_set(tp, POLL_SERDES);
16923 	else
16924 		tg3_flag_clear(tp, POLL_SERDES);
16925 
16926 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16927 		tg3_flag_set(tp, POLL_CPMU_LINK);
16928 
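	/* Reserve headroom so the IP header lands on a 4-byte boundary
	 * (NET_IP_ALIGN).  The 5701 in PCI-X mode cannot DMA to an
	 * offset rx buffer, so drop the alignment there; on platforms
	 * without efficient unaligned access, force every packet
	 * through the aligned copy path instead.
	 */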
16929 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16930 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16931 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16932 	    tg3_flag(tp, PCIX_MODE)) {
16933 		tp->rx_offset = NET_SKB_PAD;
16934 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16935 		tp->rx_copy_thresh = ~(u16)0;
16936 #endif
16937 	}
16938 
16939 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16940 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16941 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16942 
16943 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16944 
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
16948 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16949 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16950 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16951 		tp->rx_std_max_post = 8;
16952 
16953 	if (tg3_flag(tp, ASPM_WORKAROUND))
16954 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16955 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16956 
16957 	return err;
16958 }
16959 
16960 #ifdef CONFIG_SPARC
16961 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16962 {
16963 	struct net_device *dev = tp->dev;
16964 	struct pci_dev *pdev = tp->pdev;
16965 	struct device_node *dp = pci_device_to_OF_node(pdev);
16966 	const unsigned char *addr;
16967 	int len;
16968 
16969 	addr = of_get_property(dp, "local-mac-address", &len);
16970 	if (addr && len == ETH_ALEN) {
16971 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16972 		return 0;
16973 	}
16974 	return -ENODEV;
16975 }
16976 
16977 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16978 {
16979 	struct net_device *dev = tp->dev;
16980 
16981 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16982 	return 0;
16983 }
16984 #endif
16985 
16986 static int tg3_get_device_address(struct tg3 *tp)
16987 {
16988 	struct net_device *dev = tp->dev;
16989 	u32 hi, lo, mac_offset;
16990 	int addr_ok = 0;
16991 	int err;
16992 
16993 #ifdef CONFIG_SPARC
16994 	if (!tg3_get_macaddr_sparc(tp))
16995 		return 0;
16996 #endif
16997 
16998 	if (tg3_flag(tp, IS_SSB_CORE)) {
16999 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17000 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17001 			return 0;
17002 	}
17003 
17004 	mac_offset = 0x7c;
17005 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17006 	    tg3_flag(tp, 5780_CLASS)) {
17007 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17008 			mac_offset = 0xcc;
17009 		if (tg3_nvram_lock(tp))
17010 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17011 		else
17012 			tg3_nvram_unlock(tp);
17013 	} else if (tg3_flag(tp, 5717_PLUS)) {
17014 		if (tp->pci_fn & 1)
17015 			mac_offset = 0xcc;
17016 		if (tp->pci_fn > 1)
17017 			mac_offset += 0x18c;
17018 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17019 		mac_offset = 0x10;
17020 
17021 	/* First try to get it from MAC address mailbox. */
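	/* Bootcode stores the ASCII signature "HK" (0x484b) in the
	 * upper half of the high mailbox word when it has programmed
	 * a MAC address there.
	 */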
17022 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17023 	if ((hi >> 16) == 0x484b) {
17024 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17025 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17026 
17027 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17028 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17029 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17030 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17031 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17032 
17033 		/* Some old bootcode may report a 0 MAC address in SRAM */
17034 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17035 	}
17036 	if (!addr_ok) {
17037 		/* Next, try NVRAM. */
17038 		if (!tg3_flag(tp, NO_NVRAM) &&
17039 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17040 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17041 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17042 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17043 		}
17044 		/* Finally just fetch it out of the MAC control regs. */
17045 		else {
17046 			hi = tr32(MAC_ADDR_0_HIGH);
17047 			lo = tr32(MAC_ADDR_0_LOW);
17048 
17049 			dev->dev_addr[5] = lo & 0xff;
17050 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17051 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17052 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17053 			dev->dev_addr[1] = hi & 0xff;
17054 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17055 		}
17056 	}
17057 
17058 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17059 #ifdef CONFIG_SPARC
17060 		if (!tg3_get_default_macaddr_sparc(tp))
17061 			return 0;
17062 #endif
17063 		return -EINVAL;
17064 	}
17065 	return 0;
17066 }
17067 
17068 #define BOUNDARY_SINGLE_CACHELINE	1
17069 #define BOUNDARY_MULTI_CACHELINE	2
17070 
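/* Compute the DMA read/write boundary bits for the DMA_RWCTRL register,
 * based on the host cache line size and the bus type, merging them into
 * the caller-provided value.
 */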
17071 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17072 {
17073 	int cacheline_size;
17074 	u8 byte;
17075 	int goal;
17076 
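	/* PCI_CACHE_LINE_SIZE is in units of 32-bit dwords.  A value of
	 * zero means it was left unset; assume the 1024-byte worst case.
	 */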
17077 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17078 	if (byte == 0)
17079 		cacheline_size = 1024;
17080 	else
17081 		cacheline_size = (int) byte * 4;
17082 
17083 	/* On 5703 and later chips, the boundary bits have no
17084 	 * effect.
17085 	 */
17086 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17087 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17088 	    !tg3_flag(tp, PCI_EXPRESS))
17089 		goto out;
17090 
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
17100 
17101 	if (tg3_flag(tp, 57765_PLUS)) {
17102 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17103 		goto out;
17104 	}
17105 
17106 	if (!goal)
17107 		goto out;
17108 
17109 	/* PCI controllers on most RISC systems tend to disconnect
17110 	 * when a device tries to burst across a cache-line boundary.
17111 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17112 	 *
17113 	 * Unfortunately, for PCI-E there are only limited
17114 	 * write-side controls for this, and thus for reads
17115 	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write on chips
	 * other than 5700 and 5701, which do not implement the
	 * boundary bits.
17119 	 */
17120 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17121 		switch (cacheline_size) {
17122 		case 16:
17123 		case 32:
17124 		case 64:
17125 		case 128:
17126 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17127 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17128 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17129 			} else {
17130 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17131 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17132 			}
17133 			break;
17134 
17135 		case 256:
17136 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17137 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17138 			break;
17139 
17140 		default:
17141 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17142 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17143 			break;
17144 		}
17145 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17146 		switch (cacheline_size) {
17147 		case 16:
17148 		case 32:
17149 		case 64:
17150 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17151 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17152 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17153 				break;
17154 			}
17155 			/* fallthrough */
17156 		case 128:
17157 		default:
17158 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17159 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17160 			break;
17161 		}
17162 	} else {
17163 		switch (cacheline_size) {
17164 		case 16:
17165 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17166 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17167 					DMA_RWCTRL_WRITE_BNDRY_16);
17168 				break;
17169 			}
17170 			/* fallthrough */
17171 		case 32:
17172 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17173 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17174 					DMA_RWCTRL_WRITE_BNDRY_32);
17175 				break;
17176 			}
17177 			/* fallthrough */
17178 		case 64:
17179 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17180 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17181 					DMA_RWCTRL_WRITE_BNDRY_64);
17182 				break;
17183 			}
17184 			/* fallthrough */
17185 		case 128:
17186 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17187 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17188 					DMA_RWCTRL_WRITE_BNDRY_128);
17189 				break;
17190 			}
17191 			/* fallthrough */
17192 		case 256:
17193 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17194 				DMA_RWCTRL_WRITE_BNDRY_256);
17195 			break;
17196 		case 512:
17197 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17198 				DMA_RWCTRL_WRITE_BNDRY_512);
17199 			break;
17200 		case 1024:
17201 		default:
17202 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17203 				DMA_RWCTRL_WRITE_BNDRY_1024);
17204 			break;
17205 		}
17206 	}
17207 
17208 out:
17209 	return val;
17210 }
17211 
17212 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17213 			   int size, bool to_device)
17214 {
17215 	struct tg3_internal_buffer_desc test_desc;
17216 	u32 sram_dma_descs;
17217 	int i, ret;
17218 
17219 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17220 
17221 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17222 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17223 	tw32(RDMAC_STATUS, 0);
17224 	tw32(WDMAC_STATUS, 0);
17225 
17226 	tw32(BUFMGR_MODE, 0);
17227 	tw32(FTQ_RESET, 0);
17228 
17229 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17230 	test_desc.addr_lo = buf_dma & 0xffffffff;
17231 	test_desc.nic_mbuf = 0x00002100;
17232 	test_desc.len = size;
17233 
17234 	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17236 	 * the *second* time the tg3 driver was getting loaded after an
17237 	 * initial scan.
17238 	 *
17239 	 * Broadcom tells me:
17240 	 *   ...the DMA engine is connected to the GRC block and a DMA
17241 	 *   reset may affect the GRC block in some unpredictable way...
17242 	 *   The behavior of resets to individual blocks has not been tested.
17243 	 *
17244 	 * Broadcom noted the GRC reset will also reset all sub-components.
17245 	 */
17246 	if (to_device) {
17247 		test_desc.cqid_sqid = (13 << 8) | 2;
17248 
17249 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17250 		udelay(40);
17251 	} else {
17252 		test_desc.cqid_sqid = (16 << 8) | 7;
17253 
17254 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17255 		udelay(40);
17256 	}
17257 	test_desc.flags = 0x00000005;
17258 
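	/* Copy the test descriptor word by word into NIC SRAM through
	 * the PCI memory window config registers.
	 */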
17259 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17260 		u32 val;
17261 
17262 		val = *(((u32 *)&test_desc) + i);
17263 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17264 				       sram_dma_descs + (i * sizeof(u32)));
17265 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17266 	}
17267 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17268 
17269 	if (to_device)
17270 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17271 	else
17272 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17273 
17274 	ret = -ENODEV;
17275 	for (i = 0; i < 40; i++) {
17276 		u32 val;
17277 
17278 		if (to_device)
17279 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17280 		else
17281 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17282 		if ((val & 0xffff) == sram_dma_descs) {
17283 			ret = 0;
17284 			break;
17285 		}
17286 
17287 		udelay(100);
17288 	}
17289 
17290 	return ret;
17291 }
17292 
17293 #define TEST_BUFFER_SIZE	0x2000
17294 
17295 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17296 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17297 	{ },
17298 };
17299 
17300 static int tg3_test_dma(struct tg3 *tp)
17301 {
17302 	dma_addr_t buf_dma;
17303 	u32 *buf, saved_dma_rwctrl;
17304 	int ret = 0;
17305 
17306 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17307 				 &buf_dma, GFP_KERNEL);
17308 	if (!buf) {
17309 		ret = -ENOMEM;
17310 		goto out_nofree;
17311 	}
17312 
17313 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17314 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17315 
17316 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17317 
17318 	if (tg3_flag(tp, 57765_PLUS))
17319 		goto out;
17320 
17321 	if (tg3_flag(tp, PCI_EXPRESS)) {
17322 		/* DMA read watermark not used on PCIE */
17323 		tp->dma_rwctrl |= 0x00180000;
17324 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17325 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17326 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17327 			tp->dma_rwctrl |= 0x003f0000;
17328 		else
17329 			tp->dma_rwctrl |= 0x003f000f;
17330 	} else {
17331 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17332 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17333 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17334 			u32 read_water = 0x7;
17335 
17336 			/* If the 5704 is behind the EPB bridge, we can
17337 			 * do the less restrictive ONE_DMA workaround for
17338 			 * better performance.
17339 			 */
17340 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17341 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17342 				tp->dma_rwctrl |= 0x8000;
17343 			else if (ccval == 0x6 || ccval == 0x7)
17344 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17345 
17346 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17347 				read_water = 4;
17348 			/* Set bit 23 to enable PCIX hw bug fix */
17349 			tp->dma_rwctrl |=
17350 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17351 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17352 				(1 << 23);
17353 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17354 			/* 5780 always in PCIX mode */
17355 			tp->dma_rwctrl |= 0x00144000;
17356 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17357 			/* 5714 always in PCIX mode */
17358 			tp->dma_rwctrl |= 0x00148000;
17359 		} else {
17360 			tp->dma_rwctrl |= 0x001b000f;
17361 		}
17362 	}
17363 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17364 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17365 
17366 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17367 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17368 		tp->dma_rwctrl &= 0xfffffff0;
17369 
17370 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17371 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17372 		/* Remove this if it causes problems for some boards. */
17373 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17374 
17375 		/* On 5700/5701 chips, we need to set this bit.
17376 		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory without all of the byte
		 * enables turned on.  This is an error on several
17379 		 * RISC PCI controllers, in particular sparc64.
17380 		 *
17381 		 * On 5703/5704 chips, this bit has been reassigned
17382 		 * a different meaning.  In particular, it is used
17383 		 * on those chips to enable a PCI-X workaround.
17384 		 */
17385 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17386 	}
17387 
17388 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17392 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17393 		goto out;
17394 
17395 	/* It is best to perform DMA test with maximum write burst size
17396 	 * to expose the 5700/5701 write DMA bug.
17397 	 */
17398 	saved_dma_rwctrl = tp->dma_rwctrl;
17399 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17400 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17401 
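	/* Fill the buffer with a known pattern, DMA it to the chip and
	 * back, and verify it.  On corruption, tighten the write
	 * boundary to 16 bytes and retry once before giving up.
	 */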
17402 	while (1) {
17403 		u32 *p = buf, i;
17404 
17405 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17406 			p[i] = i;
17407 
17408 		/* Send the buffer to the chip. */
17409 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17410 		if (ret) {
17411 			dev_err(&tp->pdev->dev,
17412 				"%s: Buffer write failed. err = %d\n",
17413 				__func__, ret);
17414 			break;
17415 		}
17416 
17417 		/* Now read it back. */
17418 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17419 		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer read failed. err = %d\n",
				__func__, ret);
17422 			break;
17423 		}
17424 
17425 		/* Verify it. */
17426 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17427 			if (p[i] == i)
17428 				continue;
17429 
17430 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17431 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17432 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17433 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17434 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17435 				break;
17436 			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! (%d != %d)\n",
					__func__, p[i], i);
17440 				ret = -ENODEV;
17441 				goto out;
17442 			}
17443 		}
17444 
17445 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17446 			/* Success. */
17447 			ret = 0;
17448 			break;
17449 		}
17450 	}
17451 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17452 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17453 		/* DMA test passed without adjusting DMA boundary,
17454 		 * now look for chipsets that are known to expose the
17455 		 * DMA bug without failing the test.
17456 		 */
17457 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17458 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17459 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17460 		} else {
17461 			/* Safe to use the calculated DMA boundary. */
17462 			tp->dma_rwctrl = saved_dma_rwctrl;
17463 		}
17464 
17465 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17466 	}
17467 
17468 out:
17469 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17470 out_nofree:
17471 	return ret;
17472 }
17473 
17474 static void tg3_init_bufmgr_config(struct tg3 *tp)
17475 {
17476 	if (tg3_flag(tp, 57765_PLUS)) {
17477 		tp->bufmgr_config.mbuf_read_dma_low_water =
17478 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17479 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17480 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17481 		tp->bufmgr_config.mbuf_high_water =
17482 			DEFAULT_MB_HIGH_WATER_57765;
17483 
17484 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17485 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17486 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17487 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17488 		tp->bufmgr_config.mbuf_high_water_jumbo =
17489 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17490 	} else if (tg3_flag(tp, 5705_PLUS)) {
17491 		tp->bufmgr_config.mbuf_read_dma_low_water =
17492 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17493 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17494 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17495 		tp->bufmgr_config.mbuf_high_water =
17496 			DEFAULT_MB_HIGH_WATER_5705;
17497 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17498 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17499 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17500 			tp->bufmgr_config.mbuf_high_water =
17501 				DEFAULT_MB_HIGH_WATER_5906;
17502 		}
17503 
17504 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17505 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17506 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17507 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17508 		tp->bufmgr_config.mbuf_high_water_jumbo =
17509 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17510 	} else {
17511 		tp->bufmgr_config.mbuf_read_dma_low_water =
17512 			DEFAULT_MB_RDMA_LOW_WATER;
17513 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17514 			DEFAULT_MB_MACRX_LOW_WATER;
17515 		tp->bufmgr_config.mbuf_high_water =
17516 			DEFAULT_MB_HIGH_WATER;
17517 
17518 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17519 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17520 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17521 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17522 		tp->bufmgr_config.mbuf_high_water_jumbo =
17523 			DEFAULT_MB_HIGH_WATER_JUMBO;
17524 	}
17525 
17526 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17527 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17528 }
17529 
17530 static char *tg3_phy_string(struct tg3 *tp)
17531 {
17532 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17533 	case TG3_PHY_ID_BCM5400:	return "5400";
17534 	case TG3_PHY_ID_BCM5401:	return "5401";
17535 	case TG3_PHY_ID_BCM5411:	return "5411";
17536 	case TG3_PHY_ID_BCM5701:	return "5701";
17537 	case TG3_PHY_ID_BCM5703:	return "5703";
17538 	case TG3_PHY_ID_BCM5704:	return "5704";
17539 	case TG3_PHY_ID_BCM5705:	return "5705";
17540 	case TG3_PHY_ID_BCM5750:	return "5750";
17541 	case TG3_PHY_ID_BCM5752:	return "5752";
17542 	case TG3_PHY_ID_BCM5714:	return "5714";
17543 	case TG3_PHY_ID_BCM5780:	return "5780";
17544 	case TG3_PHY_ID_BCM5755:	return "5755";
17545 	case TG3_PHY_ID_BCM5787:	return "5787";
17546 	case TG3_PHY_ID_BCM5784:	return "5784";
17547 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17548 	case TG3_PHY_ID_BCM5906:	return "5906";
17549 	case TG3_PHY_ID_BCM5761:	return "5761";
17550 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17551 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17552 	case TG3_PHY_ID_BCM57765:	return "57765";
17553 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17554 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17555 	case TG3_PHY_ID_BCM5762:	return "5762C";
17556 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17557 	case 0:			return "serdes";
17558 	default:		return "unknown";
17559 	}
17560 }
17561 
17562 static char *tg3_bus_string(struct tg3 *tp, char *str)
17563 {
17564 	if (tg3_flag(tp, PCI_EXPRESS)) {
17565 		strcpy(str, "PCI Express");
17566 		return str;
17567 	} else if (tg3_flag(tp, PCIX_MODE)) {
17568 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17569 
17570 		strcpy(str, "PCIX:");
17571 
17572 		if ((clock_ctrl == 7) ||
17573 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17574 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17575 			strcat(str, "133MHz");
17576 		else if (clock_ctrl == 0)
17577 			strcat(str, "33MHz");
17578 		else if (clock_ctrl == 2)
17579 			strcat(str, "50MHz");
17580 		else if (clock_ctrl == 4)
17581 			strcat(str, "66MHz");
17582 		else if (clock_ctrl == 6)
17583 			strcat(str, "100MHz");
17584 	} else {
17585 		strcpy(str, "PCI:");
17586 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17587 			strcat(str, "66MHz");
17588 		else
17589 			strcat(str, "33MHz");
17590 	}
17591 	if (tg3_flag(tp, PCI_32BIT))
17592 		strcat(str, ":32-bit");
17593 	else
17594 		strcat(str, ":64-bit");
17595 	return str;
17596 }
17597 
17598 static void tg3_init_coal(struct tg3 *tp)
17599 {
17600 	struct ethtool_coalesce *ec = &tp->coal;
17601 
17602 	memset(ec, 0, sizeof(*ec));
17603 	ec->cmd = ETHTOOL_GCOALESCE;
17604 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17605 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17606 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17607 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17608 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17609 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17610 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17611 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17612 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17613 
17614 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17615 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17616 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17617 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17618 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17619 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17620 	}
17621 
17622 	if (tg3_flag(tp, 5705_PLUS)) {
17623 		ec->rx_coalesce_usecs_irq = 0;
17624 		ec->tx_coalesce_usecs_irq = 0;
17625 		ec->stats_block_coalesce_usecs = 0;
17626 	}
17627 }
17628 
17629 static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
17631 {
17632 	struct net_device *dev;
17633 	struct tg3 *tp;
17634 	int i, err;
17635 	u32 sndmbx, rcvmbx, intmbx;
17636 	char str[40];
17637 	u64 dma_mask, persist_dma_mask;
17638 	netdev_features_t features = 0;
17639 
17640 	printk_once(KERN_INFO "%s\n", version);
17641 
17642 	err = pci_enable_device(pdev);
17643 	if (err) {
17644 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17645 		return err;
17646 	}
17647 
17648 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17649 	if (err) {
17650 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17651 		goto err_out_disable_pdev;
17652 	}
17653 
17654 	pci_set_master(pdev);
17655 
17656 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17657 	if (!dev) {
17658 		err = -ENOMEM;
17659 		goto err_out_free_res;
17660 	}
17661 
17662 	SET_NETDEV_DEV(dev, &pdev->dev);
17663 
17664 	tp = netdev_priv(dev);
17665 	tp->pdev = pdev;
17666 	tp->dev = dev;
17667 	tp->rx_mode = TG3_DEF_RX_MODE;
17668 	tp->tx_mode = TG3_DEF_TX_MODE;
17669 	tp->irq_sync = 1;
17670 	tp->pcierr_recovery = false;
17671 
17672 	if (tg3_debug > 0)
17673 		tp->msg_enable = tg3_debug;
17674 	else
17675 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17676 
17677 	if (pdev_is_ssb_gige_core(pdev)) {
17678 		tg3_flag_set(tp, IS_SSB_CORE);
17679 		if (ssb_gige_must_flush_posted_writes(pdev))
17680 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17681 		if (ssb_gige_one_dma_at_once(pdev))
17682 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17683 		if (ssb_gige_have_roboswitch(pdev)) {
17684 			tg3_flag_set(tp, USE_PHYLIB);
17685 			tg3_flag_set(tp, ROBOSWITCH);
17686 		}
17687 		if (ssb_gige_is_rgmii(pdev))
17688 			tg3_flag_set(tp, RGMII_MODE);
17689 	}
17690 
17691 	/* The word/byte swap controls here control register access byte
17692 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17693 	 * setting below.
17694 	 */
17695 	tp->misc_host_ctrl =
17696 		MISC_HOST_CTRL_MASK_PCI_INT |
17697 		MISC_HOST_CTRL_WORD_SWAP |
17698 		MISC_HOST_CTRL_INDIR_ACCESS |
17699 		MISC_HOST_CTRL_PCISTATE_RW;
17700 
17701 	/* The NONFRM (non-frame) byte/word swap controls take effect
17702 	 * on descriptor entries, anything which isn't packet data.
17703 	 *
17704 	 * The StrongARM chips on the board (one for tx, one for rx)
17705 	 * are running in big-endian mode.
17706 	 */
17707 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17708 			GRC_MODE_WSWAP_NONFRM_DATA);
17709 #ifdef __BIG_ENDIAN
17710 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17711 #endif
17712 	spin_lock_init(&tp->lock);
17713 	spin_lock_init(&tp->indirect_lock);
17714 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17715 
17716 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17717 	if (!tp->regs) {
17718 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17719 		err = -ENOMEM;
17720 		goto err_out_free_dev;
17721 	}
17722 
17723 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17724 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17725 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17726 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17727 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17728 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17729 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17730 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17731 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17732 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17733 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17734 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17735 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17736 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17737 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17738 		tg3_flag_set(tp, ENABLE_APE);
17739 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17740 		if (!tp->aperegs) {
17741 			dev_err(&pdev->dev,
17742 				"Cannot map APE registers, aborting\n");
17743 			err = -ENOMEM;
17744 			goto err_out_iounmap;
17745 		}
17746 	}
17747 
17748 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17749 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17750 
17751 	dev->ethtool_ops = &tg3_ethtool_ops;
17752 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17753 	dev->netdev_ops = &tg3_netdev_ops;
17754 	dev->irq = pdev->irq;
17755 
17756 	err = tg3_get_invariants(tp, ent);
17757 	if (err) {
17758 		dev_err(&pdev->dev,
17759 			"Problem fetching invariants of chip, aborting\n");
17760 		goto err_out_apeunmap;
17761 	}
17762 
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses wider
	 * than 40 bits.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
17769 	if (tg3_flag(tp, IS_5788))
17770 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17771 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17772 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17773 #ifdef CONFIG_HIGHMEM
17774 		dma_mask = DMA_BIT_MASK(64);
17775 #endif
17776 	} else
17777 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17778 
17779 	/* Configure DMA attributes. */
17780 	if (dma_mask > DMA_BIT_MASK(32)) {
17781 		err = pci_set_dma_mask(pdev, dma_mask);
17782 		if (!err) {
17783 			features |= NETIF_F_HIGHDMA;
17784 			err = pci_set_consistent_dma_mask(pdev,
17785 							  persist_dma_mask);
17786 			if (err < 0) {
17787 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17788 					"DMA for consistent allocations\n");
17789 				goto err_out_apeunmap;
17790 			}
17791 		}
17792 	}
17793 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17794 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17795 		if (err) {
17796 			dev_err(&pdev->dev,
17797 				"No usable DMA configuration, aborting\n");
17798 			goto err_out_apeunmap;
17799 		}
17800 	}
17801 
17802 	tg3_init_bufmgr_config(tp);
17803 
17804 	/* 5700 B0 chips do not support checksumming correctly due
17805 	 * to hardware bugs.
17806 	 */
17807 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17808 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17809 
17810 		if (tg3_flag(tp, 5755_PLUS))
17811 			features |= NETIF_F_IPV6_CSUM;
17812 	}
17813 
17814 	/* TSO is on by default on chips that support hardware TSO.
17815 	 * Firmware TSO on older chips gives lower performance, so it
17816 	 * is off by default, but can be enabled using ethtool.
17817 	 */
17818 	if ((tg3_flag(tp, HW_TSO_1) ||
17819 	     tg3_flag(tp, HW_TSO_2) ||
17820 	     tg3_flag(tp, HW_TSO_3)) &&
17821 	    (features & NETIF_F_IP_CSUM))
17822 		features |= NETIF_F_TSO;
17823 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17824 		if (features & NETIF_F_IPV6_CSUM)
17825 			features |= NETIF_F_TSO6;
17826 		if (tg3_flag(tp, HW_TSO_3) ||
17827 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17828 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17829 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17830 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17831 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17832 			features |= NETIF_F_TSO_ECN;
17833 	}
17834 
17835 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17836 			 NETIF_F_HW_VLAN_CTAG_RX;
17837 	dev->vlan_features |= features;
17838 
17839 	/*
17840 	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17842 	 * loopback for the remaining devices.
17843 	 */
17844 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17845 	    !tg3_flag(tp, CPMU_PRESENT))
17846 		/* Add the loopback capability */
17847 		features |= NETIF_F_LOOPBACK;
17848 
17849 	dev->hw_features |= features;
17850 	dev->priv_flags |= IFF_UNICAST_FLT;
17851 
17852 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17853 	dev->min_mtu = TG3_MIN_MTU;
17854 	dev->max_mtu = TG3_MAX_MTU(tp);
17855 
17856 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17857 	    !tg3_flag(tp, TSO_CAPABLE) &&
17858 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17859 		tg3_flag_set(tp, MAX_RXPEND_64);
17860 		tp->rx_pending = 63;
17861 	}
17862 
17863 	err = tg3_get_device_address(tp);
17864 	if (err) {
17865 		dev_err(&pdev->dev,
17866 			"Could not obtain valid ethernet address, aborting\n");
17867 		goto err_out_apeunmap;
17868 	}
17869 
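	/* Seed the per-vector interrupt, rx consumer, and tx producer
	 * mailbox offsets for each NAPI context.
	 */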
17870 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17871 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17872 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17873 	for (i = 0; i < tp->irq_max; i++) {
17874 		struct tg3_napi *tnapi = &tp->napi[i];
17875 
17876 		tnapi->tp = tp;
17877 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17878 
17879 		tnapi->int_mbox = intmbx;
17880 		if (i <= 4)
17881 			intmbx += 0x8;
17882 		else
17883 			intmbx += 0x4;
17884 
17885 		tnapi->consmbox = rcvmbx;
17886 		tnapi->prodmbox = sndmbx;
17887 
17888 		if (i)
17889 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17890 		else
17891 			tnapi->coal_now = HOSTCC_MODE_NOW;
17892 
17893 		if (!tg3_flag(tp, SUPPORT_MSIX))
17894 			break;
17895 
17896 		/*
17897 		 * If we support MSIX, we'll be using RSS.  If we're using
17898 		 * RSS, the first vector only handles link interrupts and the
17899 		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set up
		 * above are still useful for the single-vector mode.
17902 		 */
17903 		if (!i)
17904 			continue;
17905 
17906 		rcvmbx += 0x8;
17907 
17908 		if (sndmbx & 0x4)
17909 			sndmbx -= 0x4;
17910 		else
17911 			sndmbx += 0xc;
17912 	}
17913 
17914 	/*
17915 	 * Reset chip in case UNDI or EFI driver did not shutdown
17916 	 * DMA self test will enable WDMAC and we'll see (spurious)
17917 	 * pending DMA on the PCI bus at that point.
17918 	 */
17919 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17920 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17921 		tg3_full_lock(tp, 0);
17922 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17923 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17924 		tg3_full_unlock(tp);
17925 	}
17926 
17927 	err = tg3_test_dma(tp);
17928 	if (err) {
17929 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17930 		goto err_out_apeunmap;
17931 	}
17932 
17933 	tg3_init_coal(tp);
17934 
17935 	pci_set_drvdata(pdev, dev);
17936 
17937 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17938 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17939 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17940 		tg3_flag_set(tp, PTP_CAPABLE);
17941 
17942 	tg3_timer_init(tp);
17943 
17944 	tg3_carrier_off(tp);
17945 
17946 	err = register_netdev(dev);
17947 	if (err) {
17948 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17949 		goto err_out_apeunmap;
17950 	}
17951 
17952 	if (tg3_flag(tp, PTP_CAPABLE)) {
17953 		tg3_ptp_init(tp);
17954 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17955 						   &tp->pdev->dev);
17956 		if (IS_ERR(tp->ptp_clock))
17957 			tp->ptp_clock = NULL;
17958 	}
17959 
17960 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17961 		    tp->board_part_number,
17962 		    tg3_chip_rev_id(tp),
17963 		    tg3_bus_string(tp, str),
17964 		    dev->dev_addr);
17965 
17966 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17967 		char *ethtype;
17968 
17969 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17970 			ethtype = "10/100Base-TX";
17971 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17972 			ethtype = "1000Base-SX";
17973 		else
17974 			ethtype = "10/100/1000Base-T";
17975 
17976 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17977 			    "(WireSpeed[%d], EEE[%d])\n",
17978 			    tg3_phy_string(tp), ethtype,
17979 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17980 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17981 	}
17982 
17983 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17984 		    (dev->features & NETIF_F_RXCSUM) != 0,
17985 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17986 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17987 		    tg3_flag(tp, ENABLE_ASF) != 0,
17988 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17989 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17990 		    tp->dma_rwctrl,
17991 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17992 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17993 
17994 	pci_save_state(pdev);
17995 
17996 	return 0;
17997 
17998 err_out_apeunmap:
17999 	if (tp->aperegs) {
18000 		iounmap(tp->aperegs);
18001 		tp->aperegs = NULL;
18002 	}
18003 
18004 err_out_iounmap:
18005 	if (tp->regs) {
18006 		iounmap(tp->regs);
18007 		tp->regs = NULL;
18008 	}
18009 
18010 err_out_free_dev:
18011 	free_netdev(dev);
18012 
18013 err_out_free_res:
18014 	pci_release_regions(pdev);
18015 
18016 err_out_disable_pdev:
18017 	if (pci_is_enabled(pdev))
18018 		pci_disable_device(pdev);
18019 	return err;
18020 }
18021 
18022 static void tg3_remove_one(struct pci_dev *pdev)
18023 {
18024 	struct net_device *dev = pci_get_drvdata(pdev);
18025 
18026 	if (dev) {
18027 		struct tg3 *tp = netdev_priv(dev);
18028 
18029 		tg3_ptp_fini(tp);
18030 
18031 		release_firmware(tp->fw);
18032 
18033 		tg3_reset_task_cancel(tp);
18034 
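		/* If phylib manages the PHY, disconnect from it and tear
		 * down the MDIO bus before the netdev is unregistered.
		 */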
18035 		if (tg3_flag(tp, USE_PHYLIB)) {
18036 			tg3_phy_fini(tp);
18037 			tg3_mdio_fini(tp);
18038 		}
18039 
18040 		unregister_netdev(dev);
18041 		if (tp->aperegs) {
18042 			iounmap(tp->aperegs);
18043 			tp->aperegs = NULL;
18044 		}
18045 		if (tp->regs) {
18046 			iounmap(tp->regs);
18047 			tp->regs = NULL;
18048 		}
18049 		free_netdev(dev);
18050 		pci_release_regions(pdev);
18051 		pci_disable_device(pdev);
18052 	}
18053 }
18054 
18055 #ifdef CONFIG_PM_SLEEP
18056 static int tg3_suspend(struct device *device)
18057 {
18058 	struct pci_dev *pdev = to_pci_dev(device);
18059 	struct net_device *dev = pci_get_drvdata(pdev);
18060 	struct tg3 *tp = netdev_priv(dev);
18061 	int err = 0;
18062 
18063 	rtnl_lock();
18064 
18065 	if (!netif_running(dev))
18066 		goto unlock;
18067 
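	/* Quiesce in dependency order: cancel any pending reset work,
	 * stop the PHY, stop the data path, then kill the periodic timer.
	 */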
18068 	tg3_reset_task_cancel(tp);
18069 	tg3_phy_stop(tp);
18070 	tg3_netif_stop(tp);
18071 
18072 	tg3_timer_stop(tp);
18073 
18074 	tg3_full_lock(tp, 1);
18075 	tg3_disable_ints(tp);
18076 	tg3_full_unlock(tp);
18077 
18078 	netif_device_detach(dev);
18079 
18080 	tg3_full_lock(tp, 0);
18081 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18082 	tg3_flag_clear(tp, INIT_COMPLETE);
18083 	tg3_full_unlock(tp);
18084 
18085 	err = tg3_power_down_prepare(tp);
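	/* If preparing for power-down failed, restart the hardware so
	 * the interface comes back in a usable state instead of being
	 * left halted.
	 */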
18086 	if (err) {
18087 		int err2;
18088 
18089 		tg3_full_lock(tp, 0);
18090 
18091 		tg3_flag_set(tp, INIT_COMPLETE);
18092 		err2 = tg3_restart_hw(tp, true);
18093 		if (err2)
18094 			goto out;
18095 
18096 		tg3_timer_start(tp);
18097 
18098 		netif_device_attach(dev);
18099 		tg3_netif_start(tp);
18100 
18101 out:
18102 		tg3_full_unlock(tp);
18103 
18104 		if (!err2)
18105 			tg3_phy_start(tp);
18106 	}
18107 
18108 unlock:
18109 	rtnl_unlock();
18110 	return err;
18111 }
18112 
18113 static int tg3_resume(struct device *device)
18114 {
18115 	struct pci_dev *pdev = to_pci_dev(device);
18116 	struct net_device *dev = pci_get_drvdata(pdev);
18117 	struct tg3 *tp = netdev_priv(dev);
18118 	int err = 0;
18119 
18120 	rtnl_lock();
18121 
18122 	if (!netif_running(dev))
18123 		goto unlock;
18124 
18125 	netif_device_attach(dev);
18126 
18127 	tg3_full_lock(tp, 0);
18128 
18129 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18130 
18131 	tg3_flag_set(tp, INIT_COMPLETE);
18132 	err = tg3_restart_hw(tp,
18133 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18134 	if (err)
18135 		goto out;
18136 
18137 	tg3_timer_start(tp);
18138 
18139 	tg3_netif_start(tp);
18140 
18141 out:
18142 	tg3_full_unlock(tp);
18143 
18144 	if (!err)
18145 		tg3_phy_start(tp);
18146 
18147 unlock:
18148 	rtnl_unlock();
18149 	return err;
18150 }
18151 #endif /* CONFIG_PM_SLEEP */
18152 
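/*
 * SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into all of the
 * system sleep callbacks (suspend, freeze, poweroff, and their resume
 * counterparts).  Without CONFIG_PM_SLEEP it expands to an empty ops
 * table, which is why the two functions above are conditionally built.
 */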
18153 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18154 
18155 static void tg3_shutdown(struct pci_dev *pdev)
18156 {
18157 	struct net_device *dev = pci_get_drvdata(pdev);
18158 	struct tg3 *tp = netdev_priv(dev);
18159 
18160 	rtnl_lock();
18161 	netif_device_detach(dev);
18162 
18163 	if (netif_running(dev))
18164 		dev_close(dev);
18165 
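	/* Only drop into the low-power state on a real power-off;
	 * tg3_power_down() also arms PCI wakeup (Wake-on-LAN) when
	 * WOL is enabled.
	 */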
18166 	if (system_state == SYSTEM_POWER_OFF)
18167 		tg3_power_down(tp);
18168 
18169 	rtnl_unlock();
18170 }
18171 
18172 /**
18173  * tg3_io_error_detected - called when PCI error is detected
18174  * @pdev: Pointer to PCI device
18175  * @state: The current pci connection state
18176  *
18177  * This function is called after a PCI bus error affecting
18178  * this device has been detected.
18179  */
18180 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18181 					      pci_channel_state_t state)
18182 {
18183 	struct net_device *netdev = pci_get_drvdata(pdev);
18184 	struct tg3 *tp = netdev_priv(netdev);
18185 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18186 
	if (netdev)
		netdev_info(netdev, "PCI I/O error detected\n");
18188 
18189 	rtnl_lock();
18190 
	/* The netdev may not be registered yet, or may be down */
18192 	if (!netdev || !netif_running(netdev))
18193 		goto done;
18194 
	/* A permanently failed channel is never recovered, so only flag
	 * recovery in progress for a frozen channel.
	 */
18196 	if (state == pci_channel_io_frozen)
18197 		tp->pcierr_recovery = true;
18198 
18199 	tg3_phy_stop(tp);
18200 
18201 	tg3_netif_stop(tp);
18202 
18203 	tg3_timer_stop(tp);
18204 
	/* Make sure the reset task cannot run while we tear down */
18206 	tg3_reset_task_cancel(tp);
18207 
18208 	netif_device_detach(netdev);
18209 
18210 	/* Clean up software state, even if MMIO is blocked */
18211 	tg3_full_lock(tp, 0);
18212 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18213 	tg3_full_unlock(tp);
18214 
18215 done:
18216 	if (state == pci_channel_io_perm_failure) {
18217 		if (netdev) {
18218 			tg3_napi_enable(tp);
18219 			dev_close(netdev);
18220 		}
18221 		err = PCI_ERS_RESULT_DISCONNECT;
18222 	} else {
18223 		pci_disable_device(pdev);
18224 	}
18225 
18226 	rtnl_unlock();
18227 
18228 	return err;
18229 }
18230 
18231 /**
18232  * tg3_io_slot_reset - called after the pci bus has been reset.
18233  * @pdev: Pointer to PCI device
18234  *
18235  * Restart the card from scratch, as if from a cold-boot.
18236  * At this point, the card has exprienced a hard reset,
18237  * followed by fixups by BIOS, and has its config space
18238  * set up identically to what it was at cold boot.
18239  */
18240 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18241 {
18242 	struct net_device *netdev = pci_get_drvdata(pdev);
18243 	struct tg3 *tp = netdev_priv(netdev);
18244 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18245 	int err;
18246 
18247 	rtnl_lock();
18248 
18249 	if (pci_enable_device(pdev)) {
18250 		dev_err(&pdev->dev,
18251 			"Cannot re-enable PCI device after reset.\n");
18252 		goto done;
18253 	}
18254 
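	/* Re-apply the config space captured by pci_save_state() at the
	 * end of probe, then save it again so any further resets start
	 * from this restored state.
	 */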
18255 	pci_set_master(pdev);
18256 	pci_restore_state(pdev);
18257 	pci_save_state(pdev);
18258 
18259 	if (!netdev || !netif_running(netdev)) {
18260 		rc = PCI_ERS_RESULT_RECOVERED;
18261 		goto done;
18262 	}
18263 
18264 	err = tg3_power_up(tp);
18265 	if (err)
18266 		goto done;
18267 
18268 	rc = PCI_ERS_RESULT_RECOVERED;
18269 
18270 done:
18271 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18272 		tg3_napi_enable(tp);
18273 		dev_close(netdev);
18274 	}
18275 	rtnl_unlock();
18276 
18277 	return rc;
18278 }
18279 
18280 /**
18281  * tg3_io_resume - called when traffic can start flowing again.
18282  * @pdev: Pointer to PCI device
18283  *
18284  * This callback is called when the error recovery driver tells
18285  * us that its OK to resume normal operation.
18286  */
18287 static void tg3_io_resume(struct pci_dev *pdev)
18288 {
18289 	struct net_device *netdev = pci_get_drvdata(pdev);
18290 	struct tg3 *tp = netdev_priv(netdev);
18291 	int err;
18292 
18293 	rtnl_lock();
18294 
18295 	if (!netdev || !netif_running(netdev))
18296 		goto done;
18297 
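	/* Mirror the normal resume path: re-announce the driver state
	 * to the APE firmware and do a full hardware restart under the
	 * device lock.
	 */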
18298 	tg3_full_lock(tp, 0);
18299 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18300 	tg3_flag_set(tp, INIT_COMPLETE);
18301 	err = tg3_restart_hw(tp, true);
18302 	if (err) {
18303 		tg3_full_unlock(tp);
18304 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18305 		goto done;
18306 	}
18307 
18308 	netif_device_attach(netdev);
18309 
18310 	tg3_timer_start(tp);
18311 
18312 	tg3_netif_start(tp);
18313 
18314 	tg3_full_unlock(tp);
18315 
18316 	tg3_phy_start(tp);
18317 
18318 done:
18319 	tp->pcierr_recovery = false;
18320 	rtnl_unlock();
18321 }
18322 
18323 static const struct pci_error_handlers tg3_err_handler = {
18324 	.error_detected	= tg3_io_error_detected,
18325 	.slot_reset	= tg3_io_slot_reset,
18326 	.resume		= tg3_io_resume
18327 };
18328 
18329 static struct pci_driver tg3_driver = {
18330 	.name		= DRV_MODULE_NAME,
18331 	.id_table	= tg3_pci_tbl,
18332 	.probe		= tg3_init_one,
18333 	.remove		= tg3_remove_one,
18334 	.err_handler	= &tg3_err_handler,
18335 	.driver.pm	= &tg3_pm_ops,
18336 	.shutdown	= tg3_shutdown,
18337 };
18338 
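/*
 * module_pci_driver() generates the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver.
 */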
18339 module_pci_driver(tg3_driver);
18340