1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2014 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/interrupt.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 
51 #include <net/checksum.h>
52 #include <net/ip.h>
53 
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57 
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60 
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65 
66 #define BAR_0	0
67 #define BAR_2	2
68 
69 #include "tg3.h"
70 
71 /* Functions & macros to verify TG3_FLAGS types */
72 
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	return test_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	set_bit(flag, bits);
81 }
82 
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	clear_bit(flag, bits);
86 }
87 
88 #define tg3_flag(tp, flag)				\
89 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)				\
91 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)			\
93 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
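
/* Usage sketch (illustrative; mirrors calls made later in this file).
 * The accessors above take the short flag name and paste it onto the
 * TG3_FLAG_ prefix, so feature tests and updates read as:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		...;
 *	tg3_flag_set(tp, MDIOBUS_INITED);
 *	tg3_flag_clear(tp, MDIOBUS_INITED);
 *
 * The underlying test_bit()/set_bit()/clear_bit() operations are atomic,
 * so individual flags can be updated without extra locking.
 */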
94 
95 #define DRV_MODULE_NAME		"tg3"
96 #define TG3_MAJ_NUM			3
97 #define TG3_MIN_NUM			137
98 #define DRV_MODULE_VERSION	\
99 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE	"May 11, 2014"
101 
102 #define RESET_KIND_SHUTDOWN	0
103 #define RESET_KIND_INIT		1
104 #define RESET_KIND_SUSPEND	2
105 
106 #define TG3_DEF_RX_MODE		0
107 #define TG3_DEF_TX_MODE		0
108 #define TG3_DEF_MSG_ENABLE	  \
109 	(NETIF_MSG_DRV		| \
110 	 NETIF_MSG_PROBE	| \
111 	 NETIF_MSG_LINK		| \
112 	 NETIF_MSG_TIMER	| \
113 	 NETIF_MSG_IFDOWN	| \
114 	 NETIF_MSG_IFUP		| \
115 	 NETIF_MSG_RX_ERR	| \
116 	 NETIF_MSG_TX_ERR)
117 
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119 
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123 
124 #define TG3_TX_TIMEOUT			(5 * HZ)
125 
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU			60
128 #define TG3_MAX_MTU(tp)	\
129 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING		200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
143 
144 /* Do not place this n-ring entries value into the tp struct itself;
145  * we really want to expose these constants to GCC so that modulo et
146  * al. operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150 
151 #define TG3_TX_RING_SIZE		512
152 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153 
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161 				 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
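/* NEXT_TX() above is an instance of the '& (foo - 1)' trick described in
 * the comment before TG3_TX_RING_SIZE: because TG3_TX_RING_SIZE is a
 * power of two, the mask is equivalent to '% TG3_TX_RING_SIZE' without a
 * hw modulo instruction.
 */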
163 
164 #define TG3_DMA_BYTE_ENAB		64
165 
166 #define TG3_RX_STD_DMA_SZ		1536
167 #define TG3_RX_JMB_DMA_SZ		9046
168 
169 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170 
171 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD		256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194 #else
195 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196 #endif
197 
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202 #endif
203 
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K		2048
207 #define TG3_TX_BD_DMA_MAX_4K		4096
208 
209 #define TG3_RAW_IP_ALIGN 2
210 
211 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
212 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
213 
214 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
215 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
216 
217 #define FIRMWARE_TG3		"tigon/tg3.bin"
218 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
219 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
220 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
221 
222 static char version[] =
223 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
224 
225 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
226 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227 MODULE_LICENSE("GPL");
228 MODULE_VERSION(DRV_MODULE_VERSION);
229 MODULE_FIRMWARE(FIRMWARE_TG3);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232 
233 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
234 module_param(tg3_debug, int, 0);
235 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236 
237 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
238 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
239 
240 static const struct pci_device_id tg3_pci_tbl[] = {
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 			TG3_DRV_DATA_FLAG_5705_10_100},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 			TG3_DRV_DATA_FLAG_5705_10_100},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290 			PCI_VENDOR_ID_LENOVO,
291 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356 	{}
357 };
358 
359 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
360 
361 static const struct {
362 	const char string[ETH_GSTRING_LEN];
363 } ethtool_stats_keys[] = {
364 	{ "rx_octets" },
365 	{ "rx_fragments" },
366 	{ "rx_ucast_packets" },
367 	{ "rx_mcast_packets" },
368 	{ "rx_bcast_packets" },
369 	{ "rx_fcs_errors" },
370 	{ "rx_align_errors" },
371 	{ "rx_xon_pause_rcvd" },
372 	{ "rx_xoff_pause_rcvd" },
373 	{ "rx_mac_ctrl_rcvd" },
374 	{ "rx_xoff_entered" },
375 	{ "rx_frame_too_long_errors" },
376 	{ "rx_jabbers" },
377 	{ "rx_undersize_packets" },
378 	{ "rx_in_length_errors" },
379 	{ "rx_out_length_errors" },
380 	{ "rx_64_or_less_octet_packets" },
381 	{ "rx_65_to_127_octet_packets" },
382 	{ "rx_128_to_255_octet_packets" },
383 	{ "rx_256_to_511_octet_packets" },
384 	{ "rx_512_to_1023_octet_packets" },
385 	{ "rx_1024_to_1522_octet_packets" },
386 	{ "rx_1523_to_2047_octet_packets" },
387 	{ "rx_2048_to_4095_octet_packets" },
388 	{ "rx_4096_to_8191_octet_packets" },
389 	{ "rx_8192_to_9022_octet_packets" },
390 
391 	{ "tx_octets" },
392 	{ "tx_collisions" },
393 
394 	{ "tx_xon_sent" },
395 	{ "tx_xoff_sent" },
396 	{ "tx_flow_control" },
397 	{ "tx_mac_errors" },
398 	{ "tx_single_collisions" },
399 	{ "tx_mult_collisions" },
400 	{ "tx_deferred" },
401 	{ "tx_excessive_collisions" },
402 	{ "tx_late_collisions" },
403 	{ "tx_collide_2times" },
404 	{ "tx_collide_3times" },
405 	{ "tx_collide_4times" },
406 	{ "tx_collide_5times" },
407 	{ "tx_collide_6times" },
408 	{ "tx_collide_7times" },
409 	{ "tx_collide_8times" },
410 	{ "tx_collide_9times" },
411 	{ "tx_collide_10times" },
412 	{ "tx_collide_11times" },
413 	{ "tx_collide_12times" },
414 	{ "tx_collide_13times" },
415 	{ "tx_collide_14times" },
416 	{ "tx_collide_15times" },
417 	{ "tx_ucast_packets" },
418 	{ "tx_mcast_packets" },
419 	{ "tx_bcast_packets" },
420 	{ "tx_carrier_sense_errors" },
421 	{ "tx_discards" },
422 	{ "tx_errors" },
423 
424 	{ "dma_writeq_full" },
425 	{ "dma_write_prioq_full" },
426 	{ "rxbds_empty" },
427 	{ "rx_discards" },
428 	{ "rx_errors" },
429 	{ "rx_threshold_hit" },
430 
431 	{ "dma_readq_full" },
432 	{ "dma_read_prioq_full" },
433 	{ "tx_comp_queue_full" },
434 
435 	{ "ring_set_send_prod_index" },
436 	{ "ring_status_update" },
437 	{ "nic_irqs" },
438 	{ "nic_avoided_irqs" },
439 	{ "nic_tx_threshold_hit" },
440 
441 	{ "mbuf_lwm_thresh_hit" },
442 };
443 
444 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
445 #define TG3_NVRAM_TEST		0
446 #define TG3_LINK_TEST		1
447 #define TG3_REGISTER_TEST	2
448 #define TG3_MEMORY_TEST		3
449 #define TG3_MAC_LOOPB_TEST	4
450 #define TG3_PHY_LOOPB_TEST	5
451 #define TG3_EXT_LOOPB_TEST	6
452 #define TG3_INTERRUPT_TEST	7
453 
454 
455 static const struct {
456 	const char string[ETH_GSTRING_LEN];
457 } ethtool_test_keys[] = {
458 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
459 	[TG3_LINK_TEST]		= { "link test         (online) " },
460 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
461 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
462 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
463 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
464 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
465 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
466 };
467 
468 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
469 
470 
471 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
472 {
473 	writel(val, tp->regs + off);
474 }
475 
476 static u32 tg3_read32(struct tg3 *tp, u32 off)
477 {
478 	return readl(tp->regs + off);
479 }
480 
481 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
482 {
483 	writel(val, tp->aperegs + off);
484 }
485 
486 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
487 {
488 	return readl(tp->aperegs + off);
489 }
490 
491 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
492 {
493 	unsigned long flags;
494 
495 	spin_lock_irqsave(&tp->indirect_lock, flags);
496 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
497 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
498 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500 
501 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
502 {
503 	writel(val, tp->regs + off);
504 	readl(tp->regs + off);
505 }
506 
507 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
508 {
509 	unsigned long flags;
510 	u32 val;
511 
512 	spin_lock_irqsave(&tp->indirect_lock, flags);
513 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
514 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
515 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 	return val;
517 }
518 
519 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
520 {
521 	unsigned long flags;
522 
523 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
524 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
525 				       TG3_64BIT_REG_LOW, val);
526 		return;
527 	}
528 	if (off == TG3_RX_STD_PROD_IDX_REG) {
529 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
530 				       TG3_64BIT_REG_LOW, val);
531 		return;
532 	}
533 
534 	spin_lock_irqsave(&tp->indirect_lock, flags);
535 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
536 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
537 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
538 
539 	/* In indirect mode when disabling interrupts, we also need
540 	 * to clear the interrupt bit in the GRC local ctrl register.
541 	 */
542 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
543 	    (val == 0x1)) {
544 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
545 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
546 	}
547 }
548 
549 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
550 {
551 	unsigned long flags;
552 	u32 val;
553 
554 	spin_lock_irqsave(&tp->indirect_lock, flags);
555 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
556 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
557 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
558 	return val;
559 }
560 
561 /* usec_wait specifies the wait time in usec when writing to certain registers
562  * where it is unsafe to read back the register without some delay.
563  * One example is GRC_LOCAL_CTRL, when the GPIOs are toggled to switch power.
564  * Another is TG3PCI_CLOCK_CTRL, when the clock frequencies are changed.
565  */
566 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
567 {
568 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
569 		/* Non-posted methods */
570 		tp->write32(tp, off, val);
571 	else {
572 		/* Posted method */
573 		tg3_write32(tp, off, val);
574 		if (usec_wait)
575 			udelay(usec_wait);
576 		tp->read32(tp, off);
577 	}
578 	/* Wait again after the read for the posted method to guarantee that
579 	 * the wait time is met.
580 	 */
581 	if (usec_wait)
582 		udelay(usec_wait);
583 }
584 
585 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
586 {
587 	tp->write32_mbox(tp, off, val);
588 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
589 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
590 	     !tg3_flag(tp, ICH_WORKAROUND)))
591 		tp->read32_mbox(tp, off);
592 }
593 
594 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
595 {
596 	void __iomem *mbox = tp->regs + off;
597 	writel(val, mbox);
598 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
599 		writel(val, mbox);
600 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
601 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
602 		readl(mbox);
603 }
604 
605 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
606 {
607 	return readl(tp->regs + off + GRCMBOX_BASE);
608 }
609 
610 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
611 {
612 	writel(val, tp->regs + off + GRCMBOX_BASE);
613 }
614 
615 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
616 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
617 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
618 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
619 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
620 
621 #define tw32(reg, val)			tp->write32(tp, reg, val)
622 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
623 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
624 #define tr32(reg)			tp->read32(tp, reg)
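
/* Usage sketch (illustrative; patterned on calls later in this file).
 * Plain tw32()/tr32() go through the chip-specific accessors installed in
 * struct tg3, tw32_f() additionally flushes the posted write by reading
 * the register back, and tw32_wait_f() also enforces a settling delay,
 * e.g. for clock switching or GPIO power toggling:
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */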
625 
626 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
627 {
628 	unsigned long flags;
629 
630 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
631 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
632 		return;
633 
634 	spin_lock_irqsave(&tp->indirect_lock, flags);
635 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
636 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
638 
639 		/* Always leave this as zero. */
640 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
641 	} else {
642 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
643 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
644 
645 		/* Always leave this as zero. */
646 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
647 	}
648 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
649 }
650 
651 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
652 {
653 	unsigned long flags;
654 
655 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
656 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
657 		*val = 0;
658 		return;
659 	}
660 
661 	spin_lock_irqsave(&tp->indirect_lock, flags);
662 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
663 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
664 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
665 
666 		/* Always leave this as zero. */
667 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
668 	} else {
669 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
670 		*val = tr32(TG3PCI_MEM_WIN_DATA);
671 
672 		/* Always leave this as zero. */
673 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
674 	}
675 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
676 }
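
/* Usage sketch (illustrative; mirrors tg3_ump_link_report() below).
 * tg3_write_mem()/tg3_read_mem() access NIC SRAM through the memory
 * window, e.g. to post a firmware mailbox command:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
 */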
677 
678 static void tg3_ape_lock_init(struct tg3 *tp)
679 {
680 	int i;
681 	u32 regbase, bit;
682 
683 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
684 		regbase = TG3_APE_LOCK_GRANT;
685 	else
686 		regbase = TG3_APE_PER_LOCK_GRANT;
687 
688 	/* Make sure the driver isn't holding any stale locks. */
689 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
690 		switch (i) {
691 		case TG3_APE_LOCK_PHY0:
692 		case TG3_APE_LOCK_PHY1:
693 		case TG3_APE_LOCK_PHY2:
694 		case TG3_APE_LOCK_PHY3:
695 			bit = APE_LOCK_GRANT_DRIVER;
696 			break;
697 		default:
698 			if (!tp->pci_fn)
699 				bit = APE_LOCK_GRANT_DRIVER;
700 			else
701 				bit = 1 << tp->pci_fn;
702 		}
703 		tg3_ape_write32(tp, regbase + 4 * i, bit);
704 	}
705 
706 }
707 
708 static int tg3_ape_lock(struct tg3 *tp, int locknum)
709 {
710 	int i, off;
711 	int ret = 0;
712 	u32 status, req, gnt, bit;
713 
714 	if (!tg3_flag(tp, ENABLE_APE))
715 		return 0;
716 
717 	switch (locknum) {
718 	case TG3_APE_LOCK_GPIO:
719 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
720 			return 0;
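		/* fallthru */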
721 	case TG3_APE_LOCK_GRC:
722 	case TG3_APE_LOCK_MEM:
723 		if (!tp->pci_fn)
724 			bit = APE_LOCK_REQ_DRIVER;
725 		else
726 			bit = 1 << tp->pci_fn;
727 		break;
728 	case TG3_APE_LOCK_PHY0:
729 	case TG3_APE_LOCK_PHY1:
730 	case TG3_APE_LOCK_PHY2:
731 	case TG3_APE_LOCK_PHY3:
732 		bit = APE_LOCK_REQ_DRIVER;
733 		break;
734 	default:
735 		return -EINVAL;
736 	}
737 
738 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 		req = TG3_APE_LOCK_REQ;
740 		gnt = TG3_APE_LOCK_GRANT;
741 	} else {
742 		req = TG3_APE_PER_LOCK_REQ;
743 		gnt = TG3_APE_PER_LOCK_GRANT;
744 	}
745 
746 	off = 4 * locknum;
747 
748 	tg3_ape_write32(tp, req + off, bit);
749 
750 	/* Wait for up to 1 millisecond to acquire lock. */
751 	for (i = 0; i < 100; i++) {
752 		status = tg3_ape_read32(tp, gnt + off);
753 		if (status == bit)
754 			break;
755 		if (pci_channel_offline(tp->pdev))
756 			break;
757 
758 		udelay(10);
759 	}
760 
761 	if (status != bit) {
762 		/* Revoke the lock request. */
763 		tg3_ape_write32(tp, gnt + off, bit);
764 		ret = -EBUSY;
765 	}
766 
767 	return ret;
768 }
769 
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 	u32 gnt, bit;
773 
774 	if (!tg3_flag(tp, ENABLE_APE))
775 		return;
776 
777 	switch (locknum) {
778 	case TG3_APE_LOCK_GPIO:
779 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 			return;
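		/* fallthru */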
781 	case TG3_APE_LOCK_GRC:
782 	case TG3_APE_LOCK_MEM:
783 		if (!tp->pci_fn)
784 			bit = APE_LOCK_GRANT_DRIVER;
785 		else
786 			bit = 1 << tp->pci_fn;
787 		break;
788 	case TG3_APE_LOCK_PHY0:
789 	case TG3_APE_LOCK_PHY1:
790 	case TG3_APE_LOCK_PHY2:
791 	case TG3_APE_LOCK_PHY3:
792 		bit = APE_LOCK_GRANT_DRIVER;
793 		break;
794 	default:
795 		return;
796 	}
797 
798 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
799 		gnt = TG3_APE_LOCK_GRANT;
800 	else
801 		gnt = TG3_APE_PER_LOCK_GRANT;
802 
803 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
804 }
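
/* Usage sketch (illustrative; mirrors tg3_ape_event_lock() below).
 * APE locks are taken and released in matched pairs around accesses to
 * the shared APE resource:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */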
805 
806 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
807 {
808 	u32 apedata;
809 
810 	while (timeout_us) {
811 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
812 			return -EBUSY;
813 
814 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
815 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
816 			break;
817 
818 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
819 
820 		udelay(10);
821 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
822 	}
823 
824 	return timeout_us ? 0 : -EBUSY;
825 }
826 
827 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
828 {
829 	u32 i, apedata;
830 
831 	for (i = 0; i < timeout_us / 10; i++) {
832 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
833 
834 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
835 			break;
836 
837 		udelay(10);
838 	}
839 
840 	return i == timeout_us / 10;
841 }
842 
843 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
844 				   u32 len)
845 {
846 	int err;
847 	u32 i, bufoff, msgoff, maxlen, apedata;
848 
849 	if (!tg3_flag(tp, APE_HAS_NCSI))
850 		return 0;
851 
852 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
853 	if (apedata != APE_SEG_SIG_MAGIC)
854 		return -ENODEV;
855 
856 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
857 	if (!(apedata & APE_FW_STATUS_READY))
858 		return -EAGAIN;
859 
860 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
861 		 TG3_APE_SHMEM_BASE;
862 	msgoff = bufoff + 2 * sizeof(u32);
863 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
864 
865 	while (len) {
866 		u32 length;
867 
868 		/* Cap xfer sizes to scratchpad limits. */
869 		length = (len > maxlen) ? maxlen : len;
870 		len -= length;
871 
872 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
873 		if (!(apedata & APE_FW_STATUS_READY))
874 			return -EAGAIN;
875 
876 		/* Wait for up to 1 msec for APE to service previous event. */
877 		err = tg3_ape_event_lock(tp, 1000);
878 		if (err)
879 			return err;
880 
881 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
882 			  APE_EVENT_STATUS_SCRTCHPD_READ |
883 			  APE_EVENT_STATUS_EVENT_PENDING;
884 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
885 
886 		tg3_ape_write32(tp, bufoff, base_off);
887 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
888 
889 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
890 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
891 
892 		base_off += length;
893 
894 		if (tg3_ape_wait_for_event(tp, 30000))
895 			return -EAGAIN;
896 
897 		for (i = 0; length; i += 4, length -= 4) {
898 			u32 val = tg3_ape_read32(tp, msgoff + i);
899 			memcpy(data, &val, sizeof(u32));
900 			data++;
901 		}
902 	}
903 
904 	return 0;
905 }
906 
907 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
908 {
909 	int err;
910 	u32 apedata;
911 
912 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
913 	if (apedata != APE_SEG_SIG_MAGIC)
914 		return -EAGAIN;
915 
916 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
917 	if (!(apedata & APE_FW_STATUS_READY))
918 		return -EAGAIN;
919 
920 	/* Wait for up to 1 millisecond for APE to service previous event. */
921 	err = tg3_ape_event_lock(tp, 1000);
922 	if (err)
923 		return err;
924 
925 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
926 			event | APE_EVENT_STATUS_EVENT_PENDING);
927 
928 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
929 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
930 
931 	return 0;
932 }
933 
934 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
935 {
936 	u32 event;
937 	u32 apedata;
938 
939 	if (!tg3_flag(tp, ENABLE_APE))
940 		return;
941 
942 	switch (kind) {
943 	case RESET_KIND_INIT:
944 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
945 				APE_HOST_SEG_SIG_MAGIC);
946 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
947 				APE_HOST_SEG_LEN_MAGIC);
948 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
949 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
950 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
951 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
952 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
953 				APE_HOST_BEHAV_NO_PHYLOCK);
954 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
955 				    TG3_APE_HOST_DRVR_STATE_START);
956 
957 		event = APE_EVENT_STATUS_STATE_START;
958 		break;
959 	case RESET_KIND_SHUTDOWN:
960 		/* With the interface we are currently using,
961 		 * APE does not track driver state.  Wiping
962 		 * out the HOST SEGMENT SIGNATURE forces
963 		 * the APE to assume OS absent status.
964 		 */
965 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
966 
967 		if (device_may_wakeup(&tp->pdev->dev) &&
968 		    tg3_flag(tp, WOL_ENABLE)) {
969 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
970 					    TG3_APE_HOST_WOL_SPEED_AUTO);
971 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
972 		} else
973 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
974 
975 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
976 
977 		event = APE_EVENT_STATUS_STATE_UNLOAD;
978 		break;
979 	default:
980 		return;
981 	}
982 
983 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
984 
985 	tg3_ape_send_event(tp, event);
986 }
987 
988 static void tg3_disable_ints(struct tg3 *tp)
989 {
990 	int i;
991 
992 	tw32(TG3PCI_MISC_HOST_CTRL,
993 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
994 	for (i = 0; i < tp->irq_max; i++)
995 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
996 }
997 
998 static void tg3_enable_ints(struct tg3 *tp)
999 {
1000 	int i;
1001 
1002 	tp->irq_sync = 0;
1003 	wmb();
1004 
1005 	tw32(TG3PCI_MISC_HOST_CTRL,
1006 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1007 
1008 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1009 	for (i = 0; i < tp->irq_cnt; i++) {
1010 		struct tg3_napi *tnapi = &tp->napi[i];
1011 
1012 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1013 		if (tg3_flag(tp, 1SHOT_MSI))
1014 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1015 
1016 		tp->coal_now |= tnapi->coal_now;
1017 	}
1018 
1019 	/* Force an initial interrupt */
1020 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1021 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1022 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1023 	else
1024 		tw32(HOSTCC_MODE, tp->coal_now);
1025 
1026 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1027 }
1028 
1029 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1030 {
1031 	struct tg3 *tp = tnapi->tp;
1032 	struct tg3_hw_status *sblk = tnapi->hw_status;
1033 	unsigned int work_exists = 0;
1034 
1035 	/* check for phy events */
1036 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1037 		if (sblk->status & SD_STATUS_LINK_CHG)
1038 			work_exists = 1;
1039 	}
1040 
1041 	/* check for TX work to do */
1042 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1043 		work_exists = 1;
1044 
1045 	/* check for RX work to do */
1046 	if (tnapi->rx_rcb_prod_idx &&
1047 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1048 		work_exists = 1;
1049 
1050 	return work_exists;
1051 }
1052 
1053 /* tg3_int_reenable
1054  *  similar to tg3_enable_ints, but it accurately determines whether there
1055  *  is new work pending and can return without flushing the PIO write
1056  *  that reenables interrupts.
1057  */
1058 static void tg3_int_reenable(struct tg3_napi *tnapi)
1059 {
1060 	struct tg3 *tp = tnapi->tp;
1061 
1062 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1063 	mmiowb();
1064 
1065 	/* When doing tagged status, this work check is unnecessary.
1066 	 * The last_tag we write above tells the chip which piece of
1067 	 * work we've completed.
1068 	 */
1069 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1070 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1071 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1072 }
1073 
1074 static void tg3_switch_clocks(struct tg3 *tp)
1075 {
1076 	u32 clock_ctrl;
1077 	u32 orig_clock_ctrl;
1078 
1079 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1080 		return;
1081 
1082 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1083 
1084 	orig_clock_ctrl = clock_ctrl;
1085 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1086 		       CLOCK_CTRL_CLKRUN_OENABLE |
1087 		       0x1f);
1088 	tp->pci_clock_ctrl = clock_ctrl;
1089 
1090 	if (tg3_flag(tp, 5705_PLUS)) {
1091 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1092 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1094 		}
1095 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1096 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1097 			    clock_ctrl |
1098 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1099 			    40);
1100 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1102 			    40);
1103 	}
1104 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1105 }
1106 
1107 #define PHY_BUSY_LOOPS	5000
1108 
1109 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1110 			 u32 *val)
1111 {
1112 	u32 frame_val;
1113 	unsigned int loops;
1114 	int ret;
1115 
1116 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1117 		tw32_f(MAC_MI_MODE,
1118 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1119 		udelay(80);
1120 	}
1121 
1122 	tg3_ape_lock(tp, tp->phy_ape_lock);
1123 
1124 	*val = 0x0;
1125 
1126 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1127 		      MI_COM_PHY_ADDR_MASK);
1128 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1129 		      MI_COM_REG_ADDR_MASK);
1130 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1131 
1132 	tw32_f(MAC_MI_COM, frame_val);
1133 
1134 	loops = PHY_BUSY_LOOPS;
1135 	while (loops != 0) {
1136 		udelay(10);
1137 		frame_val = tr32(MAC_MI_COM);
1138 
1139 		if ((frame_val & MI_COM_BUSY) == 0) {
1140 			udelay(5);
1141 			frame_val = tr32(MAC_MI_COM);
1142 			break;
1143 		}
1144 		loops -= 1;
1145 	}
1146 
1147 	ret = -EBUSY;
1148 	if (loops != 0) {
1149 		*val = frame_val & MI_COM_DATA_MASK;
1150 		ret = 0;
1151 	}
1152 
1153 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1154 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1155 		udelay(80);
1156 	}
1157 
1158 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1159 
1160 	return ret;
1161 }
1162 
1163 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1164 {
1165 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1166 }
1167 
1168 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1169 			  u32 val)
1170 {
1171 	u32 frame_val;
1172 	unsigned int loops;
1173 	int ret;
1174 
1175 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1176 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1177 		return 0;
1178 
1179 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1180 		tw32_f(MAC_MI_MODE,
1181 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1182 		udelay(80);
1183 	}
1184 
1185 	tg3_ape_lock(tp, tp->phy_ape_lock);
1186 
1187 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1188 		      MI_COM_PHY_ADDR_MASK);
1189 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1190 		      MI_COM_REG_ADDR_MASK);
1191 	frame_val |= (val & MI_COM_DATA_MASK);
1192 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1193 
1194 	tw32_f(MAC_MI_COM, frame_val);
1195 
1196 	loops = PHY_BUSY_LOOPS;
1197 	while (loops != 0) {
1198 		udelay(10);
1199 		frame_val = tr32(MAC_MI_COM);
1200 		if ((frame_val & MI_COM_BUSY) == 0) {
1201 			udelay(5);
1202 			frame_val = tr32(MAC_MI_COM);
1203 			break;
1204 		}
1205 		loops -= 1;
1206 	}
1207 
1208 	ret = -EBUSY;
1209 	if (loops != 0)
1210 		ret = 0;
1211 
1212 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1213 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1214 		udelay(80);
1215 	}
1216 
1217 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1218 
1219 	return ret;
1220 }
1221 
1222 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1223 {
1224 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1225 }
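
/* Usage sketch (illustrative only).  PHY registers are commonly updated
 * with a read-modify-write sequence built on the two helpers above:
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &val))
 *		tg3_writephy(tp, MII_BMCR, val | BMCR_RESET);
 *
 * Both helpers generally return 0 on success and -EBUSY if the MI
 * communication interface stays busy for PHY_BUSY_LOOPS polls.
 */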
1226 
1227 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1228 {
1229 	int err;
1230 
1231 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1232 	if (err)
1233 		goto done;
1234 
1235 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1236 	if (err)
1237 		goto done;
1238 
1239 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1240 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1241 	if (err)
1242 		goto done;
1243 
1244 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1245 
1246 done:
1247 	return err;
1248 }
1249 
1250 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1251 {
1252 	int err;
1253 
1254 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1255 	if (err)
1256 		goto done;
1257 
1258 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1259 	if (err)
1260 		goto done;
1261 
1262 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1263 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1264 	if (err)
1265 		goto done;
1266 
1267 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1268 
1269 done:
1270 	return err;
1271 }
1272 
1273 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1274 {
1275 	int err;
1276 
1277 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1278 	if (!err)
1279 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1280 
1281 	return err;
1282 }
1283 
1284 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1285 {
1286 	int err;
1287 
1288 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1289 	if (!err)
1290 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1291 
1292 	return err;
1293 }
1294 
1295 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1296 {
1297 	int err;
1298 
1299 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1300 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1301 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1302 	if (!err)
1303 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1304 
1305 	return err;
1306 }
1307 
1308 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1309 {
1310 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1311 		set |= MII_TG3_AUXCTL_MISC_WREN;
1312 
1313 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1314 }
1315 
1316 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1317 {
1318 	u32 val;
1319 	int err;
1320 
1321 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1322 
1323 	if (err)
1324 		return err;
1325 
1326 	if (enable)
1327 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1328 	else
1329 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1330 
1331 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1332 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1333 
1334 	return err;
1335 }
1336 
1337 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1338 {
1339 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1340 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1341 }
1342 
1343 static int tg3_bmcr_reset(struct tg3 *tp)
1344 {
1345 	u32 phy_control;
1346 	int limit, err;
1347 
1348 	/* OK, reset it, and poll the BMCR_RESET bit until it
1349 	 * clears or we time out.
1350 	 */
1351 	phy_control = BMCR_RESET;
1352 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1353 	if (err != 0)
1354 		return -EBUSY;
1355 
1356 	limit = 5000;
1357 	while (limit--) {
1358 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1359 		if (err != 0)
1360 			return -EBUSY;
1361 
1362 		if ((phy_control & BMCR_RESET) == 0) {
1363 			udelay(40);
1364 			break;
1365 		}
1366 		udelay(10);
1367 	}
1368 	if (limit < 0)
1369 		return -EBUSY;
1370 
1371 	return 0;
1372 }
1373 
1374 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1375 {
1376 	struct tg3 *tp = bp->priv;
1377 	u32 val;
1378 
1379 	spin_lock_bh(&tp->lock);
1380 
1381 	if (__tg3_readphy(tp, mii_id, reg, &val))
1382 		val = -EIO;
1383 
1384 	spin_unlock_bh(&tp->lock);
1385 
1386 	return val;
1387 }
1388 
1389 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1390 {
1391 	struct tg3 *tp = bp->priv;
1392 	u32 ret = 0;
1393 
1394 	spin_lock_bh(&tp->lock);
1395 
1396 	if (__tg3_writephy(tp, mii_id, reg, val))
1397 		ret = -EIO;
1398 
1399 	spin_unlock_bh(&tp->lock);
1400 
1401 	return ret;
1402 }
1403 
1404 static void tg3_mdio_config_5785(struct tg3 *tp)
1405 {
1406 	u32 val;
1407 	struct phy_device *phydev;
1408 
1409 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1410 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1411 	case PHY_ID_BCM50610:
1412 	case PHY_ID_BCM50610M:
1413 		val = MAC_PHYCFG2_50610_LED_MODES;
1414 		break;
1415 	case PHY_ID_BCMAC131:
1416 		val = MAC_PHYCFG2_AC131_LED_MODES;
1417 		break;
1418 	case PHY_ID_RTL8211C:
1419 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1420 		break;
1421 	case PHY_ID_RTL8201E:
1422 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1423 		break;
1424 	default:
1425 		return;
1426 	}
1427 
1428 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1429 		tw32(MAC_PHYCFG2, val);
1430 
1431 		val = tr32(MAC_PHYCFG1);
1432 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1433 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1434 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1435 		tw32(MAC_PHYCFG1, val);
1436 
1437 		return;
1438 	}
1439 
1440 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1441 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1442 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1443 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1444 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1445 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1446 		       MAC_PHYCFG2_INBAND_ENABLE;
1447 
1448 	tw32(MAC_PHYCFG2, val);
1449 
1450 	val = tr32(MAC_PHYCFG1);
1451 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1452 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1453 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1454 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1455 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1456 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1457 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1458 	}
1459 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1460 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1461 	tw32(MAC_PHYCFG1, val);
1462 
1463 	val = tr32(MAC_EXT_RGMII_MODE);
1464 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1465 		 MAC_RGMII_MODE_RX_QUALITY |
1466 		 MAC_RGMII_MODE_RX_ACTIVITY |
1467 		 MAC_RGMII_MODE_RX_ENG_DET |
1468 		 MAC_RGMII_MODE_TX_ENABLE |
1469 		 MAC_RGMII_MODE_TX_LOWPWR |
1470 		 MAC_RGMII_MODE_TX_RESET);
1471 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1472 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1473 			val |= MAC_RGMII_MODE_RX_INT_B |
1474 			       MAC_RGMII_MODE_RX_QUALITY |
1475 			       MAC_RGMII_MODE_RX_ACTIVITY |
1476 			       MAC_RGMII_MODE_RX_ENG_DET;
1477 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1478 			val |= MAC_RGMII_MODE_TX_ENABLE |
1479 			       MAC_RGMII_MODE_TX_LOWPWR |
1480 			       MAC_RGMII_MODE_TX_RESET;
1481 	}
1482 	tw32(MAC_EXT_RGMII_MODE, val);
1483 }
1484 
1485 static void tg3_mdio_start(struct tg3 *tp)
1486 {
1487 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1488 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1489 	udelay(80);
1490 
1491 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1492 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1493 		tg3_mdio_config_5785(tp);
1494 }
1495 
1496 static int tg3_mdio_init(struct tg3 *tp)
1497 {
1498 	int i;
1499 	u32 reg;
1500 	struct phy_device *phydev;
1501 
1502 	if (tg3_flag(tp, 5717_PLUS)) {
1503 		u32 is_serdes;
1504 
1505 		tp->phy_addr = tp->pci_fn + 1;
1506 
1507 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1508 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1509 		else
1510 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1511 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1512 		if (is_serdes)
1513 			tp->phy_addr += 7;
1514 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1515 		int addr;
1516 
1517 		addr = ssb_gige_get_phyaddr(tp->pdev);
1518 		if (addr < 0)
1519 			return addr;
1520 		tp->phy_addr = addr;
1521 	} else
1522 		tp->phy_addr = TG3_PHY_MII_ADDR;
1523 
1524 	tg3_mdio_start(tp);
1525 
1526 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1527 		return 0;
1528 
1529 	tp->mdio_bus = mdiobus_alloc();
1530 	if (tp->mdio_bus == NULL)
1531 		return -ENOMEM;
1532 
1533 	tp->mdio_bus->name     = "tg3 mdio bus";
1534 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1535 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1536 	tp->mdio_bus->priv     = tp;
1537 	tp->mdio_bus->parent   = &tp->pdev->dev;
1538 	tp->mdio_bus->read     = &tg3_mdio_read;
1539 	tp->mdio_bus->write    = &tg3_mdio_write;
1540 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1541 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1542 
1543 	for (i = 0; i < PHY_MAX_ADDR; i++)
1544 		tp->mdio_bus->irq[i] = PHY_POLL;
1545 
1546 	/* The bus registration will look for all the PHYs on the mdio bus.
1547 	 * Unfortunately, it does not ensure the PHY is powered up before
1548 	 * accessing the PHY ID registers.  A chip reset is the
1549 	 * quickest way to bring the device back to an operational state.
1550 	 */
1551 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1552 		tg3_bmcr_reset(tp);
1553 
1554 	i = mdiobus_register(tp->mdio_bus);
1555 	if (i) {
1556 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1557 		mdiobus_free(tp->mdio_bus);
1558 		return i;
1559 	}
1560 
1561 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1562 
1563 	if (!phydev || !phydev->drv) {
1564 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1565 		mdiobus_unregister(tp->mdio_bus);
1566 		mdiobus_free(tp->mdio_bus);
1567 		return -ENODEV;
1568 	}
1569 
1570 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1571 	case PHY_ID_BCM57780:
1572 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1573 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1574 		break;
1575 	case PHY_ID_BCM50610:
1576 	case PHY_ID_BCM50610M:
1577 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1578 				     PHY_BRCM_RX_REFCLK_UNUSED |
1579 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1580 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1581 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1582 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1583 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1584 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1585 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1586 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1587 		/* fallthru */
1588 	case PHY_ID_RTL8211C:
1589 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1590 		break;
1591 	case PHY_ID_RTL8201E:
1592 	case PHY_ID_BCMAC131:
1593 		phydev->interface = PHY_INTERFACE_MODE_MII;
1594 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1595 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1596 		break;
1597 	}
1598 
1599 	tg3_flag_set(tp, MDIOBUS_INITED);
1600 
1601 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1602 		tg3_mdio_config_5785(tp);
1603 
1604 	return 0;
1605 }
1606 
1607 static void tg3_mdio_fini(struct tg3 *tp)
1608 {
1609 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1610 		tg3_flag_clear(tp, MDIOBUS_INITED);
1611 		mdiobus_unregister(tp->mdio_bus);
1612 		mdiobus_free(tp->mdio_bus);
1613 	}
1614 }
1615 
1616 /* tp->lock is held. */
1617 static inline void tg3_generate_fw_event(struct tg3 *tp)
1618 {
1619 	u32 val;
1620 
1621 	val = tr32(GRC_RX_CPU_EVENT);
1622 	val |= GRC_RX_CPU_DRIVER_EVENT;
1623 	tw32_f(GRC_RX_CPU_EVENT, val);
1624 
1625 	tp->last_event_jiffies = jiffies;
1626 }
1627 
1628 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1629 
1630 /* tp->lock is held. */
1631 static void tg3_wait_for_event_ack(struct tg3 *tp)
1632 {
1633 	int i;
1634 	unsigned int delay_cnt;
1635 	long time_remain;
1636 
1637 	/* If enough time has passed, no wait is necessary. */
1638 	time_remain = (long)(tp->last_event_jiffies + 1 +
1639 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1640 		      (long)jiffies;
1641 	if (time_remain < 0)
1642 		return;
1643 
1644 	/* Check if we can shorten the wait time. */
1645 	delay_cnt = jiffies_to_usecs(time_remain);
1646 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1647 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1648 	delay_cnt = (delay_cnt >> 3) + 1;
1649 
1650 	for (i = 0; i < delay_cnt; i++) {
1651 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1652 			break;
1653 		if (pci_channel_offline(tp->pdev))
1654 			break;
1655 
1656 		udelay(8);
1657 	}
1658 }
1659 
1660 /* tp->lock is held. */
1661 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1662 {
1663 	u32 reg, val;
1664 
1665 	val = 0;
1666 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1667 		val = reg << 16;
1668 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1669 		val |= (reg & 0xffff);
1670 	*data++ = val;
1671 
1672 	val = 0;
1673 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1674 		val = reg << 16;
1675 	if (!tg3_readphy(tp, MII_LPA, &reg))
1676 		val |= (reg & 0xffff);
1677 	*data++ = val;
1678 
1679 	val = 0;
1680 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1681 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1682 			val = reg << 16;
1683 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1684 			val |= (reg & 0xffff);
1685 	}
1686 	*data++ = val;
1687 
1688 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1689 		val = reg << 16;
1690 	else
1691 		val = 0;
1692 	*data++ = val;
1693 }
1694 
1695 /* tp->lock is held. */
1696 static void tg3_ump_link_report(struct tg3 *tp)
1697 {
1698 	u32 data[4];
1699 
1700 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1701 		return;
1702 
1703 	tg3_phy_gather_ump_data(tp, data);
1704 
1705 	tg3_wait_for_event_ack(tp);
1706 
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1711 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1712 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1713 
1714 	tg3_generate_fw_event(tp);
1715 }
1716 
1717 /* tp->lock is held. */
1718 static void tg3_stop_fw(struct tg3 *tp)
1719 {
1720 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1721 		/* Wait for RX cpu to ACK the previous event. */
1722 		tg3_wait_for_event_ack(tp);
1723 
1724 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1725 
1726 		tg3_generate_fw_event(tp);
1727 
1728 		/* Wait for RX cpu to ACK this event. */
1729 		tg3_wait_for_event_ack(tp);
1730 	}
1731 }
1732 
1733 /* tp->lock is held. */
1734 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1735 {
1736 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1737 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1738 
1739 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1740 		switch (kind) {
1741 		case RESET_KIND_INIT:
1742 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 				      DRV_STATE_START);
1744 			break;
1745 
1746 		case RESET_KIND_SHUTDOWN:
1747 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 				      DRV_STATE_UNLOAD);
1749 			break;
1750 
1751 		case RESET_KIND_SUSPEND:
1752 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 				      DRV_STATE_SUSPEND);
1754 			break;
1755 
1756 		default:
1757 			break;
1758 		}
1759 	}
1760 }
1761 
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1764 {
1765 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1766 		switch (kind) {
1767 		case RESET_KIND_INIT:
1768 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769 				      DRV_STATE_START_DONE);
1770 			break;
1771 
1772 		case RESET_KIND_SHUTDOWN:
1773 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774 				      DRV_STATE_UNLOAD_DONE);
1775 			break;
1776 
1777 		default:
1778 			break;
1779 		}
1780 	}
1781 }
1782 
1783 /* tp->lock is held. */
1784 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1785 {
1786 	if (tg3_flag(tp, ENABLE_ASF)) {
1787 		switch (kind) {
1788 		case RESET_KIND_INIT:
1789 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790 				      DRV_STATE_START);
1791 			break;
1792 
1793 		case RESET_KIND_SHUTDOWN:
1794 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795 				      DRV_STATE_UNLOAD);
1796 			break;
1797 
1798 		case RESET_KIND_SUSPEND:
1799 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1800 				      DRV_STATE_SUSPEND);
1801 			break;
1802 
1803 		default:
1804 			break;
1805 		}
1806 	}
1807 }
1808 
1809 static int tg3_poll_fw(struct tg3 *tp)
1810 {
1811 	int i;
1812 	u32 val;
1813 
1814 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1815 		return 0;
1816 
1817 	if (tg3_flag(tp, IS_SSB_CORE)) {
1818 		/* We don't use firmware. */
1819 		return 0;
1820 	}
1821 
1822 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1823 		/* Wait up to 20ms for init done. */
1824 		for (i = 0; i < 200; i++) {
1825 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1826 				return 0;
1827 			if (pci_channel_offline(tp->pdev))
1828 				return -ENODEV;
1829 
1830 			udelay(100);
1831 		}
1832 		return -ENODEV;
1833 	}
1834 
1835 	/* Wait for firmware initialization to complete. */
1836 	for (i = 0; i < 100000; i++) {
1837 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1838 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1839 			break;
1840 		if (pci_channel_offline(tp->pdev)) {
1841 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1842 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1843 				netdev_info(tp->dev, "No firmware running\n");
1844 			}
1845 
1846 			break;
1847 		}
1848 
1849 		udelay(10);
1850 	}
1851 
1852 	/* Chip might not be fitted with firmware.  Some Sun onboard
1853 	 * parts are configured like that.  So don't signal the timeout
1854 	 * of the above loop as an error, but do report the lack of
1855 	 * running firmware once.
1856 	 */
1857 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1858 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1859 
1860 		netdev_info(tp->dev, "No firmware running\n");
1861 	}
1862 
1863 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1864 		/* The 57765 A0 needs a little more
1865 		 * time to do some important work.
1866 		 */
1867 		mdelay(10);
1868 	}
1869 
1870 	return 0;
1871 }
1872 
1873 static void tg3_link_report(struct tg3 *tp)
1874 {
1875 	if (!netif_carrier_ok(tp->dev)) {
1876 		netif_info(tp, link, tp->dev, "Link is down\n");
1877 		tg3_ump_link_report(tp);
1878 	} else if (netif_msg_link(tp)) {
1879 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1880 			    (tp->link_config.active_speed == SPEED_1000 ?
1881 			     1000 :
1882 			     (tp->link_config.active_speed == SPEED_100 ?
1883 			      100 : 10)),
1884 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1885 			     "full" : "half"));
1886 
1887 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1888 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1889 			    "on" : "off",
1890 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1891 			    "on" : "off");
1892 
1893 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1894 			netdev_info(tp->dev, "EEE is %s\n",
1895 				    tp->setlpicnt ? "enabled" : "disabled");
1896 
1897 		tg3_ump_link_report(tp);
1898 	}
1899 
1900 	tp->link_up = netif_carrier_ok(tp->dev);
1901 }
1902 
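/* Convert copper (1000BASE-T) MII pause advertisement bits into
 * FLOW_CTRL_TX/FLOW_CTRL_RX flags.
 */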
1903 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1904 {
1905 	u32 flowctrl = 0;
1906 
1907 	if (adv & ADVERTISE_PAUSE_CAP) {
1908 		flowctrl |= FLOW_CTRL_RX;
1909 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1910 			flowctrl |= FLOW_CTRL_TX;
1911 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1912 		flowctrl |= FLOW_CTRL_TX;
1913 
1914 	return flowctrl;
1915 }
1916 
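/* Build the 1000BASE-X pause advertisement bits corresponding to the
 * requested flow control setting.
 */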
1917 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1918 {
1919 	u16 miireg;
1920 
1921 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1922 		miireg = ADVERTISE_1000XPAUSE;
1923 	else if (flow_ctrl & FLOW_CTRL_TX)
1924 		miireg = ADVERTISE_1000XPSE_ASYM;
1925 	else if (flow_ctrl & FLOW_CTRL_RX)
1926 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1927 	else
1928 		miireg = 0;
1929 
1930 	return miireg;
1931 }
1932 
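/* Convert 1000BASE-X pause advertisement bits into FLOW_CTRL_TX/FLOW_CTRL_RX
 * flags.
 */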
1933 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1934 {
1935 	u32 flowctrl = 0;
1936 
1937 	if (adv & ADVERTISE_1000XPAUSE) {
1938 		flowctrl |= FLOW_CTRL_RX;
1939 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1940 			flowctrl |= FLOW_CTRL_TX;
1941 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1942 		flowctrl |= FLOW_CTRL_TX;
1943 
1944 	return flowctrl;
1945 }
1946 
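/* Resolve the local and link partner 1000BASE-X pause advertisements into
 * the flow control mode this end should use, following the standard
 * autoneg pause resolution rules.
 */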
1947 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1948 {
1949 	u8 cap = 0;
1950 
1951 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1952 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1953 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1954 		if (lcladv & ADVERTISE_1000XPAUSE)
1955 			cap = FLOW_CTRL_RX;
1956 		if (rmtadv & ADVERTISE_1000XPAUSE)
1957 			cap = FLOW_CTRL_TX;
1958 	}
1959 
1960 	return cap;
1961 }
1962 
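/* Determine the active flow control mode (from the autoneg results when
 * pause autoneg is enabled, otherwise from the forced configuration) and
 * program the MAC RX/TX flow control enables to match.
 */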
1963 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1964 {
1965 	u8 autoneg;
1966 	u8 flowctrl = 0;
1967 	u32 old_rx_mode = tp->rx_mode;
1968 	u32 old_tx_mode = tp->tx_mode;
1969 
1970 	if (tg3_flag(tp, USE_PHYLIB))
1971 		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1972 	else
1973 		autoneg = tp->link_config.autoneg;
1974 
1975 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1976 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1977 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1978 		else
1979 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1980 	} else
1981 		flowctrl = tp->link_config.flowctrl;
1982 
1983 	tp->link_config.active_flowctrl = flowctrl;
1984 
1985 	if (flowctrl & FLOW_CTRL_RX)
1986 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1987 	else
1988 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1989 
1990 	if (old_rx_mode != tp->rx_mode)
1991 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1992 
1993 	if (flowctrl & FLOW_CTRL_TX)
1994 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1995 	else
1996 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1997 
1998 	if (old_tx_mode != tp->tx_mode)
1999 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2000 }
2001 
2002 static void tg3_adjust_link(struct net_device *dev)
2003 {
2004 	u8 oldflowctrl, linkmesg = 0;
2005 	u32 mac_mode, lcl_adv, rmt_adv;
2006 	struct tg3 *tp = netdev_priv(dev);
2007 	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2008 
2009 	spin_lock_bh(&tp->lock);
2010 
2011 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2012 				    MAC_MODE_HALF_DUPLEX);
2013 
2014 	oldflowctrl = tp->link_config.active_flowctrl;
2015 
2016 	if (phydev->link) {
2017 		lcl_adv = 0;
2018 		rmt_adv = 0;
2019 
2020 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2021 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2022 		else if (phydev->speed == SPEED_1000 ||
2023 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2024 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2025 		else
2026 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2027 
2028 		if (phydev->duplex == DUPLEX_HALF)
2029 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2030 		else {
2031 			lcl_adv = mii_advertise_flowctrl(
2032 				  tp->link_config.flowctrl);
2033 
2034 			if (phydev->pause)
2035 				rmt_adv = LPA_PAUSE_CAP;
2036 			if (phydev->asym_pause)
2037 				rmt_adv |= LPA_PAUSE_ASYM;
2038 		}
2039 
2040 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2041 	} else
2042 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2043 
2044 	if (mac_mode != tp->mac_mode) {
2045 		tp->mac_mode = mac_mode;
2046 		tw32_f(MAC_MODE, tp->mac_mode);
2047 		udelay(40);
2048 	}
2049 
2050 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2051 		if (phydev->speed == SPEED_10)
2052 			tw32(MAC_MI_STAT,
2053 			     MAC_MI_STAT_10MBPS_MODE |
2054 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 		else
2056 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057 	}
2058 
2059 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2060 		tw32(MAC_TX_LENGTHS,
2061 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2063 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064 	else
2065 		tw32(MAC_TX_LENGTHS,
2066 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2067 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2068 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2069 
2070 	if (phydev->link != tp->old_link ||
2071 	    phydev->speed != tp->link_config.active_speed ||
2072 	    phydev->duplex != tp->link_config.active_duplex ||
2073 	    oldflowctrl != tp->link_config.active_flowctrl)
2074 		linkmesg = 1;
2075 
2076 	tp->old_link = phydev->link;
2077 	tp->link_config.active_speed = phydev->speed;
2078 	tp->link_config.active_duplex = phydev->duplex;
2079 
2080 	spin_unlock_bh(&tp->lock);
2081 
2082 	if (linkmesg)
2083 		tg3_link_report(tp);
2084 }
2085 
2086 static int tg3_phy_init(struct tg3 *tp)
2087 {
2088 	struct phy_device *phydev;
2089 
2090 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2091 		return 0;
2092 
2093 	/* Bring the PHY back to a known state. */
2094 	tg3_bmcr_reset(tp);
2095 
2096 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2097 
2098 	/* Attach the MAC to the PHY. */
2099 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2100 			     tg3_adjust_link, phydev->interface);
2101 	if (IS_ERR(phydev)) {
2102 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2103 		return PTR_ERR(phydev);
2104 	}
2105 
2106 	/* Mask with MAC supported features. */
2107 	switch (phydev->interface) {
2108 	case PHY_INTERFACE_MODE_GMII:
2109 	case PHY_INTERFACE_MODE_RGMII:
2110 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2111 			phydev->supported &= (PHY_GBIT_FEATURES |
2112 					      SUPPORTED_Pause |
2113 					      SUPPORTED_Asym_Pause);
2114 			break;
2115 		}
2116 		/* fallthru */
2117 	case PHY_INTERFACE_MODE_MII:
2118 		phydev->supported &= (PHY_BASIC_FEATURES |
2119 				      SUPPORTED_Pause |
2120 				      SUPPORTED_Asym_Pause);
2121 		break;
2122 	default:
2123 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2124 		return -EINVAL;
2125 	}
2126 
2127 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128 
2129 	phydev->advertising = phydev->supported;
2130 
2131 	return 0;
2132 }
2133 
2134 static void tg3_phy_start(struct tg3 *tp)
2135 {
2136 	struct phy_device *phydev;
2137 
2138 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139 		return;
2140 
2141 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2142 
2143 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145 		phydev->speed = tp->link_config.speed;
2146 		phydev->duplex = tp->link_config.duplex;
2147 		phydev->autoneg = tp->link_config.autoneg;
2148 		phydev->advertising = tp->link_config.advertising;
2149 	}
2150 
2151 	phy_start(phydev);
2152 
2153 	phy_start_aneg(phydev);
2154 }
2155 
2156 static void tg3_phy_stop(struct tg3 *tp)
2157 {
2158 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2159 		return;
2160 
2161 	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2162 }
2163 
2164 static void tg3_phy_fini(struct tg3 *tp)
2165 {
2166 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2167 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2168 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2169 	}
2170 }
2171 
2172 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2173 {
2174 	int err;
2175 	u32 val;
2176 
2177 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2178 		return 0;
2179 
2180 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2181 		/* Cannot do read-modify-write on 5401 */
2182 		err = tg3_phy_auxctl_write(tp,
2183 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2184 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2185 					   0x4c20);
2186 		goto done;
2187 	}
2188 
2189 	err = tg3_phy_auxctl_read(tp,
2190 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2191 	if (err)
2192 		return err;
2193 
2194 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2195 	err = tg3_phy_auxctl_write(tp,
2196 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197 
2198 done:
2199 	return err;
2200 }
2201 
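/* Enable or disable Auto Power-Down (APD) through the FET PHY's shadow
 * register set.
 */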
2202 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2203 {
2204 	u32 phytest;
2205 
2206 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2207 		u32 phy;
2208 
2209 		tg3_writephy(tp, MII_TG3_FET_TEST,
2210 			     phytest | MII_TG3_FET_SHADOW_EN);
2211 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2212 			if (enable)
2213 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214 			else
2215 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2216 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2217 		}
2218 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2219 	}
2220 }
2221 
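/* Enable or disable the PHY's Auto Power-Down (APD) feature on chips that
 * support it.
 */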
2222 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2223 {
2224 	u32 reg;
2225 
2226 	if (!tg3_flag(tp, 5705_PLUS) ||
2227 	    (tg3_flag(tp, 5717_PLUS) &&
2228 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2229 		return;
2230 
2231 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2232 		tg3_phy_fet_toggle_apd(tp, enable);
2233 		return;
2234 	}
2235 
2236 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2237 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2238 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2239 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2240 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2241 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2242 
2243 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2244 
2245 
2246 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2247 	if (enable)
2248 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2249 
2250 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2251 }
2252 
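/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs. */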
2253 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2254 {
2255 	u32 phy;
2256 
2257 	if (!tg3_flag(tp, 5705_PLUS) ||
2258 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2259 		return;
2260 
2261 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2262 		u32 ephy;
2263 
2264 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2265 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2266 
2267 			tg3_writephy(tp, MII_TG3_FET_TEST,
2268 				     ephy | MII_TG3_FET_SHADOW_EN);
2269 			if (!tg3_readphy(tp, reg, &phy)) {
2270 				if (enable)
2271 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272 				else
2273 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2274 				tg3_writephy(tp, reg, phy);
2275 			}
2276 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2277 		}
2278 	} else {
2279 		int ret;
2280 
2281 		ret = tg3_phy_auxctl_read(tp,
2282 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2283 		if (!ret) {
2284 			if (enable)
2285 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286 			else
2287 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2288 			tg3_phy_auxctl_write(tp,
2289 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2290 		}
2291 	}
2292 }
2293 
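/* Enable the PHY's Ethernet@WireSpeed feature unless
 * TG3_PHYFLG_NO_ETH_WIRE_SPEED is set.
 */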
2294 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295 {
2296 	int ret;
2297 	u32 val;
2298 
2299 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2300 		return;
2301 
2302 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2303 	if (!ret)
2304 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2305 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2306 }
2307 
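/* Program PHY DSP tap and filter coefficients from the chip's
 * one-time-programmable (OTP) data, if present.
 */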
2308 static void tg3_phy_apply_otp(struct tg3 *tp)
2309 {
2310 	u32 otp, phy;
2311 
2312 	if (!tp->phy_otp)
2313 		return;
2314 
2315 	otp = tp->phy_otp;
2316 
2317 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2318 		return;
2319 
2320 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2321 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2322 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2323 
2324 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2325 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2326 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2327 
2328 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2329 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2330 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2331 
2332 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2334 
2335 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2337 
2338 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2339 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2340 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2341 
2342 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2343 }
2344 
2345 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2346 {
2347 	u32 val;
2348 	struct ethtool_eee *dest = &tp->eee;
2349 
2350 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351 		return;
2352 
2353 	if (eee)
2354 		dest = eee;
2355 
2356 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2357 		return;
2358 
2359 	/* Pull eee_active */
2360 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2361 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2362 		dest->eee_active = 1;
2363 	} else
2364 		dest->eee_active = 0;
2365 
2366 	/* Pull lp advertised settings */
2367 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2368 		return;
2369 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2370 
2371 	/* Pull advertised and eee_enabled settings */
2372 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2373 		return;
2374 	dest->eee_enabled = !!val;
2375 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2376 
2377 	/* Pull tx_lpi_enabled */
2378 	val = tr32(TG3_CPMU_EEE_MODE);
2379 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2380 
2381 	/* Pull lpi timer value */
2382 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2383 }
2384 
2385 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2386 {
2387 	u32 val;
2388 
2389 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390 		return;
2391 
2392 	tp->setlpicnt = 0;
2393 
2394 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2395 	    current_link_up &&
2396 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2397 	    (tp->link_config.active_speed == SPEED_100 ||
2398 	     tp->link_config.active_speed == SPEED_1000)) {
2399 		u32 eeectl;
2400 
2401 		if (tp->link_config.active_speed == SPEED_1000)
2402 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2403 		else
2404 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2405 
2406 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2407 
2408 		tg3_eee_pull_config(tp, NULL);
2409 		if (tp->eee.eee_active)
2410 			tp->setlpicnt = 2;
2411 	}
2412 
2413 	if (!tp->setlpicnt) {
2414 		if (current_link_up &&
2415 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2416 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2417 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2418 		}
2419 
2420 		val = tr32(TG3_CPMU_EEE_MODE);
2421 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2422 	}
2423 }
2424 
2425 static void tg3_phy_eee_enable(struct tg3 *tp)
2426 {
2427 	u32 val;
2428 
2429 	if (tp->link_config.active_speed == SPEED_1000 &&
2430 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2431 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2432 	     tg3_flag(tp, 57765_CLASS)) &&
2433 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2434 		val = MII_TG3_DSP_TAP26_ALNOKO |
2435 		      MII_TG3_DSP_TAP26_RMRXSTO;
2436 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2437 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2438 	}
2439 
2440 	val = tr32(TG3_CPMU_EEE_MODE);
2441 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2442 }
2443 
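/* Poll MII_TG3_DSP_CONTROL until the pending DSP macro operation
 * completes (bit 12 clears).
 */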
2444 static int tg3_wait_macro_done(struct tg3 *tp)
2445 {
2446 	int limit = 100;
2447 
2448 	while (limit--) {
2449 		u32 tmp32;
2450 
2451 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2452 			if ((tmp32 & 0x1000) == 0)
2453 				break;
2454 		}
2455 	}
2456 	if (limit < 0)
2457 		return -EBUSY;
2458 
2459 	return 0;
2460 }
2461 
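/* Write a known test pattern into each of the four DSP channels and read
 * it back to verify the PHY.  On any failure, flag that another PHY reset
 * is needed via *resetp and return -EBUSY.
 */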
2462 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2463 {
2464 	static const u32 test_pat[4][6] = {
2465 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2466 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2467 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2468 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2469 	};
2470 	int chan;
2471 
2472 	for (chan = 0; chan < 4; chan++) {
2473 		int i;
2474 
2475 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2476 			     (chan * 0x2000) | 0x0200);
2477 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2478 
2479 		for (i = 0; i < 6; i++)
2480 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2481 				     test_pat[chan][i]);
2482 
2483 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2484 		if (tg3_wait_macro_done(tp)) {
2485 			*resetp = 1;
2486 			return -EBUSY;
2487 		}
2488 
2489 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2490 			     (chan * 0x2000) | 0x0200);
2491 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2492 		if (tg3_wait_macro_done(tp)) {
2493 			*resetp = 1;
2494 			return -EBUSY;
2495 		}
2496 
2497 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2498 		if (tg3_wait_macro_done(tp)) {
2499 			*resetp = 1;
2500 			return -EBUSY;
2501 		}
2502 
2503 		for (i = 0; i < 6; i += 2) {
2504 			u32 low, high;
2505 
2506 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2507 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2508 			    tg3_wait_macro_done(tp)) {
2509 				*resetp = 1;
2510 				return -EBUSY;
2511 			}
2512 			low &= 0x7fff;
2513 			high &= 0x000f;
2514 			if (low != test_pat[chan][i] ||
2515 			    high != test_pat[chan][i+1]) {
2516 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2517 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2518 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2519 
2520 				return -EBUSY;
2521 			}
2522 		}
2523 	}
2524 
2525 	return 0;
2526 }
2527 
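/* Clear the DSP test pattern memory for all four channels. */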
2528 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2529 {
2530 	int chan;
2531 
2532 	for (chan = 0; chan < 4; chan++) {
2533 		int i;
2534 
2535 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2536 			     (chan * 0x2000) | 0x0200);
2537 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2538 		for (i = 0; i < 6; i++)
2539 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2540 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2541 		if (tg3_wait_macro_done(tp))
2542 			return -EBUSY;
2543 	}
2544 
2545 	return 0;
2546 }
2547 
2548 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2549 {
2550 	u32 reg32, phy9_orig;
2551 	int retries, do_phy_reset, err;
2552 
2553 	retries = 10;
2554 	do_phy_reset = 1;
2555 	do {
2556 		if (do_phy_reset) {
2557 			err = tg3_bmcr_reset(tp);
2558 			if (err)
2559 				return err;
2560 			do_phy_reset = 0;
2561 		}
2562 
2563 		/* Disable transmitter and interrupt.  */
2564 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2565 			continue;
2566 
2567 		reg32 |= 0x3000;
2568 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2569 
2570 		/* Set full-duplex, 1000 mbps.  */
2571 		tg3_writephy(tp, MII_BMCR,
2572 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2573 
2574 		/* Set to master mode.  */
2575 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2576 			continue;
2577 
2578 		tg3_writephy(tp, MII_CTRL1000,
2579 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2580 
2581 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2582 		if (err)
2583 			return err;
2584 
2585 		/* Block the PHY control access.  */
2586 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2587 
2588 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2589 		if (!err)
2590 			break;
2591 	} while (--retries);
2592 
2593 	err = tg3_phy_reset_chanpat(tp);
2594 	if (err)
2595 		return err;
2596 
2597 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2598 
2599 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2600 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2601 
2602 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2603 
2604 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2605 
2606 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607 	if (err)
2608 		return err;
2609 
2610 	reg32 &= ~0x3000;
2611 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 
2613 	return 0;
2614 }
2615 
2616 static void tg3_carrier_off(struct tg3 *tp)
2617 {
2618 	netif_carrier_off(tp->dev);
2619 	tp->link_up = false;
2620 }
2621 
2622 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2623 {
2624 	if (tg3_flag(tp, ENABLE_ASF))
2625 		netdev_warn(tp->dev,
2626 			    "Management side-band traffic will be interrupted during phy settings change\n");
2627 }
2628 
2629 /* Reset the tigon3 PHY and reapply the chip-specific PHY workarounds
2630  * and DSP fixups that are needed after a reset.
2631  */
2632 static int tg3_phy_reset(struct tg3 *tp)
2633 {
2634 	u32 val, cpmuctrl;
2635 	int err;
2636 
2637 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2638 		val = tr32(GRC_MISC_CFG);
2639 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2640 		udelay(40);
2641 	}
2642 	err  = tg3_readphy(tp, MII_BMSR, &val);
2643 	err |= tg3_readphy(tp, MII_BMSR, &val);
2644 	if (err != 0)
2645 		return -EBUSY;
2646 
2647 	if (netif_running(tp->dev) && tp->link_up) {
2648 		netif_carrier_off(tp->dev);
2649 		tg3_link_report(tp);
2650 	}
2651 
2652 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2653 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2654 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2655 		err = tg3_phy_reset_5703_4_5(tp);
2656 		if (err)
2657 			return err;
2658 		goto out;
2659 	}
2660 
2661 	cpmuctrl = 0;
2662 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2663 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2664 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2665 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2666 			tw32(TG3_CPMU_CTRL,
2667 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2668 	}
2669 
2670 	err = tg3_bmcr_reset(tp);
2671 	if (err)
2672 		return err;
2673 
2674 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2675 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2676 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2677 
2678 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2679 	}
2680 
2681 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2682 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2683 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2684 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2685 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2686 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2687 			udelay(40);
2688 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2689 		}
2690 	}
2691 
2692 	if (tg3_flag(tp, 5717_PLUS) &&
2693 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2694 		return 0;
2695 
2696 	tg3_phy_apply_otp(tp);
2697 
2698 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2699 		tg3_phy_toggle_apd(tp, true);
2700 	else
2701 		tg3_phy_toggle_apd(tp, false);
2702 
2703 out:
2704 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2705 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2707 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2708 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2709 	}
2710 
2711 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2712 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 	}
2715 
2716 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2717 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2719 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2720 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2721 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 		}
2723 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2724 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2726 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2727 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2728 				tg3_writephy(tp, MII_TG3_TEST1,
2729 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2730 			} else
2731 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2732 
2733 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2734 		}
2735 	}
2736 
2737 	/* Set the extended packet length bit (bit 14) on all chips
2738 	 * that support jumbo frames. */
2739 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2740 		/* Cannot do read-modify-write on 5401 */
2741 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2742 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 		/* Set bit 14 with read-modify-write to preserve other bits */
2744 		err = tg3_phy_auxctl_read(tp,
2745 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2746 		if (!err)
2747 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2748 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2749 	}
2750 
2751 	/* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2752 	 * jumbo frame transmission.
2753 	 */
2754 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2756 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2757 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2758 	}
2759 
2760 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2761 		/* adjust output voltage */
2762 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2763 	}
2764 
2765 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2766 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2767 
2768 	tg3_phy_toggle_automdix(tp, true);
2769 	tg3_phy_set_wirespeed(tp);
2770 	return 0;
2771 }
2772 
2773 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2774 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2775 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2776 					  TG3_GPIO_MSG_NEED_VAUX)
2777 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2778 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2779 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2780 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2782 
2783 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2784 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2785 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2786 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2788 
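/* Update this PCI function's driver-present / need-VAUX bits in the GPIO
 * message register shared by all functions and return the combined status.
 */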
2789 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2790 {
2791 	u32 status, shift;
2792 
2793 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2794 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2795 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2796 	else
2797 		status = tr32(TG3_CPMU_DRV_STATUS);
2798 
2799 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2800 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2801 	status |= (newstat << shift);
2802 
2803 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2804 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2805 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2806 	else
2807 		tw32(TG3_CPMU_DRV_STATUS, status);
2808 
2809 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2810 }
2811 
2812 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2813 {
2814 	if (!tg3_flag(tp, IS_NIC))
2815 		return 0;
2816 
2817 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2818 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2819 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2820 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2821 			return -EIO;
2822 
2823 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2824 
2825 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 
2828 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2829 	} else {
2830 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2831 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 	}
2833 
2834 	return 0;
2835 }
2836 
2837 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2838 {
2839 	u32 grc_local_ctrl;
2840 
2841 	if (!tg3_flag(tp, IS_NIC) ||
2842 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2843 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2844 		return;
2845 
2846 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2847 
2848 	tw32_wait_f(GRC_LOCAL_CTRL,
2849 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2850 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2851 
2852 	tw32_wait_f(GRC_LOCAL_CTRL,
2853 		    grc_local_ctrl,
2854 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 
2856 	tw32_wait_f(GRC_LOCAL_CTRL,
2857 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 }
2860 
2861 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2862 {
2863 	if (!tg3_flag(tp, IS_NIC))
2864 		return;
2865 
2866 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2867 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2868 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2869 			    (GRC_LCLCTRL_GPIO_OE0 |
2870 			     GRC_LCLCTRL_GPIO_OE1 |
2871 			     GRC_LCLCTRL_GPIO_OE2 |
2872 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2873 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2874 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2876 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2877 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2878 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2879 				     GRC_LCLCTRL_GPIO_OE1 |
2880 				     GRC_LCLCTRL_GPIO_OE2 |
2881 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2882 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2883 				     tp->grc_local_ctrl;
2884 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 
2887 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2888 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 
2891 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2892 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 	} else {
2895 		u32 no_gpio2;
2896 		u32 grc_local_ctrl = 0;
2897 
2898 		/* Workaround to prevent overdrawing Amps. */
2899 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2900 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2901 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2902 				    grc_local_ctrl,
2903 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 		}
2905 
2906 		/* On 5753 and variants, GPIO2 cannot be used. */
2907 		no_gpio2 = tp->nic_sram_data_cfg &
2908 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2909 
2910 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2911 				  GRC_LCLCTRL_GPIO_OE1 |
2912 				  GRC_LCLCTRL_GPIO_OE2 |
2913 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2914 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2915 		if (no_gpio2) {
2916 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2917 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2918 		}
2919 		tw32_wait_f(GRC_LOCAL_CTRL,
2920 			    tp->grc_local_ctrl | grc_local_ctrl,
2921 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2922 
2923 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2924 
2925 		tw32_wait_f(GRC_LOCAL_CTRL,
2926 			    tp->grc_local_ctrl | grc_local_ctrl,
2927 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2928 
2929 		if (!no_gpio2) {
2930 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2931 			tw32_wait_f(GRC_LOCAL_CTRL,
2932 				    tp->grc_local_ctrl | grc_local_ctrl,
2933 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 		}
2935 	}
2936 }
2937 
2938 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2939 {
2940 	u32 msg = 0;
2941 
2942 	/* Serialize power state transitions */
2943 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2944 		return;
2945 
2946 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2947 		msg = TG3_GPIO_MSG_NEED_VAUX;
2948 
2949 	msg = tg3_set_function_status(tp, msg);
2950 
2951 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2952 		goto done;
2953 
2954 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2955 		tg3_pwrsrc_switch_to_vaux(tp);
2956 	else
2957 		tg3_pwrsrc_die_with_vmain(tp);
2958 
2959 done:
2960 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2961 }
2962 
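/* Decide whether this NIC (or its peer function) must keep auxiliary power
 * available (for WoL or ASF) and switch the power source between VAUX and
 * VMAIN accordingly.
 */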
2963 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2964 {
2965 	bool need_vaux = false;
2966 
2967 	/* The GPIOs do something completely different on 57765. */
2968 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2969 		return;
2970 
2971 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2972 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2973 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2974 		tg3_frob_aux_power_5717(tp, include_wol ?
2975 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2976 		return;
2977 	}
2978 
2979 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2980 		struct net_device *dev_peer;
2981 
2982 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2983 
2984 		/* remove_one() may have been run on the peer. */
2985 		if (dev_peer) {
2986 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2987 
2988 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2989 				return;
2990 
2991 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2992 			    tg3_flag(tp_peer, ENABLE_ASF))
2993 				need_vaux = true;
2994 		}
2995 	}
2996 
2997 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2998 	    tg3_flag(tp, ENABLE_ASF))
2999 		need_vaux = true;
3000 
3001 	if (need_vaux)
3002 		tg3_pwrsrc_switch_to_vaux(tp);
3003 	else
3004 		tg3_pwrsrc_die_with_vmain(tp);
3005 }
3006 
3007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3008 {
3009 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3010 		return 1;
3011 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3012 		if (speed != SPEED_10)
3013 			return 1;
3014 	} else if (speed == SPEED_10)
3015 		return 1;
3016 
3017 	return 0;
3018 }
3019 
3020 static bool tg3_phy_power_bug(struct tg3 *tp)
3021 {
3022 	switch (tg3_asic_rev(tp)) {
3023 	case ASIC_REV_5700:
3024 	case ASIC_REV_5704:
3025 		return true;
3026 	case ASIC_REV_5780:
3027 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3028 			return true;
3029 		return false;
3030 	case ASIC_REV_5717:
3031 		if (!tp->pci_fn)
3032 			return true;
3033 		return false;
3034 	case ASIC_REV_5719:
3035 	case ASIC_REV_5720:
3036 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3037 		    !tp->pci_fn)
3038 			return true;
3039 		return false;
3040 	}
3041 
3042 	return false;
3043 }
3044 
3045 static bool tg3_phy_led_bug(struct tg3 *tp)
3046 {
3047 	switch (tg3_asic_rev(tp)) {
3048 	case ASIC_REV_5719:
3049 	case ASIC_REV_5720:
3050 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3051 		    !tp->pci_fn)
3052 			return true;
3053 		return false;
3054 	}
3055 
3056 	return false;
3057 }
3058 
3059 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3060 {
3061 	u32 val;
3062 
3063 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3064 		return;
3065 
3066 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3067 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3068 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3069 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3070 
3071 			sg_dig_ctrl |=
3072 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3073 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3074 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075 		}
3076 		return;
3077 	}
3078 
3079 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3080 		tg3_bmcr_reset(tp);
3081 		val = tr32(GRC_MISC_CFG);
3082 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3083 		udelay(40);
3084 		return;
3085 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3086 		u32 phytest;
3087 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3088 			u32 phy;
3089 
3090 			tg3_writephy(tp, MII_ADVERTISE, 0);
3091 			tg3_writephy(tp, MII_BMCR,
3092 				     BMCR_ANENABLE | BMCR_ANRESTART);
3093 
3094 			tg3_writephy(tp, MII_TG3_FET_TEST,
3095 				     phytest | MII_TG3_FET_SHADOW_EN);
3096 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3097 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3098 				tg3_writephy(tp,
3099 					     MII_TG3_FET_SHDW_AUXMODE4,
3100 					     phy);
3101 			}
3102 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3103 		}
3104 		return;
3105 	} else if (do_low_power) {
3106 		if (!tg3_phy_led_bug(tp))
3107 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3108 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3109 
3110 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3111 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3112 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3113 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3114 	}
3115 
3116 	/* The PHY should not be powered down on some chips because
3117 	 * of bugs.
3118 	 */
3119 	if (tg3_phy_power_bug(tp))
3120 		return;
3121 
3122 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3123 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3124 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3125 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3126 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3127 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3128 	}
3129 
3130 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3131 }
3132 
3133 /* tp->lock is held. */
3134 static int tg3_nvram_lock(struct tg3 *tp)
3135 {
3136 	if (tg3_flag(tp, NVRAM)) {
3137 		int i;
3138 
3139 		if (tp->nvram_lock_cnt == 0) {
3140 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3141 			for (i = 0; i < 8000; i++) {
3142 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143 					break;
3144 				udelay(20);
3145 			}
3146 			if (i == 8000) {
3147 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3148 				return -ENODEV;
3149 			}
3150 		}
3151 		tp->nvram_lock_cnt++;
3152 	}
3153 	return 0;
3154 }
3155 
3156 /* tp->lock is held. */
3157 static void tg3_nvram_unlock(struct tg3 *tp)
3158 {
3159 	if (tg3_flag(tp, NVRAM)) {
3160 		if (tp->nvram_lock_cnt > 0)
3161 			tp->nvram_lock_cnt--;
3162 		if (tp->nvram_lock_cnt == 0)
3163 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 	}
3165 }
3166 
3167 /* tp->lock is held. */
3168 static void tg3_enable_nvram_access(struct tg3 *tp)
3169 {
3170 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3171 		u32 nvaccess = tr32(NVRAM_ACCESS);
3172 
3173 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3174 	}
3175 }
3176 
3177 /* tp->lock is held. */
3178 static void tg3_disable_nvram_access(struct tg3 *tp)
3179 {
3180 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3181 		u32 nvaccess = tr32(NVRAM_ACCESS);
3182 
3183 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3184 	}
3185 }
3186 
3187 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3188 					u32 offset, u32 *val)
3189 {
3190 	u32 tmp;
3191 	int i;
3192 
3193 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3194 		return -EINVAL;
3195 
3196 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3197 					EEPROM_ADDR_DEVID_MASK |
3198 					EEPROM_ADDR_READ);
3199 	tw32(GRC_EEPROM_ADDR,
3200 	     tmp |
3201 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3203 	      EEPROM_ADDR_ADDR_MASK) |
3204 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3205 
3206 	for (i = 0; i < 1000; i++) {
3207 		tmp = tr32(GRC_EEPROM_ADDR);
3208 
3209 		if (tmp & EEPROM_ADDR_COMPLETE)
3210 			break;
3211 		msleep(1);
3212 	}
3213 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3214 		return -EBUSY;
3215 
3216 	tmp = tr32(GRC_EEPROM_DATA);
3217 
3218 	/*
3219 	 * The data will always be opposite the native endian
3220 	 * format.  Perform a blind byteswap to compensate.
3221 	 */
3222 	*val = swab32(tmp);
3223 
3224 	return 0;
3225 }
3226 
3227 #define NVRAM_CMD_TIMEOUT 5000
3228 
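/* Issue an NVRAM command and poll up to NVRAM_CMD_TIMEOUT iterations for
 * it to complete.
 */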
3229 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230 {
3231 	int i;
3232 
3233 	tw32(NVRAM_CMD, nvram_cmd);
3234 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235 		usleep_range(10, 40);
3236 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 			udelay(10);
3238 			break;
3239 		}
3240 	}
3241 
3242 	if (i == NVRAM_CMD_TIMEOUT)
3243 		return -EBUSY;
3244 
3245 	return 0;
3246 }
3247 
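/* Translate a linear NVRAM offset into the page-oriented physical address
 * used by Atmel AT45DB buffered flash parts; other devices use the offset
 * unchanged.
 */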
3248 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3249 {
3250 	if (tg3_flag(tp, NVRAM) &&
3251 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3252 	    tg3_flag(tp, FLASH) &&
3253 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3254 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3255 
3256 		addr = ((addr / tp->nvram_pagesize) <<
3257 			ATMEL_AT45DB0X1B_PAGE_POS) +
3258 		       (addr % tp->nvram_pagesize);
3259 
3260 	return addr;
3261 }
3262 
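/* Inverse of tg3_nvram_phys_addr(): convert a page-oriented flash address
 * back into a linear NVRAM offset.
 */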
3263 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3264 {
3265 	if (tg3_flag(tp, NVRAM) &&
3266 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3267 	    tg3_flag(tp, FLASH) &&
3268 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3269 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3270 
3271 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3272 			tp->nvram_pagesize) +
3273 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3274 
3275 	return addr;
3276 }
3277 
3278 /* NOTE: Data read in from NVRAM is byteswapped according to
3279  * the byteswapping settings for all other register accesses.
3280  * tg3 devices are BE devices, so on a BE machine, the data
3281  * returned will be exactly as it is seen in NVRAM.  On a LE
3282  * machine, the 32-bit value will be byteswapped.
3283  */
3284 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3285 {
3286 	int ret;
3287 
3288 	if (!tg3_flag(tp, NVRAM))
3289 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3290 
3291 	offset = tg3_nvram_phys_addr(tp, offset);
3292 
3293 	if (offset > NVRAM_ADDR_MSK)
3294 		return -EINVAL;
3295 
3296 	ret = tg3_nvram_lock(tp);
3297 	if (ret)
3298 		return ret;
3299 
3300 	tg3_enable_nvram_access(tp);
3301 
3302 	tw32(NVRAM_ADDR, offset);
3303 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3304 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3305 
3306 	if (ret == 0)
3307 		*val = tr32(NVRAM_RDDATA);
3308 
3309 	tg3_disable_nvram_access(tp);
3310 
3311 	tg3_nvram_unlock(tp);
3312 
3313 	return ret;
3314 }
3315 
3316 /* Ensures NVRAM data is in bytestream format. */
3317 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3318 {
3319 	u32 v;
3320 	int res = tg3_nvram_read(tp, offset, &v);
3321 	if (!res)
3322 		*val = cpu_to_be32(v);
3323 	return res;
3324 }
3325 
3326 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3327 				    u32 offset, u32 len, u8 *buf)
3328 {
3329 	int i, j, rc = 0;
3330 	u32 val;
3331 
3332 	for (i = 0; i < len; i += 4) {
3333 		u32 addr;
3334 		__be32 data;
3335 
3336 		addr = offset + i;
3337 
3338 		memcpy(&data, buf + i, 4);
3339 
3340 		/*
3341 		 * The SEEPROM interface expects the data to always be opposite
3342 		 * the native endian format.  We accomplish this by reversing
3343 		 * all the operations that would have been performed on the
3344 		 * data from a call to tg3_nvram_read_be32().
3345 		 */
3346 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3347 
3348 		val = tr32(GRC_EEPROM_ADDR);
3349 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3350 
3351 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3352 			EEPROM_ADDR_READ);
3353 		tw32(GRC_EEPROM_ADDR, val |
3354 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3355 			(addr & EEPROM_ADDR_ADDR_MASK) |
3356 			EEPROM_ADDR_START |
3357 			EEPROM_ADDR_WRITE);
3358 
3359 		for (j = 0; j < 1000; j++) {
3360 			val = tr32(GRC_EEPROM_ADDR);
3361 
3362 			if (val & EEPROM_ADDR_COMPLETE)
3363 				break;
3364 			msleep(1);
3365 		}
3366 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3367 			rc = -EBUSY;
3368 			break;
3369 		}
3370 	}
3371 
3372 	return rc;
3373 }
3374 
3375 /* offset and length are dword aligned */
3376 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3377 		u8 *buf)
3378 {
3379 	int ret = 0;
3380 	u32 pagesize = tp->nvram_pagesize;
3381 	u32 pagemask = pagesize - 1;
3382 	u32 nvram_cmd;
3383 	u8 *tmp;
3384 
3385 	tmp = kmalloc(pagesize, GFP_KERNEL);
3386 	if (tmp == NULL)
3387 		return -ENOMEM;
3388 
3389 	while (len) {
3390 		int j;
3391 		u32 phy_addr, page_off, size;
3392 
3393 		phy_addr = offset & ~pagemask;
3394 
3395 		for (j = 0; j < pagesize; j += 4) {
3396 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3397 						  (__be32 *) (tmp + j));
3398 			if (ret)
3399 				break;
3400 		}
3401 		if (ret)
3402 			break;
3403 
3404 		page_off = offset & pagemask;
3405 		size = pagesize;
3406 		if (len < size)
3407 			size = len;
3408 
3409 		len -= size;
3410 
3411 		memcpy(tmp + page_off, buf, size);
3412 
3413 		offset = offset + (pagesize - page_off);
3414 
3415 		tg3_enable_nvram_access(tp);
3416 
3417 		/*
3418 		 * Before we can erase the flash page, we need
3419 		 * to issue a special "write enable" command.
3420 		 */
3421 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3422 
3423 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424 			break;
3425 
3426 		/* Erase the target page */
3427 		tw32(NVRAM_ADDR, phy_addr);
3428 
3429 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3430 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3431 
3432 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 			break;
3434 
3435 		/* Issue another write enable to start the write. */
3436 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3437 
3438 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 			break;
3440 
3441 		for (j = 0; j < pagesize; j += 4) {
3442 			__be32 data;
3443 
3444 			data = *((__be32 *) (tmp + j));
3445 
3446 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3447 
3448 			tw32(NVRAM_ADDR, phy_addr + j);
3449 
3450 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3451 				NVRAM_CMD_WR;
3452 
3453 			if (j == 0)
3454 				nvram_cmd |= NVRAM_CMD_FIRST;
3455 			else if (j == (pagesize - 4))
3456 				nvram_cmd |= NVRAM_CMD_LAST;
3457 
3458 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3459 			if (ret)
3460 				break;
3461 		}
3462 		if (ret)
3463 			break;
3464 	}
3465 
3466 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3467 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3468 
3469 	kfree(tmp);
3470 
3471 	return ret;
3472 }
3473 
3474 /* offset and length are dword aligned */
3475 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476 		u8 *buf)
3477 {
3478 	int i, ret = 0;
3479 
3480 	for (i = 0; i < len; i += 4, offset += 4) {
3481 		u32 page_off, phy_addr, nvram_cmd;
3482 		__be32 data;
3483 
3484 		memcpy(&data, buf + i, 4);
3485 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3486 
3487 		page_off = offset % tp->nvram_pagesize;
3488 
3489 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3490 
3491 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3492 
3493 		if (page_off == 0 || i == 0)
3494 			nvram_cmd |= NVRAM_CMD_FIRST;
3495 		if (page_off == (tp->nvram_pagesize - 4))
3496 			nvram_cmd |= NVRAM_CMD_LAST;
3497 
3498 		if (i == (len - 4))
3499 			nvram_cmd |= NVRAM_CMD_LAST;
3500 
3501 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3502 		    !tg3_flag(tp, FLASH) ||
3503 		    !tg3_flag(tp, 57765_PLUS))
3504 			tw32(NVRAM_ADDR, phy_addr);
3505 
3506 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3507 		    !tg3_flag(tp, 5755_PLUS) &&
3508 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3509 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3510 			u32 cmd;
3511 
3512 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3513 			ret = tg3_nvram_exec_cmd(tp, cmd);
3514 			if (ret)
3515 				break;
3516 		}
3517 		if (!tg3_flag(tp, FLASH)) {
3518 			/* We always do complete word writes to eeprom. */
3519 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3520 		}
3521 
3522 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3523 		if (ret)
3524 			break;
3525 	}
3526 	return ret;
3527 }
3528 
3529 /* offset and length are dword aligned */
3530 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3531 {
3532 	int ret;
3533 
3534 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3535 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3536 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3537 		udelay(40);
3538 	}
3539 
3540 	if (!tg3_flag(tp, NVRAM)) {
3541 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3542 	} else {
3543 		u32 grc_mode;
3544 
3545 		ret = tg3_nvram_lock(tp);
3546 		if (ret)
3547 			return ret;
3548 
3549 		tg3_enable_nvram_access(tp);
3550 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3551 			tw32(NVRAM_WRITE1, 0x406);
3552 
3553 		grc_mode = tr32(GRC_MODE);
3554 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3555 
3556 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3557 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3558 				buf);
3559 		} else {
3560 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3561 				buf);
3562 		}
3563 
3564 		grc_mode = tr32(GRC_MODE);
3565 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3566 
3567 		tg3_disable_nvram_access(tp);
3568 		tg3_nvram_unlock(tp);
3569 	}
3570 
3571 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3572 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3573 		udelay(40);
3574 	}
3575 
3576 	return ret;
3577 }
3578 
3579 #define RX_CPU_SCRATCH_BASE	0x30000
3580 #define RX_CPU_SCRATCH_SIZE	0x04000
3581 #define TX_CPU_SCRATCH_BASE	0x34000
3582 #define TX_CPU_SCRATCH_SIZE	0x04000
3583 
3584 /* tp->lock is held. */
3585 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3586 {
3587 	int i;
3588 	const int iters = 10000;
3589 
3590 	for (i = 0; i < iters; i++) {
3591 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3592 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3593 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3594 			break;
3595 		if (pci_channel_offline(tp->pdev))
3596 			return -EBUSY;
3597 	}
3598 
3599 	return (i == iters) ? -EBUSY : 0;
3600 }
3601 
3602 /* tp->lock is held. */
3603 static int tg3_rxcpu_pause(struct tg3 *tp)
3604 {
3605 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3606 
3607 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3609 	udelay(10);
3610 
3611 	return rc;
3612 }
3613 
3614 /* tp->lock is held. */
3615 static int tg3_txcpu_pause(struct tg3 *tp)
3616 {
3617 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3618 }
3619 
3620 /* tp->lock is held. */
3621 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3622 {
3623 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3624 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3625 }
3626 
3627 /* tp->lock is held. */
3628 static void tg3_rxcpu_resume(struct tg3 *tp)
3629 {
3630 	tg3_resume_cpu(tp, RX_CPU_BASE);
3631 }
3632 
3633 /* tp->lock is held. */
3634 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3635 {
3636 	int rc;
3637 
3638 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3639 
3640 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3641 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3642 
3643 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3644 		return 0;
3645 	}
3646 	if (cpu_base == RX_CPU_BASE) {
3647 		rc = tg3_rxcpu_pause(tp);
3648 	} else {
3649 		/*
3650 		 * There is only an Rx CPU for the 5750 derivative in the
3651 		 * BCM4785.
3652 		 */
3653 		if (tg3_flag(tp, IS_SSB_CORE))
3654 			return 0;
3655 
3656 		rc = tg3_txcpu_pause(tp);
3657 	}
3658 
3659 	if (rc) {
3660 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3661 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3662 		return -ENODEV;
3663 	}
3664 
3665 	/* Clear firmware's nvram arbitration. */
3666 	if (tg3_flag(tp, NVRAM))
3667 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3668 	return 0;
3669 }
3670 
3671 static int tg3_fw_data_len(struct tg3 *tp,
3672 			   const struct tg3_firmware_hdr *fw_hdr)
3673 {
3674 	int fw_len;
3675 
3676 	/* Non-fragmented firmware has a single firmware header followed by a
3677 	 * contiguous chunk of data to be written.  The length field in that
3678 	 * header is not the length of the data to be written but the complete
3679 	 * length of the bss, so the data length is derived from tp->fw->size
3680 	 * minus the headers.
3681 	 *
3682 	 * Fragmented firmware has a main header followed by multiple
3683 	 * fragments.  Each fragment looks like non-fragmented firmware: a
3684 	 * firmware header followed by a contiguous chunk of data.  In the
3685 	 * main header the length field is unused and set to 0xffffffff.  In
3686 	 * each fragment header the length is the entire size of that
3687 	 * fragment, i.e. fragment data plus header length, so the data
3688 	 * length is the header's length field minus TG3_FW_HDR_LEN.
3689 	 */
3690 	if (tp->fw_len == 0xffffffff)
3691 		fw_len = be32_to_cpu(fw_hdr->len);
3692 	else
3693 		fw_len = tp->fw->size;
3694 
3695 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3696 }
3697 
3698 /* tp->lock is held. */
3699 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3700 				 u32 cpu_scratch_base, int cpu_scratch_size,
3701 				 const struct tg3_firmware_hdr *fw_hdr)
3702 {
3703 	int err, i;
3704 	void (*write_op)(struct tg3 *, u32, u32);
3705 	int total_len = tp->fw->size;
3706 
3707 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3708 		netdev_err(tp->dev,
3709 			   "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
3710 			   __func__);
3711 		return -EINVAL;
3712 	}
3713 
3714 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3715 		write_op = tg3_write_mem;
3716 	else
3717 		write_op = tg3_write_indirect_reg32;
3718 
3719 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3720 		/* It is possible that the bootcode is still loading at this
3721 		 * point.  Get the NVRAM lock before halting the CPU.
3722 		 */
3723 		int lock_err = tg3_nvram_lock(tp);
3724 		err = tg3_halt_cpu(tp, cpu_base);
3725 		if (!lock_err)
3726 			tg3_nvram_unlock(tp);
3727 		if (err)
3728 			goto out;
3729 
3730 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3731 			write_op(tp, cpu_scratch_base + i, 0);
3732 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3733 		tw32(cpu_base + CPU_MODE,
3734 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3735 	} else {
		/* Subtract the additional main header for fragmented firmware
		 * and advance to the first fragment.
		 */
3739 		total_len -= TG3_FW_HDR_LEN;
3740 		fw_hdr++;
3741 	}
3742 
3743 	do {
3744 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3745 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3746 			write_op(tp, cpu_scratch_base +
3747 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3748 				     (i * sizeof(u32)),
3749 				 be32_to_cpu(fw_data[i]));
3750 
3751 		total_len -= be32_to_cpu(fw_hdr->len);
3752 
3753 		/* Advance to next fragment */
3754 		fw_hdr = (struct tg3_firmware_hdr *)
3755 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3756 	} while (total_len > 0);
3757 
3758 	err = 0;
3759 
3760 out:
3761 	return err;
3762 }
3763 
3764 /* tp->lock is held. */
3765 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3766 {
3767 	int i;
3768 	const int iters = 5;
3769 
3770 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3771 	tw32_f(cpu_base + CPU_PC, pc);
3772 
3773 	for (i = 0; i < iters; i++) {
3774 		if (tr32(cpu_base + CPU_PC) == pc)
3775 			break;
3776 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3777 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3778 		tw32_f(cpu_base + CPU_PC, pc);
3779 		udelay(1000);
3780 	}
3781 
3782 	return (i == iters) ? -EBUSY : 0;
3783 }
3784 
3785 /* tp->lock is held. */
3786 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3787 {
3788 	const struct tg3_firmware_hdr *fw_hdr;
3789 	int err;
3790 
3791 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3792 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously
	 * from the start address.
	 */
3798 
3799 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3800 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801 				    fw_hdr);
3802 	if (err)
3803 		return err;
3804 
3805 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3806 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807 				    fw_hdr);
3808 	if (err)
3809 		return err;
3810 
3811 	/* Now startup only the RX cpu. */
3812 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3813 				       be32_to_cpu(fw_hdr->base_addr));
3814 	if (err) {
3815 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3816 			   "should be %08x\n", __func__,
3817 			   tr32(RX_CPU_BASE + CPU_PC),
3818 				be32_to_cpu(fw_hdr->base_addr));
3819 		return -ENODEV;
3820 	}
3821 
3822 	tg3_rxcpu_resume(tp);
3823 
3824 	return 0;
3825 }
3826 
3827 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3828 {
3829 	const int iters = 1000;
3830 	int i;
3831 	u32 val;
3832 
	/* Wait for the boot code to complete initialization and enter the
	 * service loop. It is then safe to download service patches.
	 */
3836 	for (i = 0; i < iters; i++) {
3837 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3838 			break;
3839 
3840 		udelay(10);
3841 	}
3842 
3843 	if (i == iters) {
3844 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3845 		return -EBUSY;
3846 	}
3847 
3848 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3849 	if (val & 0xff) {
3850 		netdev_warn(tp->dev,
3851 			    "Other patches exist. Not downloading EEE patch\n");
3852 		return -EEXIST;
3853 	}
3854 
3855 	return 0;
3856 }
3857 
3858 /* tp->lock is held. */
3859 static void tg3_load_57766_firmware(struct tg3 *tp)
3860 {
3861 	struct tg3_firmware_hdr *fw_hdr;
3862 
3863 	if (!tg3_flag(tp, NO_NVRAM))
3864 		return;
3865 
3866 	if (tg3_validate_rxcpu_state(tp))
3867 		return;
3868 
3869 	if (!tp->fw)
3870 		return;
3871 
	/* This firmware blob has a different format from older firmware
	 * releases, as described below. The main difference is that the data
	 * is fragmented and written to non-contiguous locations.
	 *
	 * The blob begins with a firmware header identical to that of other
	 * firmware, consisting of version, base address and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments, each of which is
	 * individually identical to previous firmware, i.e. a firmware header
	 * followed by the data for that fragment. The version field of the
	 * individual fragment headers is unused.
	 */
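
	/* Illustrative layout (offsets assume a 12-byte header; all values
	 * are hypothetical):
	 *
	 *   +0x000  main header     { version, base_addr, len = 0xffffffff }
	 *   +0x00c  fragment 1 hdr  { version (unused), base_addr, len }
	 *   +0x018  fragment 1 data (len - 12 bytes)
	 *   ...     fragment 2 hdr, fragment 2 data, and so on
	 *
	 * Each fragment's len covers its own header plus its data, which is
	 * how tg3_load_firmware_cpu() advances from one fragment to the next.
	 */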
3885 
3886 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3887 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3888 		return;
3889 
3890 	if (tg3_rxcpu_pause(tp))
3891 		return;
3892 
3893 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3894 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3895 
3896 	tg3_rxcpu_resume(tp);
3897 }
3898 
3899 /* tp->lock is held. */
3900 static int tg3_load_tso_firmware(struct tg3 *tp)
3901 {
3902 	const struct tg3_firmware_hdr *fw_hdr;
3903 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3904 	int err;
3905 
3906 	if (!tg3_flag(tp, FW_TSO))
3907 		return 0;
3908 
3909 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3910 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. We are setting the complete length:
	 * length = end_address_of_bss - start_address_of_text.
	 * The remainder is the blob to be loaded contiguously
	 * from the start address.
	 */
3916 
3917 	cpu_scratch_size = tp->fw_len;
3918 
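	/* TX CPU firmware loads are rejected on 5705_PLUS devices (see the
	 * checks in tg3_halt_cpu() and tg3_load_firmware_cpu() above), hence
	 * the 5705 takes the TSO firmware on its RX CPU, borrowing the MBUF
	 * pool as the scratch area.
	 */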
3919 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3920 		cpu_base = RX_CPU_BASE;
3921 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3922 	} else {
3923 		cpu_base = TX_CPU_BASE;
3924 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3925 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3926 	}
3927 
3928 	err = tg3_load_firmware_cpu(tp, cpu_base,
3929 				    cpu_scratch_base, cpu_scratch_size,
3930 				    fw_hdr);
3931 	if (err)
3932 		return err;
3933 
3934 	/* Now startup the cpu. */
3935 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3936 				       be32_to_cpu(fw_hdr->base_addr));
3937 	if (err) {
3938 		netdev_err(tp->dev,
3939 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3940 			   __func__, tr32(cpu_base + CPU_PC),
3941 			   be32_to_cpu(fw_hdr->base_addr));
3942 		return -ENODEV;
3943 	}
3944 
3945 	tg3_resume_cpu(tp, cpu_base);
3946 	return 0;
3947 }
3948 
3949 /* tp->lock is held. */
3950 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3951 {
3952 	u32 addr_high, addr_low;
3953 
3954 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3955 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3956 		    (mac_addr[4] <<  8) | mac_addr[5]);
3957 
3958 	if (index < 4) {
3959 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3960 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961 	} else {
3962 		index -= 4;
3963 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3964 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965 	}
3966 }
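
/* Example (hypothetical address 00:10:18:aa:bb:cc): the six bytes are split
 * across a register pair, giving addr_high = 0x00000010 and
 * addr_low = 0x18aabbcc.  Indices 0-3 select the MAC_ADDR_0..3 pairs and
 * indices 4-15 map to the extended MAC_EXTADDR pairs.
 */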
3967 
3968 /* tp->lock is held. */
3969 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970 {
3971 	u32 addr_high;
3972 	int i;
3973 
3974 	for (i = 0; i < 4; i++) {
3975 		if (i == 1 && skip_mac_1)
3976 			continue;
3977 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 	}
3979 
3980 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3981 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3982 		for (i = 4; i < 16; i++)
3983 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 	}
3985 
3986 	addr_high = (tp->dev->dev_addr[0] +
3987 		     tp->dev->dev_addr[1] +
3988 		     tp->dev->dev_addr[2] +
3989 		     tp->dev->dev_addr[3] +
3990 		     tp->dev->dev_addr[4] +
3991 		     tp->dev->dev_addr[5]) &
3992 		TX_BACKOFF_SEED_MASK;
3993 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3994 }
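
/* With the hypothetical address above (00:10:18:aa:bb:cc), the byte sum is
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259; that sum, masked by
 * TX_BACKOFF_SEED_MASK, is what gets written as the transmit backoff seed.
 */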
3995 
3996 static void tg3_enable_register_access(struct tg3 *tp)
3997 {
3998 	/*
3999 	 * Make sure register accesses (indirect or otherwise) will function
4000 	 * correctly.
4001 	 */
4002 	pci_write_config_dword(tp->pdev,
4003 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004 }
4005 
4006 static int tg3_power_up(struct tg3 *tp)
4007 {
4008 	int err;
4009 
4010 	tg3_enable_register_access(tp);
4011 
4012 	err = pci_set_power_state(tp->pdev, PCI_D0);
4013 	if (!err) {
4014 		/* Switch out of Vaux if it is a NIC */
4015 		tg3_pwrsrc_switch_to_vmain(tp);
4016 	} else {
4017 		netdev_err(tp->dev, "Transition to D0 failed\n");
4018 	}
4019 
4020 	return err;
4021 }
4022 
4023 static int tg3_setup_phy(struct tg3 *, bool);
4024 
4025 static int tg3_power_down_prepare(struct tg3 *tp)
4026 {
4027 	u32 misc_host_ctrl;
4028 	bool device_should_wake, do_low_power;
4029 
4030 	tg3_enable_register_access(tp);
4031 
4032 	/* Restore the CLKREQ setting. */
4033 	if (tg3_flag(tp, CLKREQ_BUG))
4034 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4035 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4036 
4037 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4038 	tw32(TG3PCI_MISC_HOST_CTRL,
4039 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4040 
4041 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4042 			     tg3_flag(tp, WOL_ENABLE);
4043 
4044 	if (tg3_flag(tp, USE_PHYLIB)) {
4045 		do_low_power = false;
4046 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4047 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4048 			struct phy_device *phydev;
4049 			u32 phyid, advertising;
4050 
4051 			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4052 
4053 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4054 
4055 			tp->link_config.speed = phydev->speed;
4056 			tp->link_config.duplex = phydev->duplex;
4057 			tp->link_config.autoneg = phydev->autoneg;
4058 			tp->link_config.advertising = phydev->advertising;
4059 
4060 			advertising = ADVERTISED_TP |
4061 				      ADVERTISED_Pause |
4062 				      ADVERTISED_Autoneg |
4063 				      ADVERTISED_10baseT_Half;
4064 
4065 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4066 				if (tg3_flag(tp, WOL_SPEED_100MB))
4067 					advertising |=
4068 						ADVERTISED_100baseT_Half |
4069 						ADVERTISED_100baseT_Full |
4070 						ADVERTISED_10baseT_Full;
4071 				else
4072 					advertising |= ADVERTISED_10baseT_Full;
4073 			}
4074 
4075 			phydev->advertising = advertising;
4076 
4077 			phy_start_aneg(phydev);
4078 
4079 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4080 			if (phyid != PHY_ID_BCMAC131) {
4081 				phyid &= PHY_BCM_OUI_MASK;
4082 				if (phyid == PHY_BCM_OUI_1 ||
4083 				    phyid == PHY_BCM_OUI_2 ||
4084 				    phyid == PHY_BCM_OUI_3)
4085 					do_low_power = true;
4086 			}
4087 		}
4088 	} else {
4089 		do_low_power = true;
4090 
4091 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4092 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4093 
4094 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4095 			tg3_setup_phy(tp, false);
4096 	}
4097 
4098 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4099 		u32 val;
4100 
4101 		val = tr32(GRC_VCPU_EXT_CTRL);
4102 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4103 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4104 		int i;
4105 		u32 val;
4106 
4107 		for (i = 0; i < 200; i++) {
4108 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4109 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4110 				break;
4111 			msleep(1);
4112 		}
4113 	}
4114 	if (tg3_flag(tp, WOL_CAP))
4115 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4116 						     WOL_DRV_STATE_SHUTDOWN |
4117 						     WOL_DRV_WOL |
4118 						     WOL_SET_MAGIC_PKT);
4119 
4120 	if (device_should_wake) {
4121 		u32 mac_mode;
4122 
4123 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4124 			if (do_low_power &&
4125 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4126 				tg3_phy_auxctl_write(tp,
4127 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4128 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4129 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4130 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4131 				udelay(40);
4132 			}
4133 
4134 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4135 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4136 			else if (tp->phy_flags &
4137 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4138 				if (tp->link_config.active_speed == SPEED_1000)
4139 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4140 				else
4141 					mac_mode = MAC_MODE_PORT_MODE_MII;
4142 			} else
4143 				mac_mode = MAC_MODE_PORT_MODE_MII;
4144 
4145 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4146 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4147 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4148 					     SPEED_100 : SPEED_10;
4149 				if (tg3_5700_link_polarity(tp, speed))
4150 					mac_mode |= MAC_MODE_LINK_POLARITY;
4151 				else
4152 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4153 			}
4154 		} else {
4155 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4156 		}
4157 
4158 		if (!tg3_flag(tp, 5750_PLUS))
4159 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4160 
4161 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4162 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4163 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4164 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4165 
4166 		if (tg3_flag(tp, ENABLE_APE))
4167 			mac_mode |= MAC_MODE_APE_TX_EN |
4168 				    MAC_MODE_APE_RX_EN |
4169 				    MAC_MODE_TDE_ENABLE;
4170 
4171 		tw32_f(MAC_MODE, mac_mode);
4172 		udelay(100);
4173 
4174 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4175 		udelay(10);
4176 	}
4177 
4178 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4179 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4180 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4181 		u32 base_val;
4182 
4183 		base_val = tp->pci_clock_ctrl;
4184 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4185 			     CLOCK_CTRL_TXCLK_DISABLE);
4186 
4187 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4188 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4189 	} else if (tg3_flag(tp, 5780_CLASS) ||
4190 		   tg3_flag(tp, CPMU_PRESENT) ||
4191 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4192 		/* do nothing */
4193 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4194 		u32 newbits1, newbits2;
4195 
4196 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4198 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4199 				    CLOCK_CTRL_TXCLK_DISABLE |
4200 				    CLOCK_CTRL_ALTCLK);
4201 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4202 		} else if (tg3_flag(tp, 5705_PLUS)) {
4203 			newbits1 = CLOCK_CTRL_625_CORE;
4204 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4205 		} else {
4206 			newbits1 = CLOCK_CTRL_ALTCLK;
4207 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 		}
4209 
4210 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4211 			    40);
4212 
4213 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4214 			    40);
4215 
4216 		if (!tg3_flag(tp, 5705_PLUS)) {
4217 			u32 newbits3;
4218 
4219 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4220 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4221 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4222 					    CLOCK_CTRL_TXCLK_DISABLE |
4223 					    CLOCK_CTRL_44MHZ_CORE);
4224 			} else {
4225 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4226 			}
4227 
4228 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4229 				    tp->pci_clock_ctrl | newbits3, 40);
4230 		}
4231 	}
4232 
4233 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4234 		tg3_power_down_phy(tp, do_low_power);
4235 
4236 	tg3_frob_aux_power(tp, true);
4237 
4238 	/* Workaround for unstable PLL clock */
4239 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4240 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4241 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4242 		u32 val = tr32(0x7d00);
4243 
4244 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4245 		tw32(0x7d00, val);
4246 		if (!tg3_flag(tp, ENABLE_ASF)) {
4247 			int err;
4248 
4249 			err = tg3_nvram_lock(tp);
4250 			tg3_halt_cpu(tp, RX_CPU_BASE);
4251 			if (!err)
4252 				tg3_nvram_unlock(tp);
4253 		}
4254 	}
4255 
4256 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4257 
4258 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4259 
4260 	return 0;
4261 }
4262 
4263 static void tg3_power_down(struct tg3 *tp)
4264 {
4265 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4266 	pci_set_power_state(tp->pdev, PCI_D3hot);
4267 }
4268 
4269 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4270 {
4271 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4272 	case MII_TG3_AUX_STAT_10HALF:
4273 		*speed = SPEED_10;
4274 		*duplex = DUPLEX_HALF;
4275 		break;
4276 
4277 	case MII_TG3_AUX_STAT_10FULL:
4278 		*speed = SPEED_10;
4279 		*duplex = DUPLEX_FULL;
4280 		break;
4281 
4282 	case MII_TG3_AUX_STAT_100HALF:
4283 		*speed = SPEED_100;
4284 		*duplex = DUPLEX_HALF;
4285 		break;
4286 
4287 	case MII_TG3_AUX_STAT_100FULL:
4288 		*speed = SPEED_100;
4289 		*duplex = DUPLEX_FULL;
4290 		break;
4291 
4292 	case MII_TG3_AUX_STAT_1000HALF:
4293 		*speed = SPEED_1000;
4294 		*duplex = DUPLEX_HALF;
4295 		break;
4296 
4297 	case MII_TG3_AUX_STAT_1000FULL:
4298 		*speed = SPEED_1000;
4299 		*duplex = DUPLEX_FULL;
4300 		break;
4301 
4302 	default:
4303 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4304 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4305 				 SPEED_10;
4306 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4307 				  DUPLEX_HALF;
4308 			break;
4309 		}
4310 		*speed = SPEED_UNKNOWN;
4311 		*duplex = DUPLEX_UNKNOWN;
4312 		break;
4313 	}
4314 }
4315 
4316 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4317 {
4318 	int err = 0;
4319 	u32 val, new_adv;
4320 
4321 	new_adv = ADVERTISE_CSMA;
4322 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4323 	new_adv |= mii_advertise_flowctrl(flowctrl);
4324 
4325 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4326 	if (err)
4327 		goto done;
4328 
4329 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4330 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4331 
4332 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4333 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4334 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4335 
4336 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4337 		if (err)
4338 			goto done;
4339 	}
4340 
4341 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4342 		goto done;
4343 
4344 	tw32(TG3_CPMU_EEE_MODE,
4345 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4346 
4347 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4348 	if (!err) {
4349 		u32 err2;
4350 
4351 		val = 0;
		/* Advertise 100BASE-TX EEE ability */
4353 		if (advertise & ADVERTISED_100baseT_Full)
4354 			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
4356 		if (advertise & ADVERTISED_1000baseT_Full)
4357 			val |= MDIO_AN_EEE_ADV_1000T;
4358 
4359 		if (!tp->eee.eee_enabled) {
4360 			val = 0;
4361 			tp->eee.advertised = 0;
4362 		} else {
4363 			tp->eee.advertised = advertise &
4364 					     (ADVERTISED_100baseT_Full |
4365 					      ADVERTISED_1000baseT_Full);
4366 		}
4367 
4368 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4369 		if (err)
4370 			val = 0;
4371 
4372 		switch (tg3_asic_rev(tp)) {
4373 		case ASIC_REV_5717:
4374 		case ASIC_REV_57765:
4375 		case ASIC_REV_57766:
4376 		case ASIC_REV_5719:
			/* If we advertised any EEE capabilities above... */
4378 			if (val)
4379 				val = MII_TG3_DSP_TAP26_ALNOKO |
4380 				      MII_TG3_DSP_TAP26_RMRXSTO |
4381 				      MII_TG3_DSP_TAP26_OPCSINPT;
4382 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4383 			/* Fall through */
4384 		case ASIC_REV_5720:
4385 		case ASIC_REV_5762:
4386 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4387 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4388 						 MII_TG3_DSP_CH34TP2_HIBW01);
4389 		}
4390 
4391 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4392 		if (!err)
4393 			err = err2;
4394 	}
4395 
4396 done:
4397 	return err;
4398 }
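
/* For example, assuming the standard mii/ethtool helpers: advertising
 * 10/100/1000 full duplex with symmetric flow control typically yields
 * MII_ADVERTISE = ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP, while the gigabit bit (ADVERTISE_1000FULL) is
 * written separately to MII_CTRL1000.
 */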
4399 
4400 static void tg3_phy_copper_begin(struct tg3 *tp)
4401 {
4402 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4403 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4404 		u32 adv, fc;
4405 
4406 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4407 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4408 			adv = ADVERTISED_10baseT_Half |
4409 			      ADVERTISED_10baseT_Full;
4410 			if (tg3_flag(tp, WOL_SPEED_100MB))
4411 				adv |= ADVERTISED_100baseT_Half |
4412 				       ADVERTISED_100baseT_Full;
4413 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4414 				if (!(tp->phy_flags &
4415 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4416 					adv |= ADVERTISED_1000baseT_Half;
4417 				adv |= ADVERTISED_1000baseT_Full;
4418 			}
4419 
4420 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4421 		} else {
4422 			adv = tp->link_config.advertising;
4423 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4424 				adv &= ~(ADVERTISED_1000baseT_Half |
4425 					 ADVERTISED_1000baseT_Full);
4426 
4427 			fc = tp->link_config.flowctrl;
4428 		}
4429 
4430 		tg3_phy_autoneg_cfg(tp, adv, fc);
4431 
4432 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4433 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4434 			/* Normally during power down we want to autonegotiate
4435 			 * the lowest possible speed for WOL. However, to avoid
4436 			 * link flap, we leave it untouched.
4437 			 */
4438 			return;
4439 		}
4440 
4441 		tg3_writephy(tp, MII_BMCR,
4442 			     BMCR_ANENABLE | BMCR_ANRESTART);
4443 	} else {
4444 		int i;
4445 		u32 bmcr, orig_bmcr;
4446 
4447 		tp->link_config.active_speed = tp->link_config.speed;
4448 		tp->link_config.active_duplex = tp->link_config.duplex;
4449 
4450 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, the 5714/5715 only links up
			 * when the advertisement register has the configured
			 * speed enabled.
			 */
4455 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4456 		}
4457 
4458 		bmcr = 0;
4459 		switch (tp->link_config.speed) {
4460 		default:
4461 		case SPEED_10:
4462 			break;
4463 
4464 		case SPEED_100:
4465 			bmcr |= BMCR_SPEED100;
4466 			break;
4467 
4468 		case SPEED_1000:
4469 			bmcr |= BMCR_SPEED1000;
4470 			break;
4471 		}
4472 
4473 		if (tp->link_config.duplex == DUPLEX_FULL)
4474 			bmcr |= BMCR_FULLDPLX;
4475 
4476 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4477 		    (bmcr != orig_bmcr)) {
4478 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4479 			for (i = 0; i < 1500; i++) {
4480 				u32 tmp;
4481 
4482 				udelay(10);
4483 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4484 				    tg3_readphy(tp, MII_BMSR, &tmp))
4485 					continue;
4486 				if (!(tmp & BMSR_LSTATUS)) {
4487 					udelay(40);
4488 					break;
4489 				}
4490 			}
4491 			tg3_writephy(tp, MII_BMCR, bmcr);
4492 			udelay(40);
4493 		}
4494 	}
4495 }
4496 
4497 static int tg3_phy_pull_config(struct tg3 *tp)
4498 {
4499 	int err;
4500 	u32 val;
4501 
4502 	err = tg3_readphy(tp, MII_BMCR, &val);
4503 	if (err)
4504 		goto done;
4505 
4506 	if (!(val & BMCR_ANENABLE)) {
4507 		tp->link_config.autoneg = AUTONEG_DISABLE;
4508 		tp->link_config.advertising = 0;
4509 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4510 
4511 		err = -EIO;
4512 
4513 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4514 		case 0:
4515 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516 				goto done;
4517 
4518 			tp->link_config.speed = SPEED_10;
4519 			break;
4520 		case BMCR_SPEED100:
4521 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 				goto done;
4523 
4524 			tp->link_config.speed = SPEED_100;
4525 			break;
4526 		case BMCR_SPEED1000:
4527 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4528 				tp->link_config.speed = SPEED_1000;
4529 				break;
4530 			}
4531 			/* Fall through */
4532 		default:
4533 			goto done;
4534 		}
4535 
4536 		if (val & BMCR_FULLDPLX)
4537 			tp->link_config.duplex = DUPLEX_FULL;
4538 		else
4539 			tp->link_config.duplex = DUPLEX_HALF;
4540 
4541 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4542 
4543 		err = 0;
4544 		goto done;
4545 	}
4546 
4547 	tp->link_config.autoneg = AUTONEG_ENABLE;
4548 	tp->link_config.advertising = ADVERTISED_Autoneg;
4549 	tg3_flag_set(tp, PAUSE_AUTONEG);
4550 
4551 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4552 		u32 adv;
4553 
4554 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4555 		if (err)
4556 			goto done;
4557 
4558 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4559 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4560 
4561 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4562 	} else {
4563 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4564 	}
4565 
4566 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4567 		u32 adv;
4568 
4569 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4570 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4571 			if (err)
4572 				goto done;
4573 
4574 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4575 		} else {
4576 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4577 			if (err)
4578 				goto done;
4579 
4580 			adv = tg3_decode_flowctrl_1000X(val);
4581 			tp->link_config.flowctrl = adv;
4582 
4583 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4584 			adv = mii_adv_to_ethtool_adv_x(val);
4585 		}
4586 
4587 		tp->link_config.advertising |= adv;
4588 	}
4589 
4590 done:
4591 	return err;
4592 }
4593 
4594 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4595 {
4596 	int err;
4597 
	/* Turn off tap power management and set the extended packet
	 * length bit.
	 */
4600 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4601 
4602 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4603 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4604 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4605 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4606 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4607 
4608 	udelay(40);
4609 
4610 	return err;
4611 }
4612 
4613 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4614 {
4615 	struct ethtool_eee eee;
4616 
4617 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4618 		return true;
4619 
4620 	tg3_eee_pull_config(tp, &eee);
4621 
4622 	if (tp->eee.eee_enabled) {
4623 		if (tp->eee.advertised != eee.advertised ||
4624 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4625 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4626 			return false;
4627 	} else {
4628 		/* EEE is disabled but we're advertising */
4629 		if (eee.advertised)
4630 			return false;
4631 	}
4632 
4633 	return true;
4634 }
4635 
4636 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4637 {
4638 	u32 advmsk, tgtadv, advertising;
4639 
4640 	advertising = tp->link_config.advertising;
4641 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4642 
4643 	advmsk = ADVERTISE_ALL;
4644 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4645 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4646 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4647 	}
4648 
4649 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4650 		return false;
4651 
4652 	if ((*lcladv & advmsk) != tgtadv)
4653 		return false;
4654 
4655 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4656 		u32 tg3_ctrl;
4657 
4658 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4659 
4660 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4661 			return false;
4662 
4663 		if (tgtadv &&
4664 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4665 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4666 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4667 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4668 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4669 		} else {
4670 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4671 		}
4672 
4673 		if (tg3_ctrl != tgtadv)
4674 			return false;
4675 	}
4676 
4677 	return true;
4678 }
4679 
4680 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4681 {
4682 	u32 lpeth = 0;
4683 
4684 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4685 		u32 val;
4686 
4687 		if (tg3_readphy(tp, MII_STAT1000, &val))
4688 			return false;
4689 
4690 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4691 	}
4692 
4693 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4694 		return false;
4695 
4696 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4697 	tp->link_config.rmt_adv = lpeth;
4698 
4699 	return true;
4700 }
4701 
4702 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4703 {
4704 	if (curr_link_up != tp->link_up) {
4705 		if (curr_link_up) {
4706 			netif_carrier_on(tp->dev);
4707 		} else {
4708 			netif_carrier_off(tp->dev);
4709 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4710 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4711 		}
4712 
4713 		tg3_link_report(tp);
4714 		return true;
4715 	}
4716 
4717 	return false;
4718 }
4719 
4720 static void tg3_clear_mac_status(struct tg3 *tp)
4721 {
4722 	tw32(MAC_EVENT, 0);
4723 
4724 	tw32_f(MAC_STATUS,
4725 	       MAC_STATUS_SYNC_CHANGED |
4726 	       MAC_STATUS_CFG_CHANGED |
4727 	       MAC_STATUS_MI_COMPLETION |
4728 	       MAC_STATUS_LNKSTATE_CHANGED);
4729 	udelay(40);
4730 }
4731 
4732 static void tg3_setup_eee(struct tg3 *tp)
4733 {
4734 	u32 val;
4735 
4736 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4737 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4738 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4739 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4740 
4741 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4742 
4743 	tw32_f(TG3_CPMU_EEE_CTRL,
4744 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4745 
4746 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4747 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4748 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4749 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4750 
4751 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4752 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4753 
4754 	if (tg3_flag(tp, ENABLE_APE))
4755 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4756 
4757 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4758 
4759 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4760 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4761 	       (tp->eee.tx_lpi_timer & 0xffff));
4762 
4763 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4764 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4765 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4766 }
4767 
4768 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4769 {
4770 	bool current_link_up;
4771 	u32 bmsr, val;
4772 	u32 lcl_adv, rmt_adv;
4773 	u16 current_speed;
4774 	u8 current_duplex;
4775 	int i, err;
4776 
4777 	tg3_clear_mac_status(tp);
4778 
4779 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4780 		tw32_f(MAC_MI_MODE,
4781 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4782 		udelay(80);
4783 	}
4784 
4785 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4786 
4787 	/* Some third-party PHYs need to be reset on link going
4788 	 * down.
4789 	 */
4790 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4791 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4792 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4793 	    tp->link_up) {
4794 		tg3_readphy(tp, MII_BMSR, &bmsr);
4795 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4796 		    !(bmsr & BMSR_LSTATUS))
4797 			force_reset = true;
4798 	}
4799 	if (force_reset)
4800 		tg3_phy_reset(tp);
4801 
4802 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4803 		tg3_readphy(tp, MII_BMSR, &bmsr);
4804 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4805 		    !tg3_flag(tp, INIT_COMPLETE))
4806 			bmsr = 0;
4807 
4808 		if (!(bmsr & BMSR_LSTATUS)) {
4809 			err = tg3_init_5401phy_dsp(tp);
4810 			if (err)
4811 				return err;
4812 
4813 			tg3_readphy(tp, MII_BMSR, &bmsr);
4814 			for (i = 0; i < 1000; i++) {
4815 				udelay(10);
4816 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4817 				    (bmsr & BMSR_LSTATUS)) {
4818 					udelay(40);
4819 					break;
4820 				}
4821 			}
4822 
4823 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4824 			    TG3_PHY_REV_BCM5401_B0 &&
4825 			    !(bmsr & BMSR_LSTATUS) &&
4826 			    tp->link_config.active_speed == SPEED_1000) {
4827 				err = tg3_phy_reset(tp);
4828 				if (!err)
4829 					err = tg3_init_5401phy_dsp(tp);
4830 				if (err)
4831 					return err;
4832 			}
4833 		}
4834 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4835 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4836 		/* 5701 {A0,B0} CRC bug workaround */
4837 		tg3_writephy(tp, 0x15, 0x0a75);
4838 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4840 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841 	}
4842 
4843 	/* Clear pending interrupts... */
4844 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4845 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846 
4847 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4848 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4849 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4850 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4851 
4852 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4853 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4854 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4855 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4856 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4857 		else
4858 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4859 	}
4860 
4861 	current_link_up = false;
4862 	current_speed = SPEED_UNKNOWN;
4863 	current_duplex = DUPLEX_UNKNOWN;
4864 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4865 	tp->link_config.rmt_adv = 0;
4866 
4867 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4868 		err = tg3_phy_auxctl_read(tp,
4869 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 					  &val);
4871 		if (!err && !(val & (1 << 10))) {
4872 			tg3_phy_auxctl_write(tp,
4873 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874 					     val | (1 << 10));
4875 			goto relink;
4876 		}
4877 	}
4878 
4879 	bmsr = 0;
4880 	for (i = 0; i < 100; i++) {
4881 		tg3_readphy(tp, MII_BMSR, &bmsr);
4882 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4883 		    (bmsr & BMSR_LSTATUS))
4884 			break;
4885 		udelay(40);
4886 	}
4887 
4888 	if (bmsr & BMSR_LSTATUS) {
4889 		u32 aux_stat, bmcr;
4890 
4891 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4892 		for (i = 0; i < 2000; i++) {
4893 			udelay(10);
4894 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4895 			    aux_stat)
4896 				break;
4897 		}
4898 
4899 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4900 					     &current_speed,
4901 					     &current_duplex);
4902 
4903 		bmcr = 0;
4904 		for (i = 0; i < 200; i++) {
4905 			tg3_readphy(tp, MII_BMCR, &bmcr);
4906 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4907 				continue;
4908 			if (bmcr && bmcr != 0x7fff)
4909 				break;
4910 			udelay(10);
4911 		}
4912 
4913 		lcl_adv = 0;
4914 		rmt_adv = 0;
4915 
4916 		tp->link_config.active_speed = current_speed;
4917 		tp->link_config.active_duplex = current_duplex;
4918 
4919 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4920 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4921 
4922 			if ((bmcr & BMCR_ANENABLE) &&
4923 			    eee_config_ok &&
4924 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4925 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4926 				current_link_up = true;
4927 
4928 			/* EEE settings changes take effect only after a phy
4929 			 * reset.  If we have skipped a reset due to Link Flap
4930 			 * Avoidance being enabled, do it now.
4931 			 */
4932 			if (!eee_config_ok &&
4933 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4934 			    !force_reset) {
4935 				tg3_setup_eee(tp);
4936 				tg3_phy_reset(tp);
4937 			}
4938 		} else {
4939 			if (!(bmcr & BMCR_ANENABLE) &&
4940 			    tp->link_config.speed == current_speed &&
4941 			    tp->link_config.duplex == current_duplex) {
4942 				current_link_up = true;
4943 			}
4944 		}
4945 
4946 		if (current_link_up &&
4947 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4948 			u32 reg, bit;
4949 
4950 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4951 				reg = MII_TG3_FET_GEN_STAT;
4952 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4953 			} else {
4954 				reg = MII_TG3_EXT_STAT;
4955 				bit = MII_TG3_EXT_STAT_MDIX;
4956 			}
4957 
4958 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4959 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4960 
4961 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4962 		}
4963 	}
4964 
4965 relink:
4966 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4967 		tg3_phy_copper_begin(tp);
4968 
4969 		if (tg3_flag(tp, ROBOSWITCH)) {
4970 			current_link_up = true;
			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4972 			current_speed = SPEED_1000;
4973 			current_duplex = DUPLEX_FULL;
4974 			tp->link_config.active_speed = current_speed;
4975 			tp->link_config.active_duplex = current_duplex;
4976 		}
4977 
4978 		tg3_readphy(tp, MII_BMSR, &bmsr);
4979 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4980 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4981 			current_link_up = true;
4982 	}
4983 
4984 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4985 	if (current_link_up) {
4986 		if (tp->link_config.active_speed == SPEED_100 ||
4987 		    tp->link_config.active_speed == SPEED_10)
4988 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 		else
4990 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4992 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993 	else
4994 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995 
	/* For the 5750 core in the BCM4785 chip to work properly in
	 * RGMII mode, the LED Control Register must be set up.
	 */
4999 	if (tg3_flag(tp, RGMII_MODE)) {
5000 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5001 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5002 
5003 		if (tp->link_config.active_speed == SPEED_10)
5004 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5005 		else if (tp->link_config.active_speed == SPEED_100)
5006 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5007 				     LED_CTRL_100MBPS_ON);
5008 		else if (tp->link_config.active_speed == SPEED_1000)
5009 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010 				     LED_CTRL_1000MBPS_ON);
5011 
5012 		tw32(MAC_LED_CTRL, led_ctrl);
5013 		udelay(40);
5014 	}
5015 
5016 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5017 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5018 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5019 
5020 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5021 		if (current_link_up &&
5022 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5023 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5024 		else
5025 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5026 	}
5027 
5028 	/* ??? Without this setting Netgear GA302T PHY does not
5029 	 * ??? send/receive packets...
5030 	 */
5031 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5032 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5033 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5034 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5035 		udelay(80);
5036 	}
5037 
5038 	tw32_f(MAC_MODE, tp->mac_mode);
5039 	udelay(40);
5040 
5041 	tg3_phy_eee_adjust(tp, current_link_up);
5042 
5043 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5044 		/* Polled via timer. */
5045 		tw32_f(MAC_EVENT, 0);
5046 	} else {
5047 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5048 	}
5049 	udelay(40);
5050 
5051 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5052 	    current_link_up &&
5053 	    tp->link_config.active_speed == SPEED_1000 &&
5054 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5055 		udelay(120);
5056 		tw32_f(MAC_STATUS,
5057 		     (MAC_STATUS_SYNC_CHANGED |
5058 		      MAC_STATUS_CFG_CHANGED));
5059 		udelay(40);
5060 		tg3_write_mem(tp,
5061 			      NIC_SRAM_FIRMWARE_MBOX,
5062 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5063 	}
5064 
5065 	/* Prevent send BD corruption. */
5066 	if (tg3_flag(tp, CLKREQ_BUG)) {
5067 		if (tp->link_config.active_speed == SPEED_100 ||
5068 		    tp->link_config.active_speed == SPEED_10)
5069 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5070 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5071 		else
5072 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5073 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 	}
5075 
5076 	tg3_test_and_report_link_chg(tp, current_link_up);
5077 
5078 	return 0;
5079 }
5080 
5081 struct tg3_fiber_aneginfo {
5082 	int state;
5083 #define ANEG_STATE_UNKNOWN		0
5084 #define ANEG_STATE_AN_ENABLE		1
5085 #define ANEG_STATE_RESTART_INIT		2
5086 #define ANEG_STATE_RESTART		3
5087 #define ANEG_STATE_DISABLE_LINK_OK	4
5088 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5089 #define ANEG_STATE_ABILITY_DETECT	6
5090 #define ANEG_STATE_ACK_DETECT_INIT	7
5091 #define ANEG_STATE_ACK_DETECT		8
5092 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5093 #define ANEG_STATE_COMPLETE_ACK		10
5094 #define ANEG_STATE_IDLE_DETECT_INIT	11
5095 #define ANEG_STATE_IDLE_DETECT		12
5096 #define ANEG_STATE_LINK_OK		13
5097 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5098 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5099 
5100 	u32 flags;
5101 #define MR_AN_ENABLE		0x00000001
5102 #define MR_RESTART_AN		0x00000002
5103 #define MR_AN_COMPLETE		0x00000004
5104 #define MR_PAGE_RX		0x00000008
5105 #define MR_NP_LOADED		0x00000010
5106 #define MR_TOGGLE_TX		0x00000020
5107 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5108 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5109 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5110 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5111 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5112 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5113 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5114 #define MR_TOGGLE_RX		0x00002000
5115 #define MR_NP_RX		0x00004000
5116 
5117 #define MR_LINK_OK		0x80000000
5118 
5119 	unsigned long link_time, cur_time;
5120 
5121 	u32 ability_match_cfg;
5122 	int ability_match_count;
5123 
5124 	char ability_match, idle_match, ack_match;
5125 
5126 	u32 txconfig, rxconfig;
5127 #define ANEG_CFG_NP		0x00000080
5128 #define ANEG_CFG_ACK		0x00000040
5129 #define ANEG_CFG_RF2		0x00000020
5130 #define ANEG_CFG_RF1		0x00000010
5131 #define ANEG_CFG_PS2		0x00000001
5132 #define ANEG_CFG_PS1		0x00008000
5133 #define ANEG_CFG_HD		0x00004000
5134 #define ANEG_CFG_FD		0x00002000
5135 #define ANEG_CFG_INVAL		0x00001f06
5136 
5137 };
5138 #define ANEG_OK		0
5139 #define ANEG_DONE	1
5140 #define ANEG_TIMER_ENAB	2
5141 #define ANEG_FAILED	-1
5142 
5143 #define ANEG_STATE_SETTLE_TIME	10000
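
/* The state machine's only clock is one ap->cur_time increment per call;
 * fiber_autoneg() below invokes it roughly once per microsecond, so
 * ANEG_STATE_SETTLE_TIME corresponds to roughly 10 ms of settle time.
 */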
5144 
5145 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5146 				   struct tg3_fiber_aneginfo *ap)
5147 {
5148 	u16 flowctrl;
5149 	unsigned long delta;
5150 	u32 rx_cfg_reg;
5151 	int ret;
5152 
5153 	if (ap->state == ANEG_STATE_UNKNOWN) {
5154 		ap->rxconfig = 0;
5155 		ap->link_time = 0;
5156 		ap->cur_time = 0;
5157 		ap->ability_match_cfg = 0;
5158 		ap->ability_match_count = 0;
5159 		ap->ability_match = 0;
5160 		ap->idle_match = 0;
5161 		ap->ack_match = 0;
5162 	}
5163 	ap->cur_time++;
5164 
5165 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5166 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5167 
5168 		if (rx_cfg_reg != ap->ability_match_cfg) {
5169 			ap->ability_match_cfg = rx_cfg_reg;
5170 			ap->ability_match = 0;
5171 			ap->ability_match_count = 0;
5172 		} else {
5173 			if (++ap->ability_match_count > 1) {
5174 				ap->ability_match = 1;
5175 				ap->ability_match_cfg = rx_cfg_reg;
5176 			}
5177 		}
5178 		if (rx_cfg_reg & ANEG_CFG_ACK)
5179 			ap->ack_match = 1;
5180 		else
5181 			ap->ack_match = 0;
5182 
5183 		ap->idle_match = 0;
5184 	} else {
5185 		ap->idle_match = 1;
5186 		ap->ability_match_cfg = 0;
5187 		ap->ability_match_count = 0;
5188 		ap->ability_match = 0;
5189 		ap->ack_match = 0;
5190 
5191 		rx_cfg_reg = 0;
5192 	}
5193 
5194 	ap->rxconfig = rx_cfg_reg;
5195 	ret = ANEG_OK;
5196 
5197 	switch (ap->state) {
5198 	case ANEG_STATE_UNKNOWN:
5199 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5200 			ap->state = ANEG_STATE_AN_ENABLE;
5201 
5202 		/* fallthru */
5203 	case ANEG_STATE_AN_ENABLE:
5204 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5205 		if (ap->flags & MR_AN_ENABLE) {
5206 			ap->link_time = 0;
5207 			ap->cur_time = 0;
5208 			ap->ability_match_cfg = 0;
5209 			ap->ability_match_count = 0;
5210 			ap->ability_match = 0;
5211 			ap->idle_match = 0;
5212 			ap->ack_match = 0;
5213 
5214 			ap->state = ANEG_STATE_RESTART_INIT;
5215 		} else {
5216 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5217 		}
5218 		break;
5219 
5220 	case ANEG_STATE_RESTART_INIT:
5221 		ap->link_time = ap->cur_time;
5222 		ap->flags &= ~(MR_NP_LOADED);
5223 		ap->txconfig = 0;
5224 		tw32(MAC_TX_AUTO_NEG, 0);
5225 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5226 		tw32_f(MAC_MODE, tp->mac_mode);
5227 		udelay(40);
5228 
5229 		ret = ANEG_TIMER_ENAB;
5230 		ap->state = ANEG_STATE_RESTART;
5231 
5232 		/* fallthru */
5233 	case ANEG_STATE_RESTART:
5234 		delta = ap->cur_time - ap->link_time;
5235 		if (delta > ANEG_STATE_SETTLE_TIME)
5236 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5237 		else
5238 			ret = ANEG_TIMER_ENAB;
5239 		break;
5240 
5241 	case ANEG_STATE_DISABLE_LINK_OK:
5242 		ret = ANEG_DONE;
5243 		break;
5244 
5245 	case ANEG_STATE_ABILITY_DETECT_INIT:
5246 		ap->flags &= ~(MR_TOGGLE_TX);
5247 		ap->txconfig = ANEG_CFG_FD;
5248 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5249 		if (flowctrl & ADVERTISE_1000XPAUSE)
5250 			ap->txconfig |= ANEG_CFG_PS1;
5251 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5252 			ap->txconfig |= ANEG_CFG_PS2;
5253 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5254 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5255 		tw32_f(MAC_MODE, tp->mac_mode);
5256 		udelay(40);
5257 
5258 		ap->state = ANEG_STATE_ABILITY_DETECT;
5259 		break;
5260 
5261 	case ANEG_STATE_ABILITY_DETECT:
5262 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5263 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5264 		break;
5265 
5266 	case ANEG_STATE_ACK_DETECT_INIT:
5267 		ap->txconfig |= ANEG_CFG_ACK;
5268 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5269 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5270 		tw32_f(MAC_MODE, tp->mac_mode);
5271 		udelay(40);
5272 
5273 		ap->state = ANEG_STATE_ACK_DETECT;
5274 
5275 		/* fallthru */
5276 	case ANEG_STATE_ACK_DETECT:
5277 		if (ap->ack_match != 0) {
5278 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5279 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5280 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5281 			} else {
5282 				ap->state = ANEG_STATE_AN_ENABLE;
5283 			}
5284 		} else if (ap->ability_match != 0 &&
5285 			   ap->rxconfig == 0) {
5286 			ap->state = ANEG_STATE_AN_ENABLE;
5287 		}
5288 		break;
5289 
5290 	case ANEG_STATE_COMPLETE_ACK_INIT:
5291 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5292 			ret = ANEG_FAILED;
5293 			break;
5294 		}
5295 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5296 			       MR_LP_ADV_HALF_DUPLEX |
5297 			       MR_LP_ADV_SYM_PAUSE |
5298 			       MR_LP_ADV_ASYM_PAUSE |
5299 			       MR_LP_ADV_REMOTE_FAULT1 |
5300 			       MR_LP_ADV_REMOTE_FAULT2 |
5301 			       MR_LP_ADV_NEXT_PAGE |
5302 			       MR_TOGGLE_RX |
5303 			       MR_NP_RX);
5304 		if (ap->rxconfig & ANEG_CFG_FD)
5305 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5306 		if (ap->rxconfig & ANEG_CFG_HD)
5307 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5308 		if (ap->rxconfig & ANEG_CFG_PS1)
5309 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5310 		if (ap->rxconfig & ANEG_CFG_PS2)
5311 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5312 		if (ap->rxconfig & ANEG_CFG_RF1)
5313 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5314 		if (ap->rxconfig & ANEG_CFG_RF2)
5315 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5316 		if (ap->rxconfig & ANEG_CFG_NP)
5317 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5318 
5319 		ap->link_time = ap->cur_time;
5320 
5321 		ap->flags ^= (MR_TOGGLE_TX);
5322 		if (ap->rxconfig & 0x0008)
5323 			ap->flags |= MR_TOGGLE_RX;
5324 		if (ap->rxconfig & ANEG_CFG_NP)
5325 			ap->flags |= MR_NP_RX;
5326 		ap->flags |= MR_PAGE_RX;
5327 
5328 		ap->state = ANEG_STATE_COMPLETE_ACK;
5329 		ret = ANEG_TIMER_ENAB;
5330 		break;
5331 
5332 	case ANEG_STATE_COMPLETE_ACK:
5333 		if (ap->ability_match != 0 &&
5334 		    ap->rxconfig == 0) {
5335 			ap->state = ANEG_STATE_AN_ENABLE;
5336 			break;
5337 		}
5338 		delta = ap->cur_time - ap->link_time;
5339 		if (delta > ANEG_STATE_SETTLE_TIME) {
5340 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5341 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 			} else {
5343 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5344 				    !(ap->flags & MR_NP_RX)) {
5345 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346 				} else {
5347 					ret = ANEG_FAILED;
5348 				}
5349 			}
5350 		}
5351 		break;
5352 
5353 	case ANEG_STATE_IDLE_DETECT_INIT:
5354 		ap->link_time = ap->cur_time;
5355 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5356 		tw32_f(MAC_MODE, tp->mac_mode);
5357 		udelay(40);
5358 
5359 		ap->state = ANEG_STATE_IDLE_DETECT;
5360 		ret = ANEG_TIMER_ENAB;
5361 		break;
5362 
5363 	case ANEG_STATE_IDLE_DETECT:
5364 		if (ap->ability_match != 0 &&
5365 		    ap->rxconfig == 0) {
5366 			ap->state = ANEG_STATE_AN_ENABLE;
5367 			break;
5368 		}
5369 		delta = ap->cur_time - ap->link_time;
5370 		if (delta > ANEG_STATE_SETTLE_TIME) {
5371 			/* XXX another gem from the Broadcom driver :( */
5372 			ap->state = ANEG_STATE_LINK_OK;
5373 		}
5374 		break;
5375 
5376 	case ANEG_STATE_LINK_OK:
5377 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5378 		ret = ANEG_DONE;
5379 		break;
5380 
5381 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5382 		/* ??? unimplemented */
5383 		break;
5384 
5385 	case ANEG_STATE_NEXT_PAGE_WAIT:
5386 		/* ??? unimplemented */
5387 		break;
5388 
5389 	default:
5390 		ret = ANEG_FAILED;
5391 		break;
5392 	}
5393 
5394 	return ret;
5395 }
5396 
5397 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5398 {
5399 	int res = 0;
5400 	struct tg3_fiber_aneginfo aninfo;
5401 	int status = ANEG_FAILED;
5402 	unsigned int tick;
5403 	u32 tmp;
5404 
5405 	tw32_f(MAC_TX_AUTO_NEG, 0);
5406 
5407 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5408 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5409 	udelay(40);
5410 
5411 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5412 	udelay(40);
5413 
5414 	memset(&aninfo, 0, sizeof(aninfo));
5415 	aninfo.flags |= MR_AN_ENABLE;
5416 	aninfo.state = ANEG_STATE_UNKNOWN;
5417 	aninfo.cur_time = 0;
5418 	tick = 0;
5419 	while (++tick < 195000) {
5420 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5421 		if (status == ANEG_DONE || status == ANEG_FAILED)
5422 			break;
5423 
5424 		udelay(1);
5425 	}
5426 
5427 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5428 	tw32_f(MAC_MODE, tp->mac_mode);
5429 	udelay(40);
5430 
5431 	*txflags = aninfo.txconfig;
5432 	*rxflags = aninfo.flags;
5433 
5434 	if (status == ANEG_DONE &&
5435 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5436 			     MR_LP_ADV_FULL_DUPLEX)))
5437 		res = 1;
5438 
5439 	return res;
5440 }
5441 
5442 static void tg3_init_bcm8002(struct tg3 *tp)
5443 {
5444 	u32 mac_status = tr32(MAC_STATUS);
5445 	int i;
5446 
	/* Reset when initializing for the first time or when we have a link. */
5448 	if (tg3_flag(tp, INIT_COMPLETE) &&
5449 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5450 		return;
5451 
5452 	/* Set PLL lock range. */
5453 	tg3_writephy(tp, 0x16, 0x8007);
5454 
5455 	/* SW reset */
5456 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5457 
5458 	/* Wait for reset to complete. */
5459 	/* XXX schedule_timeout() ... */
5460 	for (i = 0; i < 500; i++)
5461 		udelay(10);
5462 
5463 	/* Config mode; select PMA/Ch 1 regs. */
5464 	tg3_writephy(tp, 0x10, 0x8411);
5465 
5466 	/* Enable auto-lock and comdet, select txclk for tx. */
5467 	tg3_writephy(tp, 0x11, 0x0a10);
5468 
5469 	tg3_writephy(tp, 0x18, 0x00a0);
5470 	tg3_writephy(tp, 0x16, 0x41ff);
5471 
5472 	/* Assert and deassert POR. */
5473 	tg3_writephy(tp, 0x13, 0x0400);
5474 	udelay(40);
5475 	tg3_writephy(tp, 0x13, 0x0000);
5476 
5477 	tg3_writephy(tp, 0x11, 0x0a50);
5478 	udelay(40);
5479 	tg3_writephy(tp, 0x11, 0x0a10);
5480 
5481 	/* Wait for signal to stabilize */
5482 	/* XXX schedule_timeout() ... */
5483 	for (i = 0; i < 15000; i++)
5484 		udelay(10);
5485 
5486 	/* Deselect the channel register so we can read the PHYID
5487 	 * later.
5488 	 */
5489 	tg3_writephy(tp, 0x10, 0x8011);
5490 }
5491 
5492 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5493 {
5494 	u16 flowctrl;
5495 	bool current_link_up;
5496 	u32 sg_dig_ctrl, sg_dig_status;
5497 	u32 serdes_cfg, expected_sg_dig_ctrl;
5498 	int workaround, port_a;
5499 
5500 	serdes_cfg = 0;
5501 	expected_sg_dig_ctrl = 0;
5502 	workaround = 0;
5503 	port_a = 1;
5504 	current_link_up = false;
5505 
5506 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5507 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5508 		workaround = 1;
5509 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5510 			port_a = 0;
5511 
		/* Preserve bits 0-11, 13, 14 for signal pre-emphasis and
		 * bits 20-23 for the voltage regulator.
		 */
5514 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5515 	}
5516 
5517 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5518 
5519 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5520 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5521 			if (workaround) {
5522 				u32 val = serdes_cfg;
5523 
5524 				if (port_a)
5525 					val |= 0xc010000;
5526 				else
5527 					val |= 0x4010000;
5528 				tw32_f(MAC_SERDES_CFG, val);
5529 			}
5530 
5531 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5532 		}
5533 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5534 			tg3_setup_flow_control(tp, 0, 0);
5535 			current_link_up = true;
5536 		}
5537 		goto out;
5538 	}
5539 
5540 	/* Want auto-negotiation.  */
5541 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5542 
5543 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5544 	if (flowctrl & ADVERTISE_1000XPAUSE)
5545 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5546 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5547 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5548 
5549 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5550 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5551 		    tp->serdes_counter &&
5552 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5553 				    MAC_STATUS_RCVD_CFG)) ==
5554 		     MAC_STATUS_PCS_SYNCED)) {
5555 			tp->serdes_counter--;
5556 			current_link_up = true;
5557 			goto out;
5558 		}
5559 restart_autoneg:
5560 		if (workaround)
5561 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5562 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5563 		udelay(5);
5564 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5565 
5566 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5567 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5568 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5569 				 MAC_STATUS_SIGNAL_DET)) {
5570 		sg_dig_status = tr32(SG_DIG_STATUS);
5571 		mac_status = tr32(MAC_STATUS);
5572 
5573 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5574 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5575 			u32 local_adv = 0, remote_adv = 0;
5576 
5577 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5578 				local_adv |= ADVERTISE_1000XPAUSE;
5579 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5580 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5581 
5582 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5583 				remote_adv |= LPA_1000XPAUSE;
5584 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5585 				remote_adv |= LPA_1000XPAUSE_ASYM;
5586 
5587 			tp->link_config.rmt_adv =
5588 					   mii_adv_to_ethtool_adv_x(remote_adv);
5589 
5590 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5591 			current_link_up = true;
5592 			tp->serdes_counter = 0;
5593 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5594 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5595 			if (tp->serdes_counter)
5596 				tp->serdes_counter--;
5597 			else {
5598 				if (workaround) {
5599 					u32 val = serdes_cfg;
5600 
5601 					if (port_a)
5602 						val |= 0xc010000;
5603 					else
5604 						val |= 0x4010000;
5605 
5606 					tw32_f(MAC_SERDES_CFG, val);
5607 				}
5608 
5609 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5610 				udelay(40);
5611 
				/* Link parallel detection: the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
5615 				mac_status = tr32(MAC_STATUS);
5616 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5617 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5618 					tg3_setup_flow_control(tp, 0, 0);
5619 					current_link_up = true;
5620 					tp->phy_flags |=
5621 						TG3_PHYFLG_PARALLEL_DETECT;
5622 					tp->serdes_counter =
5623 						SERDES_PARALLEL_DET_TIMEOUT;
5624 				} else
5625 					goto restart_autoneg;
5626 			}
5627 		}
5628 	} else {
5629 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5630 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5631 	}
5632 
5633 out:
5634 	return current_link_up;
5635 }
5636 
5637 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5638 {
5639 	bool current_link_up = false;
5640 
5641 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5642 		goto out;
5643 
5644 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5645 		u32 txflags, rxflags;
5646 		int i;
5647 
5648 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5649 			u32 local_adv = 0, remote_adv = 0;
5650 
5651 			if (txflags & ANEG_CFG_PS1)
5652 				local_adv |= ADVERTISE_1000XPAUSE;
5653 			if (txflags & ANEG_CFG_PS2)
5654 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5655 
5656 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5657 				remote_adv |= LPA_1000XPAUSE;
5658 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5659 				remote_adv |= LPA_1000XPAUSE_ASYM;
5660 
5661 			tp->link_config.rmt_adv =
5662 					   mii_adv_to_ethtool_adv_x(remote_adv);
5663 
5664 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5665 
5666 			current_link_up = true;
5667 		}
5668 		for (i = 0; i < 30; i++) {
5669 			udelay(20);
5670 			tw32_f(MAC_STATUS,
5671 			       (MAC_STATUS_SYNC_CHANGED |
5672 				MAC_STATUS_CFG_CHANGED));
5673 			udelay(40);
5674 			if ((tr32(MAC_STATUS) &
5675 			     (MAC_STATUS_SYNC_CHANGED |
5676 			      MAC_STATUS_CFG_CHANGED)) == 0)
5677 				break;
5678 		}
5679 
5680 		mac_status = tr32(MAC_STATUS);
5681 		if (!current_link_up &&
5682 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5683 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5684 			current_link_up = true;
5685 	} else {
5686 		tg3_setup_flow_control(tp, 0, 0);
5687 
5688 		/* Forcing 1000FD link up. */
5689 		current_link_up = true;
5690 
5691 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5692 		udelay(40);
5693 
5694 		tw32_f(MAC_MODE, tp->mac_mode);
5695 		udelay(40);
5696 	}
5697 
5698 out:
5699 	return current_link_up;
5700 }
5701 
5702 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5703 {
5704 	u32 orig_pause_cfg;
5705 	u16 orig_active_speed;
5706 	u8 orig_active_duplex;
5707 	u32 mac_status;
5708 	bool current_link_up;
5709 	int i;
5710 
5711 	orig_pause_cfg = tp->link_config.active_flowctrl;
5712 	orig_active_speed = tp->link_config.active_speed;
5713 	orig_active_duplex = tp->link_config.active_duplex;
5714 
5715 	if (!tg3_flag(tp, HW_AUTONEG) &&
5716 	    tp->link_up &&
5717 	    tg3_flag(tp, INIT_COMPLETE)) {
5718 		mac_status = tr32(MAC_STATUS);
5719 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5720 			       MAC_STATUS_SIGNAL_DET |
5721 			       MAC_STATUS_CFG_CHANGED |
5722 			       MAC_STATUS_RCVD_CFG);
5723 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5724 				   MAC_STATUS_SIGNAL_DET)) {
5725 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5726 					    MAC_STATUS_CFG_CHANGED));
5727 			return 0;
5728 		}
5729 	}
5730 
5731 	tw32_f(MAC_TX_AUTO_NEG, 0);
5732 
5733 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5734 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5735 	tw32_f(MAC_MODE, tp->mac_mode);
5736 	udelay(40);
5737 
5738 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5739 		tg3_init_bcm8002(tp);
5740 
5741 	/* Enable link change event even when serdes polling.  */
5742 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5743 	udelay(40);
5744 
5745 	current_link_up = false;
5746 	tp->link_config.rmt_adv = 0;
5747 	mac_status = tr32(MAC_STATUS);
5748 
5749 	if (tg3_flag(tp, HW_AUTONEG))
5750 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5751 	else
5752 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5753 
5754 	tp->napi[0].hw_status->status =
5755 		(SD_STATUS_UPDATED |
5756 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5757 
5758 	for (i = 0; i < 100; i++) {
5759 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5760 				    MAC_STATUS_CFG_CHANGED));
5761 		udelay(5);
5762 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5763 					 MAC_STATUS_CFG_CHANGED |
5764 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5765 			break;
5766 	}
5767 
5768 	mac_status = tr32(MAC_STATUS);
5769 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5770 		current_link_up = false;
5771 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5772 		    tp->serdes_counter == 0) {
5773 			tw32_f(MAC_MODE, (tp->mac_mode |
5774 					  MAC_MODE_SEND_CONFIGS));
5775 			udelay(1);
5776 			tw32_f(MAC_MODE, tp->mac_mode);
5777 		}
5778 	}
5779 
5780 	if (current_link_up) {
5781 		tp->link_config.active_speed = SPEED_1000;
5782 		tp->link_config.active_duplex = DUPLEX_FULL;
5783 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 				    LED_CTRL_LNKLED_OVERRIDE |
5785 				    LED_CTRL_1000MBPS_ON));
5786 	} else {
5787 		tp->link_config.active_speed = SPEED_UNKNOWN;
5788 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5789 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790 				    LED_CTRL_LNKLED_OVERRIDE |
5791 				    LED_CTRL_TRAFFIC_OVERRIDE));
5792 	}
5793 
5794 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5795 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5796 		if (orig_pause_cfg != now_pause_cfg ||
5797 		    orig_active_speed != tp->link_config.active_speed ||
5798 		    orig_active_duplex != tp->link_config.active_duplex)
5799 			tg3_link_report(tp);
5800 	}
5801 
5802 	return 0;
5803 }
5804 
5805 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5806 {
5807 	int err = 0;
5808 	u32 bmsr, bmcr;
5809 	u16 current_speed = SPEED_UNKNOWN;
5810 	u8 current_duplex = DUPLEX_UNKNOWN;
5811 	bool current_link_up = false;
5812 	u32 local_adv, remote_adv, sgsr;
5813 
5814 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5815 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5816 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5817 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5818 
5819 		if (force_reset)
5820 			tg3_phy_reset(tp);
5821 
5822 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5823 
5824 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5825 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5826 		} else {
5827 			current_link_up = true;
5828 			if (sgsr & SERDES_TG3_SPEED_1000) {
5829 				current_speed = SPEED_1000;
5830 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5832 				current_speed = SPEED_100;
5833 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5834 			} else {
5835 				current_speed = SPEED_10;
5836 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 			}
5838 
5839 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5840 				current_duplex = DUPLEX_FULL;
5841 			else
5842 				current_duplex = DUPLEX_HALF;
5843 		}
5844 
5845 		tw32_f(MAC_MODE, tp->mac_mode);
5846 		udelay(40);
5847 
5848 		tg3_clear_mac_status(tp);
5849 
5850 		goto fiber_setup_done;
5851 	}
5852 
5853 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5854 	tw32_f(MAC_MODE, tp->mac_mode);
5855 	udelay(40);
5856 
5857 	tg3_clear_mac_status(tp);
5858 
5859 	if (force_reset)
5860 		tg3_phy_reset(tp);
5861 
5862 	tp->link_config.rmt_adv = 0;
5863 
5864 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5865 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5867 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5868 			bmsr |= BMSR_LSTATUS;
5869 		else
5870 			bmsr &= ~BMSR_LSTATUS;
5871 	}
5872 
5873 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5874 
5875 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5876 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5877 		/* do nothing, just check for link up at the end */
5878 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5879 		u32 adv, newadv;
5880 
5881 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5883 				 ADVERTISE_1000XPAUSE |
5884 				 ADVERTISE_1000XPSE_ASYM |
5885 				 ADVERTISE_SLCT);
5886 
5887 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5888 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5889 
5890 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5891 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5892 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5893 			tg3_writephy(tp, MII_BMCR, bmcr);
5894 
5895 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5896 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5897 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5898 
5899 			return err;
5900 		}
5901 	} else {
5902 		u32 new_bmcr;
5903 
5904 		bmcr &= ~BMCR_SPEED1000;
5905 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5906 
5907 		if (tp->link_config.duplex == DUPLEX_FULL)
5908 			new_bmcr |= BMCR_FULLDPLX;
5909 
5910 		if (new_bmcr != bmcr) {
5911 			/* BMCR_SPEED1000 is a reserved bit that needs
5912 			 * to be set on write.
5913 			 */
5914 			new_bmcr |= BMCR_SPEED1000;
5915 
5916 			/* Force a linkdown */
5917 			if (tp->link_up) {
5918 				u32 adv;
5919 
5920 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5921 				adv &= ~(ADVERTISE_1000XFULL |
5922 					 ADVERTISE_1000XHALF |
5923 					 ADVERTISE_SLCT);
5924 				tg3_writephy(tp, MII_ADVERTISE, adv);
5925 				tg3_writephy(tp, MII_BMCR, bmcr |
5926 							   BMCR_ANRESTART |
5927 							   BMCR_ANENABLE);
5928 				udelay(10);
5929 				tg3_carrier_off(tp);
5930 			}
5931 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5932 			bmcr = new_bmcr;
5933 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5934 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5936 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5937 					bmsr |= BMSR_LSTATUS;
5938 				else
5939 					bmsr &= ~BMSR_LSTATUS;
5940 			}
5941 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5942 		}
5943 	}
5944 
5945 	if (bmsr & BMSR_LSTATUS) {
5946 		current_speed = SPEED_1000;
5947 		current_link_up = true;
5948 		if (bmcr & BMCR_FULLDPLX)
5949 			current_duplex = DUPLEX_FULL;
5950 		else
5951 			current_duplex = DUPLEX_HALF;
5952 
5953 		local_adv = 0;
5954 		remote_adv = 0;
5955 
5956 		if (bmcr & BMCR_ANENABLE) {
5957 			u32 common;
5958 
5959 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5960 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5961 			common = local_adv & remote_adv;
5962 			if (common & (ADVERTISE_1000XHALF |
5963 				      ADVERTISE_1000XFULL)) {
5964 				if (common & ADVERTISE_1000XFULL)
5965 					current_duplex = DUPLEX_FULL;
5966 				else
5967 					current_duplex = DUPLEX_HALF;
5968 
5969 				tp->link_config.rmt_adv =
5970 					   mii_adv_to_ethtool_adv_x(remote_adv);
5971 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5972 				/* Link is up via parallel detect */
5973 			} else {
5974 				current_link_up = false;
5975 			}
5976 		}
5977 	}
5978 
5979 fiber_setup_done:
5980 	if (current_link_up && current_duplex == DUPLEX_FULL)
5981 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5982 
5983 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5984 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5985 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5986 
5987 	tw32_f(MAC_MODE, tp->mac_mode);
5988 	udelay(40);
5989 
5990 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5991 
5992 	tp->link_config.active_speed = current_speed;
5993 	tp->link_config.active_duplex = current_duplex;
5994 
5995 	tg3_test_and_report_link_chg(tp, current_link_up);
5996 	return err;
5997 }
5998 
5999 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6000 {
6001 	if (tp->serdes_counter) {
6002 		/* Give autoneg time to complete. */
6003 		tp->serdes_counter--;
6004 		return;
6005 	}
6006 
6007 	if (!tp->link_up &&
6008 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6009 		u32 bmcr;
6010 
6011 		tg3_readphy(tp, MII_BMCR, &bmcr);
6012 		if (bmcr & BMCR_ANENABLE) {
6013 			u32 phy1, phy2;
6014 
6015 			/* Select shadow register 0x1f */
6016 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6017 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6018 
6019 			/* Select expansion interrupt status register */
6020 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6021 					 MII_TG3_DSP_EXP1_INT_STAT);
6022 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024 
6025 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6026 				/* We have signal detect and are not receiving
6027 				 * config code words, so the link is up by
6028 				 * parallel detection.
6029 				 */
6030 
6031 				bmcr &= ~BMCR_ANENABLE;
6032 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6033 				tg3_writephy(tp, MII_BMCR, bmcr);
6034 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6035 			}
6036 		}
6037 	} else if (tp->link_up &&
6038 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6039 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6040 		u32 phy2;
6041 
6042 		/* Select expansion interrupt status register */
6043 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6044 				 MII_TG3_DSP_EXP1_INT_STAT);
6045 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6046 		if (phy2 & 0x20) {
6047 			u32 bmcr;
6048 
6049 			/* Config code words received, turn on autoneg. */
6050 			tg3_readphy(tp, MII_BMCR, &bmcr);
6051 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6052 
6053 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6054 
6055 		}
6056 	}
6057 }
6058 
6059 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6060 {
6061 	u32 val;
6062 	int err;
6063 
6064 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6065 		err = tg3_setup_fiber_phy(tp, force_reset);
6066 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6067 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6068 	else
6069 		err = tg3_setup_copper_phy(tp, force_reset);
6070 
6071 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6072 		u32 scale;
6073 
6074 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6075 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6076 			scale = 65;
6077 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6078 			scale = 6;
6079 		else
6080 			scale = 12;
6081 
6082 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6083 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6084 		tw32(GRC_MISC_CFG, val);
6085 	}
6086 
6087 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6088 	      (6 << TX_LENGTHS_IPG_SHIFT);
6089 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6090 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6091 		val |= tr32(MAC_TX_LENGTHS) &
6092 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6093 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6094 
6095 	if (tp->link_config.active_speed == SPEED_1000 &&
6096 	    tp->link_config.active_duplex == DUPLEX_HALF)
6097 		tw32(MAC_TX_LENGTHS, val |
6098 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6099 	else
6100 		tw32(MAC_TX_LENGTHS, val |
6101 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6102 
6103 	if (!tg3_flag(tp, 5705_PLUS)) {
6104 		if (tp->link_up) {
6105 			tw32(HOSTCC_STAT_COAL_TICKS,
6106 			     tp->coal.stats_block_coalesce_usecs);
6107 		} else {
6108 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6109 		}
6110 	}
6111 
6112 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6113 		val = tr32(PCIE_PWR_MGMT_THRESH);
6114 		if (!tp->link_up)
6115 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6116 			      tp->pwrmgmt_thresh;
6117 		else
6118 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6119 		tw32(PCIE_PWR_MGMT_THRESH, val);
6120 	}
6121 
6122 	return err;
6123 }
6124 
6125 /* tp->lock must be held */
6126 static u64 tg3_refclk_read(struct tg3 *tp)
6127 {
6128 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6129 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6130 }
6131 
6132 /* tp->lock must be held */
6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134 {
6135 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136 
6137 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 }
6142 
6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144 static inline void tg3_full_unlock(struct tg3 *tp);
6145 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6146 {
6147 	struct tg3 *tp = netdev_priv(dev);
6148 
6149 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6150 				SOF_TIMESTAMPING_RX_SOFTWARE |
6151 				SOF_TIMESTAMPING_SOFTWARE;
6152 
6153 	if (tg3_flag(tp, PTP_CAPABLE)) {
6154 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6155 					SOF_TIMESTAMPING_RX_HARDWARE |
6156 					SOF_TIMESTAMPING_RAW_HARDWARE;
6157 	}
6158 
6159 	if (tp->ptp_clock)
6160 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6161 	else
6162 		info->phc_index = -1;
6163 
6164 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6165 
6166 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6167 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6168 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6169 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6170 	return 0;
6171 }
6172 
6173 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6174 {
6175 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176 	bool neg_adj = false;
6177 	u32 correction = 0;
6178 
6179 	if (ppb < 0) {
6180 		neg_adj = true;
6181 		ppb = -ppb;
6182 	}
6183 
6184 	/* Frequency adjustment is performed using hardware with a 24 bit
6185 	 * accumulator and a programmable correction value. On each clk, the
6186 	 * correction value gets added to the accumulator and when it
6187 	 * overflows, the time counter is incremented/decremented.
6188 	 *
6189 	 * So conversion from ppb to correction value is
6190 	 *		ppb * (1 << 24) / 1000000000
6191 	 */
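	/* Worked example (illustrative arithmetic only, not from any
	 * hardware documentation): a request of ppb = 1000 gives
	 * correction = 1000 * 16777216 / 1000000000 = 16 after truncation,
	 * so the 24-bit accumulator overflows roughly once every
	 * 2^24 / 16 = 1048576 clocks.
	 */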
6192 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6193 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6194 
6195 	tg3_full_lock(tp, 0);
6196 
6197 	if (correction)
6198 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 		     TG3_EAV_REF_CLK_CORRECT_EN |
6200 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6201 	else
6202 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6203 
6204 	tg3_full_unlock(tp);
6205 
6206 	return 0;
6207 }
6208 
6209 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6210 {
6211 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212 
6213 	tg3_full_lock(tp, 0);
6214 	tp->ptp_adjust += delta;
6215 	tg3_full_unlock(tp);
6216 
6217 	return 0;
6218 }
6219 
6220 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6221 {
6222 	u64 ns;
6223 	u32 remainder;
6224 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225 
6226 	tg3_full_lock(tp, 0);
6227 	ns = tg3_refclk_read(tp);
6228 	ns += tp->ptp_adjust;
6229 	tg3_full_unlock(tp);
6230 
6231 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6232 	ts->tv_nsec = remainder;
6233 
6234 	return 0;
6235 }
6236 
6237 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6238 			   const struct timespec *ts)
6239 {
6240 	u64 ns;
6241 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6242 
6243 	ns = timespec_to_ns(ts);
6244 
6245 	tg3_full_lock(tp, 0);
6246 	tg3_refclk_write(tp, ns);
6247 	tp->ptp_adjust = 0;
6248 	tg3_full_unlock(tp);
6249 
6250 	return 0;
6251 }
6252 
6253 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6254 			  struct ptp_clock_request *rq, int on)
6255 {
6256 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 	u32 clock_ctl;
6258 	int rval = 0;
6259 
6260 	switch (rq->type) {
6261 	case PTP_CLK_REQ_PEROUT:
6262 		if (rq->perout.index != 0)
6263 			return -EINVAL;
6264 
6265 		tg3_full_lock(tp, 0);
6266 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6267 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6268 
6269 		if (on) {
6270 			u64 nsec;
6271 
6272 			nsec = rq->perout.start.sec * 1000000000ULL +
6273 			       rq->perout.start.nsec;
6274 
6275 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6276 				netdev_warn(tp->dev,
6277 					    "Device supports only a one-shot timesync output, period must be 0\n");
6278 				rval = -EINVAL;
6279 				goto err_out;
6280 			}
6281 
6282 			if (nsec & (1ULL << 63)) {
6283 				netdev_warn(tp->dev,
6284 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6285 				rval = -EINVAL;
6286 				goto err_out;
6287 			}
6288 
6289 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6290 			tw32(TG3_EAV_WATCHDOG0_MSB,
6291 			     TG3_EAV_WATCHDOG0_EN |
6292 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6293 
6294 			tw32(TG3_EAV_REF_CLCK_CTL,
6295 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6296 		} else {
6297 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6298 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6299 		}
6300 
6301 err_out:
6302 		tg3_full_unlock(tp);
6303 		return rval;
6304 
6305 	default:
6306 		break;
6307 	}
6308 
6309 	return -EOPNOTSUPP;
6310 }
6311 
6312 static const struct ptp_clock_info tg3_ptp_caps = {
6313 	.owner		= THIS_MODULE,
6314 	.name		= "tg3 clock",
6315 	.max_adj	= 250000000,
6316 	.n_alarm	= 0,
6317 	.n_ext_ts	= 0,
6318 	.n_per_out	= 1,
6319 	.n_pins		= 0,
6320 	.pps		= 0,
6321 	.adjfreq	= tg3_ptp_adjfreq,
6322 	.adjtime	= tg3_ptp_adjtime,
6323 	.gettime	= tg3_ptp_gettime,
6324 	.settime	= tg3_ptp_settime,
6325 	.enable		= tg3_ptp_enable,
6326 };
6327 
6328 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6329 				     struct skb_shared_hwtstamps *timestamp)
6330 {
6331 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6332 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6333 					   tp->ptp_adjust);
6334 }
6335 
6336 /* tp->lock must be held */
6337 static void tg3_ptp_init(struct tg3 *tp)
6338 {
6339 	if (!tg3_flag(tp, PTP_CAPABLE))
6340 		return;
6341 
6342 	/* Initialize the hardware clock to the system time. */
6343 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6344 	tp->ptp_adjust = 0;
6345 	tp->ptp_info = tg3_ptp_caps;
6346 }
6347 
6348 /* tp->lock must be held */
6349 static void tg3_ptp_resume(struct tg3 *tp)
6350 {
6351 	if (!tg3_flag(tp, PTP_CAPABLE))
6352 		return;
6353 
6354 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6355 	tp->ptp_adjust = 0;
6356 }
6357 
6358 static void tg3_ptp_fini(struct tg3 *tp)
6359 {
6360 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6361 		return;
6362 
6363 	ptp_clock_unregister(tp->ptp_clock);
6364 	tp->ptp_clock = NULL;
6365 	tp->ptp_adjust = 0;
6366 }
6367 
6368 static inline int tg3_irq_sync(struct tg3 *tp)
6369 {
6370 	return tp->irq_sync;
6371 }
6372 
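/* Copy 'len' bytes of registers starting at offset 'off' into the dump
 * buffer.  'dst' is advanced by 'off' first so that each captured word
 * lands at the same offset as the hardware register it mirrors.
 */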
6373 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6374 {
6375 	int i;
6376 
6377 	dst = (u32 *)((u8 *)dst + off);
6378 	for (i = 0; i < len; i += sizeof(u32))
6379 		*dst++ = tr32(off + i);
6380 }
6381 
6382 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6383 {
6384 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6385 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6386 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6387 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6388 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6389 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6390 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6391 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6392 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6393 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6394 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6395 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6396 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6397 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6398 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6399 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6400 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6401 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6402 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6403 
6404 	if (tg3_flag(tp, SUPPORT_MSIX))
6405 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6406 
6407 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6408 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6409 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6410 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6411 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6412 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6413 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6414 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6415 
6416 	if (!tg3_flag(tp, 5705_PLUS)) {
6417 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6418 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6419 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6420 	}
6421 
6422 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6423 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6424 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6425 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6426 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6427 
6428 	if (tg3_flag(tp, NVRAM))
6429 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6430 }
6431 
6432 static void tg3_dump_state(struct tg3 *tp)
6433 {
6434 	int i;
6435 	u32 *regs;
6436 
6437 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6438 	if (!regs)
6439 		return;
6440 
6441 	if (tg3_flag(tp, PCI_EXPRESS)) {
6442 		/* Read up to but not including private PCI registers */
6443 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6444 			regs[i / sizeof(u32)] = tr32(i);
6445 	} else
6446 		tg3_dump_legacy_regs(tp, regs);
6447 
6448 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6449 		if (!regs[i + 0] && !regs[i + 1] &&
6450 		    !regs[i + 2] && !regs[i + 3])
6451 			continue;
6452 
6453 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6454 			   i * 4,
6455 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6456 	}
6457 
6458 	kfree(regs);
6459 
6460 	for (i = 0; i < tp->irq_cnt; i++) {
6461 		struct tg3_napi *tnapi = &tp->napi[i];
6462 
6463 		/* SW status block */
6464 		netdev_err(tp->dev,
6465 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6466 			   i,
6467 			   tnapi->hw_status->status,
6468 			   tnapi->hw_status->status_tag,
6469 			   tnapi->hw_status->rx_jumbo_consumer,
6470 			   tnapi->hw_status->rx_consumer,
6471 			   tnapi->hw_status->rx_mini_consumer,
6472 			   tnapi->hw_status->idx[0].rx_producer,
6473 			   tnapi->hw_status->idx[0].tx_consumer);
6474 
6475 		netdev_err(tp->dev,
6476 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6477 			   i,
6478 			   tnapi->last_tag, tnapi->last_irq_tag,
6479 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6480 			   tnapi->rx_rcb_ptr,
6481 			   tnapi->prodring.rx_std_prod_idx,
6482 			   tnapi->prodring.rx_std_cons_idx,
6483 			   tnapi->prodring.rx_jmb_prod_idx,
6484 			   tnapi->prodring.rx_jmb_cons_idx);
6485 	}
6486 }
6487 
6488 /* This is called whenever we suspect that the system chipset is re-
6489  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6490  * is bogus tx completions. We try to recover by setting the
6491  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6492  * in the workqueue.
6493  */
6494 static void tg3_tx_recover(struct tg3 *tp)
6495 {
6496 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6497 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6498 
6499 	netdev_warn(tp->dev,
6500 		    "The system may be re-ordering memory-mapped I/O "
6501 		    "cycles to the network device, attempting to recover. "
6502 		    "Please report the problem to the driver maintainer "
6503 		    "and include system chipset information.\n");
6504 
6505 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6506 }
6507 
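/* Number of free TX descriptors.  The in-flight count is computed
 * modulo the ring size; as a sketch, assuming a 512-entry ring, with
 * tx_prod = 5 and tx_cons = 510 we get (5 - 510) & 511 = 7 descriptors
 * in flight, leaving tx_pending - 7 available.
 */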
6508 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6509 {
6510 	/* Tell compiler to fetch tx indices from memory. */
6511 	barrier();
6512 	return tnapi->tx_pending -
6513 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6514 }
6515 
6516 /* Tigon3 never reports partial packet sends.  So we do not
6517  * need special logic to handle SKBs that have not had all
6518  * of their frags sent yet, like SunGEM does.
6519  */
6520 static void tg3_tx(struct tg3_napi *tnapi)
6521 {
6522 	struct tg3 *tp = tnapi->tp;
6523 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6524 	u32 sw_idx = tnapi->tx_cons;
6525 	struct netdev_queue *txq;
6526 	int index = tnapi - tp->napi;
6527 	unsigned int pkts_compl = 0, bytes_compl = 0;
6528 
6529 	if (tg3_flag(tp, ENABLE_TSS))
6530 		index--;
6531 
6532 	txq = netdev_get_tx_queue(tp->dev, index);
6533 
6534 	while (sw_idx != hw_idx) {
6535 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6536 		struct sk_buff *skb = ri->skb;
6537 		int i, tx_bug = 0;
6538 
6539 		if (unlikely(skb == NULL)) {
6540 			tg3_tx_recover(tp);
6541 			return;
6542 		}
6543 
6544 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6545 			struct skb_shared_hwtstamps timestamp;
6546 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6547 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6548 
6549 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6550 
6551 			skb_tstamp_tx(skb, &timestamp);
6552 		}
6553 
6554 		pci_unmap_single(tp->pdev,
6555 				 dma_unmap_addr(ri, mapping),
6556 				 skb_headlen(skb),
6557 				 PCI_DMA_TODEVICE);
6558 
6559 		ri->skb = NULL;
6560 
6561 		while (ri->fragmented) {
6562 			ri->fragmented = false;
6563 			sw_idx = NEXT_TX(sw_idx);
6564 			ri = &tnapi->tx_buffers[sw_idx];
6565 		}
6566 
6567 		sw_idx = NEXT_TX(sw_idx);
6568 
6569 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6570 			ri = &tnapi->tx_buffers[sw_idx];
6571 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6572 				tx_bug = 1;
6573 
6574 			pci_unmap_page(tp->pdev,
6575 				       dma_unmap_addr(ri, mapping),
6576 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6577 				       PCI_DMA_TODEVICE);
6578 
6579 			while (ri->fragmented) {
6580 				ri->fragmented = false;
6581 				sw_idx = NEXT_TX(sw_idx);
6582 				ri = &tnapi->tx_buffers[sw_idx];
6583 			}
6584 
6585 			sw_idx = NEXT_TX(sw_idx);
6586 		}
6587 
6588 		pkts_compl++;
6589 		bytes_compl += skb->len;
6590 
6591 		dev_kfree_skb_any(skb);
6592 
6593 		if (unlikely(tx_bug)) {
6594 			tg3_tx_recover(tp);
6595 			return;
6596 		}
6597 	}
6598 
6599 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6600 
6601 	tnapi->tx_cons = sw_idx;
6602 
6603 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6604 	 * before checking for netif_queue_stopped().  Without the
6605 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6606 	 * will miss it and cause the queue to be stopped forever.
6607 	 */
6608 	smp_mb();
6609 
6610 	if (unlikely(netif_tx_queue_stopped(txq) &&
6611 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6612 		__netif_tx_lock(txq, smp_processor_id());
6613 		if (netif_tx_queue_stopped(txq) &&
6614 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6615 			netif_tx_wake_queue(txq);
6616 		__netif_tx_unlock(txq);
6617 	}
6618 }
6619 
6620 static void tg3_frag_free(bool is_frag, void *data)
6621 {
6622 	if (is_frag)
6623 		put_page(virt_to_head_page(data));
6624 	else
6625 		kfree(data);
6626 }
6627 
6628 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6629 {
6630 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6631 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6632 
6633 	if (!ri->data)
6634 		return;
6635 
6636 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6637 			 map_sz, PCI_DMA_FROMDEVICE);
6638 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6639 	ri->data = NULL;
6640 }
6641 
6642 
6643 /* Returns the size of the rx data buffer allocated, or < 0 on error.
6644  *
6645  * We only need to fill in the address because the other members
6646  * of the RX descriptor are invariant, see tg3_init_rings.
6647  *
6648  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6649  * posting buffers we only dirty the first cache line of the RX
6650  * descriptor (containing the address).  Whereas for the RX status
6651  * buffers the cpu only reads the last cacheline of the RX descriptor
6652  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6653  */
6654 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6655 			     u32 opaque_key, u32 dest_idx_unmasked,
6656 			     unsigned int *frag_size)
6657 {
6658 	struct tg3_rx_buffer_desc *desc;
6659 	struct ring_info *map;
6660 	u8 *data;
6661 	dma_addr_t mapping;
6662 	int skb_size, data_size, dest_idx;
6663 
6664 	switch (opaque_key) {
6665 	case RXD_OPAQUE_RING_STD:
6666 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6667 		desc = &tpr->rx_std[dest_idx];
6668 		map = &tpr->rx_std_buffers[dest_idx];
6669 		data_size = tp->rx_pkt_map_sz;
6670 		break;
6671 
6672 	case RXD_OPAQUE_RING_JUMBO:
6673 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6674 		desc = &tpr->rx_jmb[dest_idx].std;
6675 		map = &tpr->rx_jmb_buffers[dest_idx];
6676 		data_size = TG3_RX_JMB_MAP_SZ;
6677 		break;
6678 
6679 	default:
6680 		return -EINVAL;
6681 	}
6682 
6683 	/* Do not overwrite any of the map or rp information
6684 	 * until we are sure we can commit to a new buffer.
6685 	 *
6686 	 * Callers depend upon this behavior and assume that
6687 	 * we leave everything unchanged if we fail.
6688 	 */
6689 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6690 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6691 	if (skb_size <= PAGE_SIZE) {
6692 		data = netdev_alloc_frag(skb_size);
6693 		*frag_size = skb_size;
6694 	} else {
6695 		data = kmalloc(skb_size, GFP_ATOMIC);
6696 		*frag_size = 0;
6697 	}
6698 	if (!data)
6699 		return -ENOMEM;
6700 
6701 	mapping = pci_map_single(tp->pdev,
6702 				 data + TG3_RX_OFFSET(tp),
6703 				 data_size,
6704 				 PCI_DMA_FROMDEVICE);
6705 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6706 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6707 		return -EIO;
6708 	}
6709 
6710 	map->data = data;
6711 	dma_unmap_addr_set(map, mapping, mapping);
6712 
6713 	desc->addr_hi = ((u64)mapping >> 32);
6714 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6715 
6716 	return data_size;
6717 }
6718 
6719 /* We only need to copy over the address because the other
6720  * members of the RX descriptor are invariant.  See notes above
6721  * tg3_alloc_rx_data for full details.
6722  */
6723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6724 			   struct tg3_rx_prodring_set *dpr,
6725 			   u32 opaque_key, int src_idx,
6726 			   u32 dest_idx_unmasked)
6727 {
6728 	struct tg3 *tp = tnapi->tp;
6729 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6730 	struct ring_info *src_map, *dest_map;
6731 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6732 	int dest_idx;
6733 
6734 	switch (opaque_key) {
6735 	case RXD_OPAQUE_RING_STD:
6736 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6737 		dest_desc = &dpr->rx_std[dest_idx];
6738 		dest_map = &dpr->rx_std_buffers[dest_idx];
6739 		src_desc = &spr->rx_std[src_idx];
6740 		src_map = &spr->rx_std_buffers[src_idx];
6741 		break;
6742 
6743 	case RXD_OPAQUE_RING_JUMBO:
6744 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6745 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6746 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6747 		src_desc = &spr->rx_jmb[src_idx].std;
6748 		src_map = &spr->rx_jmb_buffers[src_idx];
6749 		break;
6750 
6751 	default:
6752 		return;
6753 	}
6754 
6755 	dest_map->data = src_map->data;
6756 	dma_unmap_addr_set(dest_map, mapping,
6757 			   dma_unmap_addr(src_map, mapping));
6758 	dest_desc->addr_hi = src_desc->addr_hi;
6759 	dest_desc->addr_lo = src_desc->addr_lo;
6760 
6761 	/* Ensure that the update to the skb happens after the physical
6762 	 * addresses have been transferred to the new BD location.
6763 	 */
6764 	smp_wmb();
6765 
6766 	src_map->data = NULL;
6767 }
6768 
6769 /* The RX ring scheme is composed of multiple rings which post fresh
6770  * buffers to the chip, and one special ring the chip uses to report
6771  * status back to the host.
6772  *
6773  * The special ring reports the status of received packets to the
6774  * host.  The chip does not write into the original descriptor the
6775  * RX buffer was obtained from.  The chip simply takes the original
6776  * descriptor as provided by the host, updates the status and length
6777  * field, then writes this into the next status ring entry.
6778  *
6779  * Each ring the host uses to post buffers to the chip is described
6780  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6781  * it is first placed into on-chip RAM.  Once the packet's length is
6782  * known, the chip walks down the TG3_BDINFO entries to select the ring.
6783  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6784  * whose MAXLEN covers the new packet's length is chosen.
6785  *
6786  * The "separate ring for rx status" scheme may sound queer, but it makes
6787  * sense from a cache coherency perspective.  If only the host writes
6788  * to the buffer post rings, and only the chip writes to the rx status
6789  * rings, then cache lines never move beyond shared-modified state.
6790  * If both the host and chip were to write into the same ring, cache line
6791  * eviction could occur since both entities want it in an exclusive state.
6792  */
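/* A rough sketch of the resulting flow (simplified, host-side view):
 *
 *   host:  allocate a buffer, write its DMA address to a producer ring
 *   chip:  DMA the packet, write a status entry into the return ring
 *   host:  tg3_rx() reads the return ring, reposts buffers, acks mailbox
 */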
6793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6794 {
6795 	struct tg3 *tp = tnapi->tp;
6796 	u32 work_mask, rx_std_posted = 0;
6797 	u32 std_prod_idx, jmb_prod_idx;
6798 	u32 sw_idx = tnapi->rx_rcb_ptr;
6799 	u16 hw_idx;
6800 	int received;
6801 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6802 
6803 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6804 	/*
6805 	 * We need to order the read of hw_idx and the read of
6806 	 * the opaque cookie.
6807 	 */
6808 	rmb();
6809 	work_mask = 0;
6810 	received = 0;
6811 	std_prod_idx = tpr->rx_std_prod_idx;
6812 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6813 	while (sw_idx != hw_idx && budget > 0) {
6814 		struct ring_info *ri;
6815 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6816 		unsigned int len;
6817 		struct sk_buff *skb;
6818 		dma_addr_t dma_addr;
6819 		u32 opaque_key, desc_idx, *post_ptr;
6820 		u8 *data;
6821 		u64 tstamp = 0;
6822 
6823 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6824 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6825 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6826 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6827 			dma_addr = dma_unmap_addr(ri, mapping);
6828 			data = ri->data;
6829 			post_ptr = &std_prod_idx;
6830 			rx_std_posted++;
6831 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6832 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6833 			dma_addr = dma_unmap_addr(ri, mapping);
6834 			data = ri->data;
6835 			post_ptr = &jmb_prod_idx;
6836 		} else
6837 			goto next_pkt_nopost;
6838 
6839 		work_mask |= opaque_key;
6840 
6841 		if (desc->err_vlan & RXD_ERR_MASK) {
6842 		drop_it:
6843 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6844 				       desc_idx, *post_ptr);
6845 		drop_it_no_recycle:
6846 			/* Other statistics kept track of by card. */
6847 			tp->rx_dropped++;
6848 			goto next_pkt;
6849 		}
6850 
6851 		prefetch(data + TG3_RX_OFFSET(tp));
6852 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6853 		      ETH_FCS_LEN;
6854 
6855 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6857 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 		     RXD_FLAG_PTPSTAT_PTPV2) {
6859 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6860 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6861 		}
6862 
6863 		if (len > TG3_RX_COPY_THRESH(tp)) {
6864 			int skb_size;
6865 			unsigned int frag_size;
6866 
6867 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6868 						    *post_ptr, &frag_size);
6869 			if (skb_size < 0)
6870 				goto drop_it;
6871 
6872 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6873 					 PCI_DMA_FROMDEVICE);
6874 
6875 			/* Ensure that the update to the data happens
6876 			 * after the usage of the old DMA mapping.
6877 			 */
6878 			smp_wmb();
6879 
6880 			ri->data = NULL;
6881 
6882 			skb = build_skb(data, frag_size);
6883 			if (!skb) {
6884 				tg3_frag_free(frag_size != 0, data);
6885 				goto drop_it_no_recycle;
6886 			}
6887 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6888 		} else {
6889 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6890 				       desc_idx, *post_ptr);
6891 
6892 			skb = netdev_alloc_skb(tp->dev,
6893 					       len + TG3_RAW_IP_ALIGN);
6894 			if (skb == NULL)
6895 				goto drop_it_no_recycle;
6896 
6897 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6898 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6899 			memcpy(skb->data,
6900 			       data + TG3_RX_OFFSET(tp),
6901 			       len);
6902 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6903 		}
6904 
6905 		skb_put(skb, len);
6906 		if (tstamp)
6907 			tg3_hwclock_to_timestamp(tp, tstamp,
6908 						 skb_hwtstamps(skb));
6909 
6910 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6911 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6912 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6913 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6914 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6915 		else
6916 			skb_checksum_none_assert(skb);
6917 
6918 		skb->protocol = eth_type_trans(skb, tp->dev);
6919 
6920 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6921 		    skb->protocol != htons(ETH_P_8021Q) &&
6922 		    skb->protocol != htons(ETH_P_8021AD)) {
6923 			dev_kfree_skb_any(skb);
6924 			goto drop_it_no_recycle;
6925 		}
6926 
6927 		if (desc->type_flags & RXD_FLAG_VLAN &&
6928 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6929 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6930 					       desc->err_vlan & RXD_VLAN_MASK);
6931 
6932 		napi_gro_receive(&tnapi->napi, skb);
6933 
6934 		received++;
6935 		budget--;
6936 
6937 next_pkt:
6938 		(*post_ptr)++;
6939 
6940 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6941 			tpr->rx_std_prod_idx = std_prod_idx &
6942 					       tp->rx_std_ring_mask;
6943 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6944 				     tpr->rx_std_prod_idx);
6945 			work_mask &= ~RXD_OPAQUE_RING_STD;
6946 			rx_std_posted = 0;
6947 		}
6948 next_pkt_nopost:
6949 		sw_idx++;
6950 		sw_idx &= tp->rx_ret_ring_mask;
6951 
6952 		/* Refresh hw_idx to see if there is new work */
6953 		if (sw_idx == hw_idx) {
6954 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6955 			rmb();
6956 		}
6957 	}
6958 
6959 	/* ACK the status ring. */
6960 	tnapi->rx_rcb_ptr = sw_idx;
6961 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6962 
6963 	/* Refill RX ring(s). */
6964 	if (!tg3_flag(tp, ENABLE_RSS)) {
6965 		/* Sync BD data before updating mailbox */
6966 		wmb();
6967 
6968 		if (work_mask & RXD_OPAQUE_RING_STD) {
6969 			tpr->rx_std_prod_idx = std_prod_idx &
6970 					       tp->rx_std_ring_mask;
6971 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6972 				     tpr->rx_std_prod_idx);
6973 		}
6974 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6975 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6976 					       tp->rx_jmb_ring_mask;
6977 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6978 				     tpr->rx_jmb_prod_idx);
6979 		}
6980 		mmiowb();
6981 	} else if (work_mask) {
6982 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6983 		 * updated before the producer indices can be updated.
6984 		 */
6985 		smp_wmb();
6986 
6987 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6988 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6989 
6990 		if (tnapi != &tp->napi[1]) {
6991 			tp->rx_refill = true;
6992 			napi_schedule(&tp->napi[1].napi);
6993 		}
6994 	}
6995 
6996 	return received;
6997 }
6998 
6999 static void tg3_poll_link(struct tg3 *tp)
7000 {
7001 	/* handle link change and other phy events */
7002 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7003 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7004 
7005 		if (sblk->status & SD_STATUS_LINK_CHG) {
7006 			sblk->status = SD_STATUS_UPDATED |
7007 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7008 			spin_lock(&tp->lock);
7009 			if (tg3_flag(tp, USE_PHYLIB)) {
7010 				tw32_f(MAC_STATUS,
7011 				     (MAC_STATUS_SYNC_CHANGED |
7012 				      MAC_STATUS_CFG_CHANGED |
7013 				      MAC_STATUS_MI_COMPLETION |
7014 				      MAC_STATUS_LNKSTATE_CHANGED));
7015 				udelay(40);
7016 			} else
7017 				tg3_setup_phy(tp, false);
7018 			spin_unlock(&tp->lock);
7019 		}
7020 	}
7021 }
7022 
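/* Move freshly refilled standard and jumbo RX buffers from a source
 * producer ring to the destination ring consumed by the hardware.
 * Returns -ENOSPC when a destination slot is still occupied; whatever
 * could not be moved is left on the source ring.
 */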
7023 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7024 				struct tg3_rx_prodring_set *dpr,
7025 				struct tg3_rx_prodring_set *spr)
7026 {
7027 	u32 si, di, cpycnt, src_prod_idx;
7028 	int i, err = 0;
7029 
7030 	while (1) {
7031 		src_prod_idx = spr->rx_std_prod_idx;
7032 
7033 		/* Make sure updates to the rx_std_buffers[] entries and the
7034 		 * standard producer index are seen in the correct order.
7035 		 */
7036 		smp_rmb();
7037 
7038 		if (spr->rx_std_cons_idx == src_prod_idx)
7039 			break;
7040 
7041 		if (spr->rx_std_cons_idx < src_prod_idx)
7042 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7043 		else
7044 			cpycnt = tp->rx_std_ring_mask + 1 -
7045 				 spr->rx_std_cons_idx;
7046 
7047 		cpycnt = min(cpycnt,
7048 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7049 
7050 		si = spr->rx_std_cons_idx;
7051 		di = dpr->rx_std_prod_idx;
7052 
7053 		for (i = di; i < di + cpycnt; i++) {
7054 			if (dpr->rx_std_buffers[i].data) {
7055 				cpycnt = i - di;
7056 				err = -ENOSPC;
7057 				break;
7058 			}
7059 		}
7060 
7061 		if (!cpycnt)
7062 			break;
7063 
7064 		/* Ensure that updates to the rx_std_buffers ring and the
7065 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7066 		 * ordered correctly WRT the skb check above.
7067 		 */
7068 		smp_rmb();
7069 
7070 		memcpy(&dpr->rx_std_buffers[di],
7071 		       &spr->rx_std_buffers[si],
7072 		       cpycnt * sizeof(struct ring_info));
7073 
7074 		for (i = 0; i < cpycnt; i++, di++, si++) {
7075 			struct tg3_rx_buffer_desc *sbd, *dbd;
7076 			sbd = &spr->rx_std[si];
7077 			dbd = &dpr->rx_std[di];
7078 			dbd->addr_hi = sbd->addr_hi;
7079 			dbd->addr_lo = sbd->addr_lo;
7080 		}
7081 
7082 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7083 				       tp->rx_std_ring_mask;
7084 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7085 				       tp->rx_std_ring_mask;
7086 	}
7087 
7088 	while (1) {
7089 		src_prod_idx = spr->rx_jmb_prod_idx;
7090 
7091 		/* Make sure updates to the rx_jmb_buffers[] entries and
7092 		 * the jumbo producer index are seen in the correct order.
7093 		 */
7094 		smp_rmb();
7095 
7096 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7097 			break;
7098 
7099 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7100 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7101 		else
7102 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7103 				 spr->rx_jmb_cons_idx;
7104 
7105 		cpycnt = min(cpycnt,
7106 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7107 
7108 		si = spr->rx_jmb_cons_idx;
7109 		di = dpr->rx_jmb_prod_idx;
7110 
7111 		for (i = di; i < di + cpycnt; i++) {
7112 			if (dpr->rx_jmb_buffers[i].data) {
7113 				cpycnt = i - di;
7114 				err = -ENOSPC;
7115 				break;
7116 			}
7117 		}
7118 
7119 		if (!cpycnt)
7120 			break;
7121 
7122 		/* Ensure that updates to the rx_jmb_buffers ring and the
7123 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7124 		 * ordered correctly WRT the skb check above.
7125 		 */
7126 		smp_rmb();
7127 
7128 		memcpy(&dpr->rx_jmb_buffers[di],
7129 		       &spr->rx_jmb_buffers[si],
7130 		       cpycnt * sizeof(struct ring_info));
7131 
7132 		for (i = 0; i < cpycnt; i++, di++, si++) {
7133 			struct tg3_rx_buffer_desc *sbd, *dbd;
7134 			sbd = &spr->rx_jmb[si].std;
7135 			dbd = &dpr->rx_jmb[di].std;
7136 			dbd->addr_hi = sbd->addr_hi;
7137 			dbd->addr_lo = sbd->addr_lo;
7138 		}
7139 
7140 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7141 				       tp->rx_jmb_ring_mask;
7142 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7143 				       tp->rx_jmb_ring_mask;
7144 	}
7145 
7146 	return err;
7147 }
7148 
7149 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7150 {
7151 	struct tg3 *tp = tnapi->tp;
7152 
7153 	/* run TX completion thread */
7154 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7155 		tg3_tx(tnapi);
7156 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7157 			return work_done;
7158 	}
7159 
7160 	if (!tnapi->rx_rcb_prod_idx)
7161 		return work_done;
7162 
7163 	/* run RX thread, within the bounds set by NAPI.
7164 	 * All RX "locking" is done by ensuring outside
7165 	 * code synchronizes with tg3->napi.poll()
7166 	 */
7167 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7168 		work_done += tg3_rx(tnapi, budget - work_done);
7169 
7170 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7171 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7172 		int i, err = 0;
7173 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7174 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7175 
7176 		tp->rx_refill = false;
7177 		for (i = 1; i <= tp->rxq_cnt; i++)
7178 			err |= tg3_rx_prodring_xfer(tp, dpr,
7179 						    &tp->napi[i].prodring);
7180 
7181 		wmb();
7182 
7183 		if (std_prod_idx != dpr->rx_std_prod_idx)
7184 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7185 				     dpr->rx_std_prod_idx);
7186 
7187 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7188 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7189 				     dpr->rx_jmb_prod_idx);
7190 
7191 		mmiowb();
7192 
7193 		if (err)
7194 			tw32_f(HOSTCC_MODE, tp->coal_now);
7195 	}
7196 
7197 	return work_done;
7198 }
7199 
7200 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7201 {
7202 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7203 		schedule_work(&tp->reset_task);
7204 }
7205 
7206 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7207 {
7208 	cancel_work_sync(&tp->reset_task);
7209 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7210 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7211 }
7212 
7213 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7214 {
7215 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7216 	struct tg3 *tp = tnapi->tp;
7217 	int work_done = 0;
7218 	struct tg3_hw_status *sblk = tnapi->hw_status;
7219 
7220 	while (1) {
7221 		work_done = tg3_poll_work(tnapi, work_done, budget);
7222 
7223 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7224 			goto tx_recovery;
7225 
7226 		if (unlikely(work_done >= budget))
7227 			break;
7228 
7229 		/* tp->last_tag is used in tg3_int_reenable() below
7230 		 * to tell the hw how much work has been processed,
7231 		 * so we must read it before checking for more work.
7232 		 */
7233 		tnapi->last_tag = sblk->status_tag;
7234 		tnapi->last_irq_tag = tnapi->last_tag;
7235 		rmb();
7236 
7237 		/* check for RX/TX work to do */
7238 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7239 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7240 
7241 			/* This test here is not race free, but will reduce
7242 			 * the number of interrupts by looping again.
7243 			 */
7244 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7245 				continue;
7246 
7247 			napi_complete(napi);
7248 			/* Reenable interrupts. */
7249 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7250 
7251 			/* This test here is synchronized by napi_schedule()
7252 			 * and napi_complete() to close the race condition.
7253 			 */
7254 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7255 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7256 						  HOSTCC_MODE_ENABLE |
7257 						  tnapi->coal_now);
7258 			}
7259 			mmiowb();
7260 			break;
7261 		}
7262 	}
7263 
7264 	return work_done;
7265 
7266 tx_recovery:
7267 	/* work_done is guaranteed to be less than budget. */
7268 	napi_complete(napi);
7269 	tg3_reset_task_schedule(tp);
7270 	return work_done;
7271 }
7272 
7273 static void tg3_process_error(struct tg3 *tp)
7274 {
7275 	u32 val;
7276 	bool real_error = false;
7277 
7278 	if (tg3_flag(tp, ERROR_PROCESSED))
7279 		return;
7280 
7281 	/* Check Flow Attention register */
7282 	val = tr32(HOSTCC_FLOW_ATTN);
7283 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7284 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7285 		real_error = true;
7286 	}
7287 
7288 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7289 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7290 		real_error = true;
7291 	}
7292 
7293 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7294 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7295 		real_error = true;
7296 	}
7297 
7298 	if (!real_error)
7299 		return;
7300 
7301 	tg3_dump_state(tp);
7302 
7303 	tg3_flag_set(tp, ERROR_PROCESSED);
7304 	tg3_reset_task_schedule(tp);
7305 }
7306 
7307 static int tg3_poll(struct napi_struct *napi, int budget)
7308 {
7309 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7310 	struct tg3 *tp = tnapi->tp;
7311 	int work_done = 0;
7312 	struct tg3_hw_status *sblk = tnapi->hw_status;
7313 
7314 	while (1) {
7315 		if (sblk->status & SD_STATUS_ERROR)
7316 			tg3_process_error(tp);
7317 
7318 		tg3_poll_link(tp);
7319 
7320 		work_done = tg3_poll_work(tnapi, work_done, budget);
7321 
7322 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7323 			goto tx_recovery;
7324 
7325 		if (unlikely(work_done >= budget))
7326 			break;
7327 
7328 		if (tg3_flag(tp, TAGGED_STATUS)) {
7329 			/* tp->last_tag is used in tg3_int_reenable() below
7330 			 * to tell the hw how much work has been processed,
7331 			 * so we must read it before checking for more work.
7332 			 */
7333 			tnapi->last_tag = sblk->status_tag;
7334 			tnapi->last_irq_tag = tnapi->last_tag;
7335 			rmb();
7336 		} else
7337 			sblk->status &= ~SD_STATUS_UPDATED;
7338 
7339 		if (likely(!tg3_has_work(tnapi))) {
7340 			napi_complete(napi);
7341 			tg3_int_reenable(tnapi);
7342 			break;
7343 		}
7344 	}
7345 
7346 	return work_done;
7347 
7348 tx_recovery:
7349 	/* work_done is guaranteed to be less than budget. */
7350 	napi_complete(napi);
7351 	tg3_reset_task_schedule(tp);
7352 	return work_done;
7353 }
7354 
7355 static void tg3_napi_disable(struct tg3 *tp)
7356 {
7357 	int i;
7358 
7359 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7360 		napi_disable(&tp->napi[i].napi);
7361 }
7362 
7363 static void tg3_napi_enable(struct tg3 *tp)
7364 {
7365 	int i;
7366 
7367 	for (i = 0; i < tp->irq_cnt; i++)
7368 		napi_enable(&tp->napi[i].napi);
7369 }
7370 
7371 static void tg3_napi_init(struct tg3 *tp)
7372 {
7373 	int i;
7374 
7375 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7376 	for (i = 1; i < tp->irq_cnt; i++)
7377 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7378 }
7379 
7380 static void tg3_napi_fini(struct tg3 *tp)
7381 {
7382 	int i;
7383 
7384 	for (i = 0; i < tp->irq_cnt; i++)
7385 		netif_napi_del(&tp->napi[i].napi);
7386 }
7387 
7388 static inline void tg3_netif_stop(struct tg3 *tp)
7389 {
7390 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7391 	tg3_napi_disable(tp);
7392 	netif_carrier_off(tp->dev);
7393 	netif_tx_disable(tp->dev);
7394 }
7395 
7396 /* tp->lock must be held */
7397 static inline void tg3_netif_start(struct tg3 *tp)
7398 {
7399 	tg3_ptp_resume(tp);
7400 
7401 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7402 	 * appropriate so long as all callers are assured to
7403 	 * have free tx slots (such as after tg3_init_hw)
7404 	 */
7405 	netif_tx_wake_all_queues(tp->dev);
7406 
7407 	if (tp->link_up)
7408 		netif_carrier_on(tp->dev);
7409 
7410 	tg3_napi_enable(tp);
7411 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7412 	tg3_enable_ints(tp);
7413 }
7414 
7415 static void tg3_irq_quiesce(struct tg3 *tp)
7416 {
7417 	int i;
7418 
7419 	BUG_ON(tp->irq_sync);
7420 
7421 	tp->irq_sync = 1;
7422 	smp_mb();
7423 
7424 	for (i = 0; i < tp->irq_cnt; i++)
7425 		synchronize_irq(tp->napi[i].irq_vec);
7426 }
7427 
7428 /* Fully shut down all tg3 driver activity elsewhere in the system.
7429  * If irq_sync is non-zero, the IRQ handlers must be synchronized
7430  * as well.  Most of the time this is not necessary, except when
7431  * shutting down the device.
7432  */
7433 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7434 {
7435 	spin_lock_bh(&tp->lock);
7436 	if (irq_sync)
7437 		tg3_irq_quiesce(tp);
7438 }
7439 
7440 static inline void tg3_full_unlock(struct tg3 *tp)
7441 {
7442 	spin_unlock_bh(&tp->lock);
7443 }
7444 
7445 /* One-shot MSI handler - Chip automatically disables interrupt
7446  * after sending MSI so driver doesn't have to do it.
7447  */
7448 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7449 {
7450 	struct tg3_napi *tnapi = dev_id;
7451 	struct tg3 *tp = tnapi->tp;
7452 
7453 	prefetch(tnapi->hw_status);
7454 	if (tnapi->rx_rcb)
7455 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7456 
7457 	if (likely(!tg3_irq_sync(tp)))
7458 		napi_schedule(&tnapi->napi);
7459 
7460 	return IRQ_HANDLED;
7461 }
7462 
7463 /* MSI ISR - No need to check for interrupt sharing and no need to
7464  * flush status block and interrupt mailbox. PCI ordering rules
7465  * guarantee that MSI will arrive after the status block.
7466  */
7467 static irqreturn_t tg3_msi(int irq, void *dev_id)
7468 {
7469 	struct tg3_napi *tnapi = dev_id;
7470 	struct tg3 *tp = tnapi->tp;
7471 
7472 	prefetch(tnapi->hw_status);
7473 	if (tnapi->rx_rcb)
7474 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7475 	/*
7476 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7477 	 * chip-internal interrupt pending events.
7478 	 * Writing non-zero to intr-mbox-0 additionally tells the
7479 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7480 	 * event coalescing.
7481 	 */
7482 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7483 	if (likely(!tg3_irq_sync(tp)))
7484 		napi_schedule(&tnapi->napi);
7485 
7486 	return IRQ_RETVAL(1);
7487 }
7488 
7489 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7490 {
7491 	struct tg3_napi *tnapi = dev_id;
7492 	struct tg3 *tp = tnapi->tp;
7493 	struct tg3_hw_status *sblk = tnapi->hw_status;
7494 	unsigned int handled = 1;
7495 
7496 	/* In INTx mode, it is possible for the interrupt to arrive at
7497 	 * the CPU before the status block write that preceded it.
7498 	 * Reading the PCI State register will confirm whether the
7499 	 * interrupt is ours and will flush the status block.
7500 	 */
7501 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7502 		if (tg3_flag(tp, CHIP_RESETTING) ||
7503 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7504 			handled = 0;
7505 			goto out;
7506 		}
7507 	}
7508 
7509 	/*
7510 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7511 	 * chip-internal interrupt pending events.
7512 	 * Writing non-zero to intr-mbox-0 additionally tells the
7513 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7514 	 * event coalescing.
7515 	 *
7516 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7517 	 * spurious interrupts.  The flush impacts performance but
7518 	 * excessive spurious interrupts can be worse in some cases.
7519 	 */
7520 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7521 	if (tg3_irq_sync(tp))
7522 		goto out;
7523 	sblk->status &= ~SD_STATUS_UPDATED;
7524 	if (likely(tg3_has_work(tnapi))) {
7525 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7526 		napi_schedule(&tnapi->napi);
7527 	} else {
7528 		/* No work, shared interrupt perhaps?  re-enable
7529 		 * interrupts, and flush that PCI write
7530 		 */
7531 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7532 			       0x00000000);
7533 	}
7534 out:
7535 	return IRQ_RETVAL(handled);
7536 }
7537 
7538 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7539 {
7540 	struct tg3_napi *tnapi = dev_id;
7541 	struct tg3 *tp = tnapi->tp;
7542 	struct tg3_hw_status *sblk = tnapi->hw_status;
7543 	unsigned int handled = 1;
7544 
7545 	/* In INTx mode, it is possible for the interrupt to arrive at
7546 	 * the CPU before the status block write that preceded it.
7547 	 * Reading the PCI State register will confirm whether the
7548 	 * interrupt is ours and will flush the status block.
7549 	 */
7550 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7551 		if (tg3_flag(tp, CHIP_RESETTING) ||
7552 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7553 			handled = 0;
7554 			goto out;
7555 		}
7556 	}
7557 
7558 	/*
7559 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7560 	 * chip-internal interrupt pending events.
7561 	 * Writing non-zero to intr-mbox-0 additionally tells the
7562 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7563 	 * event coalescing.
7564 	 *
7565 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7566 	 * spurious interrupts.  The flush impacts performance but
7567 	 * excessive spurious interrupts can be worse in some cases.
7568 	 */
7569 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7570 
7571 	/*
7572 	 * In a shared interrupt configuration, sometimes other devices'
7573 	 * interrupts will scream.  We record the current status tag here
7574 	 * so that the above check can report that the screaming interrupts
7575 	 * are unhandled.  Eventually they will be silenced.
7576 	 */
7577 	tnapi->last_irq_tag = sblk->status_tag;
7578 
7579 	if (tg3_irq_sync(tp))
7580 		goto out;
7581 
7582 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7583 
7584 	napi_schedule(&tnapi->napi);
7585 
7586 out:
7587 	return IRQ_RETVAL(handled);
7588 }
7589 
7590 /* ISR for interrupt test */
7591 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7592 {
7593 	struct tg3_napi *tnapi = dev_id;
7594 	struct tg3 *tp = tnapi->tp;
7595 	struct tg3_hw_status *sblk = tnapi->hw_status;
7596 
7597 	if ((sblk->status & SD_STATUS_UPDATED) ||
7598 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7599 		tg3_disable_ints(tp);
7600 		return IRQ_RETVAL(1);
7601 	}
7602 	return IRQ_RETVAL(0);
7603 }
7604 
7605 #ifdef CONFIG_NET_POLL_CONTROLLER
7606 static void tg3_poll_controller(struct net_device *dev)
7607 {
7608 	int i;
7609 	struct tg3 *tp = netdev_priv(dev);
7610 
7611 	if (tg3_irq_sync(tp))
7612 		return;
7613 
7614 	for (i = 0; i < tp->irq_cnt; i++)
7615 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7616 }
7617 #endif
7618 
7619 static void tg3_tx_timeout(struct net_device *dev)
7620 {
7621 	struct tg3 *tp = netdev_priv(dev);
7622 
7623 	if (netif_msg_tx_err(tp)) {
7624 		netdev_err(dev, "transmit timed out, resetting\n");
7625 		tg3_dump_state(tp);
7626 	}
7627 
7628 	tg3_reset_task_schedule(tp);
7629 }
7630 
7631 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7632 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7633 {
7634 	u32 base = (u32) mapping & 0xffffffff;
7635 
7636 	return base + len + 8 < base;
7637 }
7638 
7639 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7640  * of any 4GB boundaries: 4G, 8G, etc
7641  */
7642 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7643 					   u32 len, u32 mss)
7644 {
7645 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7646 		u32 base = (u32) mapping & 0xffffffff;
7647 
7648 		return ((base + len + (mss & 0x3fff)) < base);
7649 	}
7650 	return 0;
7651 }
7652 
7653 /* Test for DMA addresses > 40-bit */
7654 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7655 					  int len)
7656 {
7657 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7658 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7659 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7660 	return 0;
7661 #else
7662 	return 0;
7663 #endif
7664 }
7665 
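/* Fill a single transmit buffer descriptor with the DMA address, the
 * length/flags word and the mss/vlan word.
 */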
7666 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7667 				 dma_addr_t mapping, u32 len, u32 flags,
7668 				 u32 mss, u32 vlan)
7669 {
7670 	txbd->addr_hi = ((u64) mapping >> 32);
7671 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7672 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7673 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7674 }
7675 
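/* Queue one DMA mapping as one or more transmit BDs starting at *entry.
 * When tp->dma_limit is set the mapping is split into dma_limit sized
 * chunks, keeping the final chunk clear of the 8-byte DMA problem.
 * Returns true if the mapping trips one of the hardware DMA bug
 * conditions or the BD budget runs out, so the caller can fall back to
 * a workaround.
 */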
7676 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7677 			    dma_addr_t map, u32 len, u32 flags,
7678 			    u32 mss, u32 vlan)
7679 {
7680 	struct tg3 *tp = tnapi->tp;
7681 	bool hwbug = false;
7682 
7683 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7684 		hwbug = true;
7685 
7686 	if (tg3_4g_overflow_test(map, len))
7687 		hwbug = true;
7688 
7689 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7690 		hwbug = true;
7691 
7692 	if (tg3_40bit_overflow_test(tp, map, len))
7693 		hwbug = true;
7694 
7695 	if (tp->dma_limit) {
7696 		u32 prvidx = *entry;
7697 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7698 		while (len > tp->dma_limit && *budget) {
7699 			u32 frag_len = tp->dma_limit;
7700 			len -= tp->dma_limit;
7701 
7702 			/* Avoid the 8-byte DMA problem */
7703 			if (len <= 8) {
7704 				len += tp->dma_limit / 2;
7705 				frag_len = tp->dma_limit / 2;
7706 			}
7707 
7708 			tnapi->tx_buffers[*entry].fragmented = true;
7709 
7710 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7711 				      frag_len, tmp_flag, mss, vlan);
7712 			*budget -= 1;
7713 			prvidx = *entry;
7714 			*entry = NEXT_TX(*entry);
7715 
7716 			map += frag_len;
7717 		}
7718 
7719 		if (len) {
7720 			if (*budget) {
7721 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7722 					      len, flags, mss, vlan);
7723 				*budget -= 1;
7724 				*entry = NEXT_TX(*entry);
7725 			} else {
7726 				hwbug = true;
7727 				tnapi->tx_buffers[prvidx].fragmented = false;
7728 			}
7729 		}
7730 	} else {
7731 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7732 			      len, flags, mss, vlan);
7733 		*entry = NEXT_TX(*entry);
7734 	}
7735 
7736 	return hwbug;
7737 }
7738 
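/* Unmap the skb queued at 'entry': the linear head first, then each of
 * the page fragments up to and including 'last', skipping any extra
 * descriptors that tg3_tx_frag_set() marked as fragmented.
 */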
7739 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7740 {
7741 	int i;
7742 	struct sk_buff *skb;
7743 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7744 
7745 	skb = txb->skb;
7746 	txb->skb = NULL;
7747 
7748 	pci_unmap_single(tnapi->tp->pdev,
7749 			 dma_unmap_addr(txb, mapping),
7750 			 skb_headlen(skb),
7751 			 PCI_DMA_TODEVICE);
7752 
7753 	while (txb->fragmented) {
7754 		txb->fragmented = false;
7755 		entry = NEXT_TX(entry);
7756 		txb = &tnapi->tx_buffers[entry];
7757 	}
7758 
7759 	for (i = 0; i <= last; i++) {
7760 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7761 
7762 		entry = NEXT_TX(entry);
7763 		txb = &tnapi->tx_buffers[entry];
7764 
7765 		pci_unmap_page(tnapi->tp->pdev,
7766 			       dma_unmap_addr(txb, mapping),
7767 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7768 
7769 		while (txb->fragmented) {
7770 			txb->fragmented = false;
7771 			entry = NEXT_TX(entry);
7772 			txb = &tnapi->tx_buffers[entry];
7773 		}
7774 	}
7775 }
7776 
7777 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7778 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7779 				       struct sk_buff **pskb,
7780 				       u32 *entry, u32 *budget,
7781 				       u32 base_flags, u32 mss, u32 vlan)
7782 {
7783 	struct tg3 *tp = tnapi->tp;
7784 	struct sk_buff *new_skb, *skb = *pskb;
7785 	dma_addr_t new_addr = 0;
7786 	int ret = 0;
7787 
7788 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7789 		new_skb = skb_copy(skb, GFP_ATOMIC);
7790 	else {
7791 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7792 
7793 		new_skb = skb_copy_expand(skb,
7794 					  skb_headroom(skb) + more_headroom,
7795 					  skb_tailroom(skb), GFP_ATOMIC);
7796 	}
7797 
7798 	if (!new_skb) {
7799 		ret = -1;
7800 	} else {
7801 		/* New SKB is guaranteed to be linear. */
7802 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7803 					  PCI_DMA_TODEVICE);
7804 		/* Make sure the mapping succeeded */
7805 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7806 			dev_kfree_skb_any(new_skb);
7807 			ret = -1;
7808 		} else {
7809 			u32 save_entry = *entry;
7810 
7811 			base_flags |= TXD_FLAG_END;
7812 
7813 			tnapi->tx_buffers[*entry].skb = new_skb;
7814 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7815 					   mapping, new_addr);
7816 
7817 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7818 					    new_skb->len, base_flags,
7819 					    mss, vlan)) {
7820 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7821 				dev_kfree_skb_any(new_skb);
7822 				ret = -1;
7823 			}
7824 		}
7825 	}
7826 
7827 	dev_kfree_skb_any(skb);
7828 	*pskb = new_skb;
7829 	return ret;
7830 }
7831 
7832 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7833 
7834 /* Use GSO to work around all TSO packets that meet HW bug conditions
7835  * indicated in tg3_tx_frag_set()
7836  */
7837 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7838 		       struct netdev_queue *txq, struct sk_buff *skb)
7839 {
7840 	struct sk_buff *segs, *nskb;
7841 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7842 
7843 	/* Estimate the number of fragments in the worst case */
7844 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7845 		netif_tx_stop_queue(txq);
7846 
7847 		/* netif_tx_stop_queue() must be done before checking
7848 		 * the tx index in tg3_tx_avail() below, because in
7849 		 * tg3_tx(), we update the tx index before checking for
7850 		 * netif_tx_queue_stopped().
7851 		 */
7852 		smp_mb();
7853 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7854 			return NETDEV_TX_BUSY;
7855 
7856 		netif_tx_wake_queue(txq);
7857 	}
7858 
7859 	segs = skb_gso_segment(skb, tp->dev->features &
7860 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7861 	if (IS_ERR(segs) || !segs)
7862 		goto tg3_tso_bug_end;
7863 
7864 	do {
7865 		nskb = segs;
7866 		segs = segs->next;
7867 		nskb->next = NULL;
7868 		tg3_start_xmit(nskb, tp->dev);
7869 	} while (segs);
7870 
7871 tg3_tso_bug_end:
7872 	dev_kfree_skb_any(skb);
7873 
7874 	return NETDEV_TX_OK;
7875 }
7876 
7877 /* hard_start_xmit for all devices */
7878 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7879 {
7880 	struct tg3 *tp = netdev_priv(dev);
7881 	u32 len, entry, base_flags, mss, vlan = 0;
7882 	u32 budget;
7883 	int i = -1, would_hit_hwbug;
7884 	dma_addr_t mapping;
7885 	struct tg3_napi *tnapi;
7886 	struct netdev_queue *txq;
7887 	unsigned int last;
7888 	struct iphdr *iph = NULL;
7889 	struct tcphdr *tcph = NULL;
7890 	__sum16 tcp_csum = 0, ip_csum = 0;
7891 	__be16 ip_tot_len = 0;
7892 
7893 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7894 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7895 	if (tg3_flag(tp, ENABLE_TSS))
7896 		tnapi++;
7897 
7898 	budget = tg3_tx_avail(tnapi);
7899 
7900 	/* We are running in BH disabled context with netif_tx_lock
7901 	 * and TX reclaim runs via tp->napi.poll inside of a software
7902 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7903 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7904 	 */
7905 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7906 		if (!netif_tx_queue_stopped(txq)) {
7907 			netif_tx_stop_queue(txq);
7908 
7909 			/* This is a hard error, log it. */
7910 			netdev_err(dev,
7911 				   "BUG! Tx Ring full when queue awake!\n");
7912 		}
7913 		return NETDEV_TX_BUSY;
7914 	}
7915 
7916 	entry = tnapi->tx_prod;
7917 	base_flags = 0;
7918 
7919 	mss = skb_shinfo(skb)->gso_size;
7920 	if (mss) {
7921 		u32 tcp_opt_len, hdr_len;
7922 
7923 		if (skb_cow_head(skb, 0))
7924 			goto drop;
7925 
7926 		iph = ip_hdr(skb);
7927 		tcp_opt_len = tcp_optlen(skb);
7928 
7929 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7930 
7931 		/* HW/FW cannot correctly segment packets that have been
7932 		 * VLAN encapsulated.
7933 		 */
7934 		if (skb->protocol == htons(ETH_P_8021Q) ||
7935 		    skb->protocol == htons(ETH_P_8021AD))
7936 			return tg3_tso_bug(tp, tnapi, txq, skb);
7937 
7938 		if (!skb_is_gso_v6(skb)) {
7939 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7940 			    tg3_flag(tp, TSO_BUG))
7941 				return tg3_tso_bug(tp, tnapi, txq, skb);
7942 
7943 			ip_csum = iph->check;
7944 			ip_tot_len = iph->tot_len;
7945 			iph->check = 0;
7946 			iph->tot_len = htons(mss + hdr_len);
7947 		}
7948 
7949 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7950 			       TXD_FLAG_CPU_POST_DMA);
7951 
7952 		tcph = tcp_hdr(skb);
7953 		tcp_csum = tcph->check;
7954 
7955 		if (tg3_flag(tp, HW_TSO_1) ||
7956 		    tg3_flag(tp, HW_TSO_2) ||
7957 		    tg3_flag(tp, HW_TSO_3)) {
7958 			tcph->check = 0;
7959 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7960 		} else {
7961 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7962 							 0, IPPROTO_TCP, 0);
7963 		}
7964 
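		/* Tell the chip how long the LSO header is.  HW_TSO_2/3 parts
		 * encode hdr_len into spare bits of the mss field (and of
		 * base_flags on HW_TSO_3); older parts are given the IP and
		 * TCP option lengths instead.
		 */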
7965 		if (tg3_flag(tp, HW_TSO_3)) {
7966 			mss |= (hdr_len & 0xc) << 12;
7967 			if (hdr_len & 0x10)
7968 				base_flags |= 0x00000010;
7969 			base_flags |= (hdr_len & 0x3e0) << 5;
7970 		} else if (tg3_flag(tp, HW_TSO_2))
7971 			mss |= hdr_len << 9;
7972 		else if (tg3_flag(tp, HW_TSO_1) ||
7973 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7974 			if (tcp_opt_len || iph->ihl > 5) {
7975 				int tsflags;
7976 
7977 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7978 				mss |= (tsflags << 11);
7979 			}
7980 		} else {
7981 			if (tcp_opt_len || iph->ihl > 5) {
7982 				int tsflags;
7983 
7984 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7985 				base_flags |= tsflags << 12;
7986 			}
7987 		}
7988 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7989 		/* HW/FW cannot correctly checksum packets that have been
7990 		 * VLAN encapsulated.
7991 		 */
7992 		if (skb->protocol == htons(ETH_P_8021Q) ||
7993 		    skb->protocol == htons(ETH_P_8021AD)) {
7994 			if (skb_checksum_help(skb))
7995 				goto drop;
7996 		} else  {
7997 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
7998 		}
7999 	}
8000 
8001 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8002 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8003 		base_flags |= TXD_FLAG_JMB_PKT;
8004 
8005 	if (vlan_tx_tag_present(skb)) {
8006 		base_flags |= TXD_FLAG_VLAN;
8007 		vlan = vlan_tx_tag_get(skb);
8008 	}
8009 
8010 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8011 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8012 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8013 		base_flags |= TXD_FLAG_HWTSTAMP;
8014 	}
8015 
8016 	len = skb_headlen(skb);
8017 
8018 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8019 	if (pci_dma_mapping_error(tp->pdev, mapping))
8020 		goto drop;
8021 
8022 
8023 	tnapi->tx_buffers[entry].skb = skb;
8024 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8025 
8026 	would_hit_hwbug = 0;
8027 
8028 	if (tg3_flag(tp, 5701_DMA_BUG))
8029 		would_hit_hwbug = 1;
8030 
8031 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8032 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8033 			    mss, vlan)) {
8034 		would_hit_hwbug = 1;
8035 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8036 		u32 tmp_mss = mss;
8037 
8038 		if (!tg3_flag(tp, HW_TSO_1) &&
8039 		    !tg3_flag(tp, HW_TSO_2) &&
8040 		    !tg3_flag(tp, HW_TSO_3))
8041 			tmp_mss = 0;
8042 
8043 		/* Now loop through additional data
8044 		 * fragments, and queue them.
8045 		 */
8046 		last = skb_shinfo(skb)->nr_frags - 1;
8047 		for (i = 0; i <= last; i++) {
8048 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8049 
8050 			len = skb_frag_size(frag);
8051 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8052 						   len, DMA_TO_DEVICE);
8053 
8054 			tnapi->tx_buffers[entry].skb = NULL;
8055 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8056 					   mapping);
8057 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8058 				goto dma_error;
8059 
8060 			if (!budget ||
8061 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8062 					    len, base_flags |
8063 					    ((i == last) ? TXD_FLAG_END : 0),
8064 					    tmp_mss, vlan)) {
8065 				would_hit_hwbug = 1;
8066 				break;
8067 			}
8068 		}
8069 	}
8070 
8071 	if (would_hit_hwbug) {
8072 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8073 
8074 		if (mss) {
8075 			/* If it's a TSO packet, do GSO instead of
8076 			 * allocating and copying to a large linear SKB
8077 			 */
8078 			if (ip_tot_len) {
8079 				iph->check = ip_csum;
8080 				iph->tot_len = ip_tot_len;
8081 			}
8082 			tcph->check = tcp_csum;
8083 			return tg3_tso_bug(tp, tnapi, txq, skb);
8084 		}
8085 
8086 		/* If the workaround fails due to memory/mapping
8087 		 * failure, silently drop this packet.
8088 		 */
8089 		entry = tnapi->tx_prod;
8090 		budget = tg3_tx_avail(tnapi);
8091 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8092 						base_flags, mss, vlan))
8093 			goto drop_nofree;
8094 	}
8095 
8096 	skb_tx_timestamp(skb);
8097 	netdev_tx_sent_queue(txq, skb->len);
8098 
8099 	/* Sync BD data before updating mailbox */
8100 	wmb();
8101 
8102 	tnapi->tx_prod = entry;
8103 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8104 		netif_tx_stop_queue(txq);
8105 
8106 		/* netif_tx_stop_queue() must be done before checking
8107 		 * the tx index in tg3_tx_avail() below, because in
8108 		 * tg3_tx(), we update the tx index before checking for
8109 		 * netif_tx_queue_stopped().
8110 		 */
8111 		smp_mb();
8112 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8113 			netif_tx_wake_queue(txq);
8114 	}
8115 
8116 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8117 		/* Packets are ready, update Tx producer idx on card. */
8118 		tw32_tx_mbox(tnapi->prodmbox, entry);
8119 		mmiowb();
8120 	}
8121 
8122 	return NETDEV_TX_OK;
8123 
8124 dma_error:
8125 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8126 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8127 drop:
8128 	dev_kfree_skb_any(skb);
8129 drop_nofree:
8130 	tp->tx_dropped++;
8131 	return NETDEV_TX_OK;
8132 }
8133 
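/* Enable or disable internal MAC loopback and write the resulting MAC
 * mode to the chip.
 */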
8134 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8135 {
8136 	if (enable) {
8137 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8138 				  MAC_MODE_PORT_MODE_MASK);
8139 
8140 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8141 
8142 		if (!tg3_flag(tp, 5705_PLUS))
8143 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8144 
8145 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8146 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8147 		else
8148 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8149 	} else {
8150 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8151 
8152 		if (tg3_flag(tp, 5705_PLUS) ||
8153 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8154 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8155 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8156 	}
8157 
8158 	tw32(MAC_MODE, tp->mac_mode);
8159 	udelay(40);
8160 }
8161 
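/* Put the PHY into loopback at the requested speed - internal BMCR
 * loopback, or external loopback when extlpbk is set - and program the
 * MAC port mode to match.
 */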
8162 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8163 {
8164 	u32 val, bmcr, mac_mode, ptest = 0;
8165 
8166 	tg3_phy_toggle_apd(tp, false);
8167 	tg3_phy_toggle_automdix(tp, false);
8168 
8169 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8170 		return -EIO;
8171 
8172 	bmcr = BMCR_FULLDPLX;
8173 	switch (speed) {
8174 	case SPEED_10:
8175 		break;
8176 	case SPEED_100:
8177 		bmcr |= BMCR_SPEED100;
8178 		break;
8179 	case SPEED_1000:
8180 	default:
8181 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8182 			speed = SPEED_100;
8183 			bmcr |= BMCR_SPEED100;
8184 		} else {
8185 			speed = SPEED_1000;
8186 			bmcr |= BMCR_SPEED1000;
8187 		}
8188 	}
8189 
8190 	if (extlpbk) {
8191 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8192 			tg3_readphy(tp, MII_CTRL1000, &val);
8193 			val |= CTL1000_AS_MASTER |
8194 			       CTL1000_ENABLE_MASTER;
8195 			tg3_writephy(tp, MII_CTRL1000, val);
8196 		} else {
8197 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8198 				MII_TG3_FET_PTEST_TRIM_2;
8199 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8200 		}
8201 	} else
8202 		bmcr |= BMCR_LOOPBACK;
8203 
8204 	tg3_writephy(tp, MII_BMCR, bmcr);
8205 
8206 	/* The write needs to be flushed for the FETs */
8207 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8208 		tg3_readphy(tp, MII_BMCR, &bmcr);
8209 
8210 	udelay(40);
8211 
8212 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8213 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8214 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8215 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8216 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8217 
8218 		/* The write needs to be flushed for the AC131 */
8219 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8220 	}
8221 
8222 	/* Reset to prevent intermittently losing the first rx packet */
8223 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8224 	    tg3_flag(tp, 5780_CLASS)) {
8225 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8226 		udelay(10);
8227 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8228 	}
8229 
8230 	mac_mode = tp->mac_mode &
8231 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8232 	if (speed == SPEED_1000)
8233 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8234 	else
8235 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8236 
8237 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8238 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8239 
8240 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8241 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8242 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8243 			mac_mode |= MAC_MODE_LINK_POLARITY;
8244 
8245 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8246 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8247 	}
8248 
8249 	tw32(MAC_MODE, mac_mode);
8250 	udelay(40);
8251 
8252 	return 0;
8253 }
8254 
8255 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8256 {
8257 	struct tg3 *tp = netdev_priv(dev);
8258 
8259 	if (features & NETIF_F_LOOPBACK) {
8260 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8261 			return;
8262 
8263 		spin_lock_bh(&tp->lock);
8264 		tg3_mac_loopback(tp, true);
8265 		netif_carrier_on(tp->dev);
8266 		spin_unlock_bh(&tp->lock);
8267 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8268 	} else {
8269 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8270 			return;
8271 
8272 		spin_lock_bh(&tp->lock);
8273 		tg3_mac_loopback(tp, false);
8274 		/* Force link status check */
8275 		tg3_setup_phy(tp, true);
8276 		spin_unlock_bh(&tp->lock);
8277 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8278 	}
8279 }
8280 
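/* TSO cannot be used with jumbo MTUs on 5780-class devices, so mask
 * off the TSO features in that case.
 */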
8281 static netdev_features_t tg3_fix_features(struct net_device *dev,
8282 	netdev_features_t features)
8283 {
8284 	struct tg3 *tp = netdev_priv(dev);
8285 
8286 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8287 		features &= ~NETIF_F_ALL_TSO;
8288 
8289 	return features;
8290 }
8291 
8292 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8293 {
8294 	netdev_features_t changed = dev->features ^ features;
8295 
8296 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8297 		tg3_set_loopback(dev, features);
8298 
8299 	return 0;
8300 }
8301 
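/* Release every rx buffer held in a producer ring set.  For rings
 * other than vector 0's, only the entries between the consumer and
 * producer indexes are populated; the vector 0 ring is freed in full.
 */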
8302 static void tg3_rx_prodring_free(struct tg3 *tp,
8303 				 struct tg3_rx_prodring_set *tpr)
8304 {
8305 	int i;
8306 
8307 	if (tpr != &tp->napi[0].prodring) {
8308 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8309 		     i = (i + 1) & tp->rx_std_ring_mask)
8310 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8311 					tp->rx_pkt_map_sz);
8312 
8313 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8314 			for (i = tpr->rx_jmb_cons_idx;
8315 			     i != tpr->rx_jmb_prod_idx;
8316 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8317 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8318 						TG3_RX_JMB_MAP_SZ);
8319 			}
8320 		}
8321 
8322 		return;
8323 	}
8324 
8325 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8326 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8327 				tp->rx_pkt_map_sz);
8328 
8329 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8330 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8331 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8332 					TG3_RX_JMB_MAP_SZ);
8333 	}
8334 }
8335 
8336 /* Initialize rx rings for packet processing.
8337  *
8338  * The chip has been shut down and the driver detached from
8339  * the networking stack, so no interrupts or new tx packets will
8340  * end up in the driver.  tp->{tx,}lock are held and thus
8341  * we may not sleep.
8342  */
8343 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8344 				 struct tg3_rx_prodring_set *tpr)
8345 {
8346 	u32 i, rx_pkt_dma_sz;
8347 
8348 	tpr->rx_std_cons_idx = 0;
8349 	tpr->rx_std_prod_idx = 0;
8350 	tpr->rx_jmb_cons_idx = 0;
8351 	tpr->rx_jmb_prod_idx = 0;
8352 
8353 	if (tpr != &tp->napi[0].prodring) {
8354 		memset(&tpr->rx_std_buffers[0], 0,
8355 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8356 		if (tpr->rx_jmb_buffers)
8357 			memset(&tpr->rx_jmb_buffers[0], 0,
8358 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8359 		goto done;
8360 	}
8361 
8362 	/* Zero out all descriptors. */
8363 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8364 
8365 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8366 	if (tg3_flag(tp, 5780_CLASS) &&
8367 	    tp->dev->mtu > ETH_DATA_LEN)
8368 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8369 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8370 
8371 	/* Initialize the invariants of the rings; we only set this
8372 	 * stuff once.  This works because the card does not
8373 	 * write into the rx buffer posting rings.
8374 	 */
8375 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8376 		struct tg3_rx_buffer_desc *rxd;
8377 
8378 		rxd = &tpr->rx_std[i];
8379 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8380 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8381 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8382 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8383 	}
8384 
8385 	/* Now allocate fresh SKBs for each rx ring. */
8386 	for (i = 0; i < tp->rx_pending; i++) {
8387 		unsigned int frag_size;
8388 
8389 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8390 				      &frag_size) < 0) {
8391 			netdev_warn(tp->dev,
8392 				    "Using a smaller RX standard ring. Only "
8393 				    "%d out of %d buffers were allocated "
8394 				    "successfully\n", i, tp->rx_pending);
8395 			if (i == 0)
8396 				goto initfail;
8397 			tp->rx_pending = i;
8398 			break;
8399 		}
8400 	}
8401 
8402 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8403 		goto done;
8404 
8405 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8406 
8407 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8408 		goto done;
8409 
8410 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8411 		struct tg3_rx_buffer_desc *rxd;
8412 
8413 		rxd = &tpr->rx_jmb[i].std;
8414 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8415 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8416 				  RXD_FLAG_JUMBO;
8417 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8418 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8419 	}
8420 
8421 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8422 		unsigned int frag_size;
8423 
8424 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8425 				      &frag_size) < 0) {
8426 			netdev_warn(tp->dev,
8427 				    "Using a smaller RX jumbo ring. Only %d "
8428 				    "out of %d buffers were allocated "
8429 				    "successfully\n", i, tp->rx_jumbo_pending);
8430 			if (i == 0)
8431 				goto initfail;
8432 			tp->rx_jumbo_pending = i;
8433 			break;
8434 		}
8435 	}
8436 
8437 done:
8438 	return 0;
8439 
8440 initfail:
8441 	tg3_rx_prodring_free(tp, tpr);
8442 	return -ENOMEM;
8443 }
8444 
8445 static void tg3_rx_prodring_fini(struct tg3 *tp,
8446 				 struct tg3_rx_prodring_set *tpr)
8447 {
8448 	kfree(tpr->rx_std_buffers);
8449 	tpr->rx_std_buffers = NULL;
8450 	kfree(tpr->rx_jmb_buffers);
8451 	tpr->rx_jmb_buffers = NULL;
8452 	if (tpr->rx_std) {
8453 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8454 				  tpr->rx_std, tpr->rx_std_mapping);
8455 		tpr->rx_std = NULL;
8456 	}
8457 	if (tpr->rx_jmb) {
8458 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8459 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8460 		tpr->rx_jmb = NULL;
8461 	}
8462 }
8463 
8464 static int tg3_rx_prodring_init(struct tg3 *tp,
8465 				struct tg3_rx_prodring_set *tpr)
8466 {
8467 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8468 				      GFP_KERNEL);
8469 	if (!tpr->rx_std_buffers)
8470 		return -ENOMEM;
8471 
8472 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8473 					 TG3_RX_STD_RING_BYTES(tp),
8474 					 &tpr->rx_std_mapping,
8475 					 GFP_KERNEL);
8476 	if (!tpr->rx_std)
8477 		goto err_out;
8478 
8479 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8480 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8481 					      GFP_KERNEL);
8482 		if (!tpr->rx_jmb_buffers)
8483 			goto err_out;
8484 
8485 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8486 						 TG3_RX_JMB_RING_BYTES(tp),
8487 						 &tpr->rx_jmb_mapping,
8488 						 GFP_KERNEL);
8489 		if (!tpr->rx_jmb)
8490 			goto err_out;
8491 	}
8492 
8493 	return 0;
8494 
8495 err_out:
8496 	tg3_rx_prodring_fini(tp, tpr);
8497 	return -ENOMEM;
8498 }
8499 
8500 /* Free up pending packets in all rx/tx rings.
8501  *
8502  * The chip has been shut down and the driver detached from
8503  * the networking stack, so no interrupts or new tx packets will
8504  * end up in the driver.  tp->{tx,}lock is not held and we are not
8505  * in an interrupt context and thus may sleep.
8506  */
8507 static void tg3_free_rings(struct tg3 *tp)
8508 {
8509 	int i, j;
8510 
8511 	for (j = 0; j < tp->irq_cnt; j++) {
8512 		struct tg3_napi *tnapi = &tp->napi[j];
8513 
8514 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8515 
8516 		if (!tnapi->tx_buffers)
8517 			continue;
8518 
8519 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8520 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8521 
8522 			if (!skb)
8523 				continue;
8524 
8525 			tg3_tx_skb_unmap(tnapi, i,
8526 					 skb_shinfo(skb)->nr_frags - 1);
8527 
8528 			dev_kfree_skb_any(skb);
8529 		}
8530 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8531 	}
8532 }
8533 
8534 /* Initialize tx/rx rings for packet processing.
8535  *
8536  * The chip has been shut down and the driver detached from
8537  * the networking stack, so no interrupts or new tx packets will
8538  * end up in the driver.  tp->{tx,}lock are held and thus
8539  * we may not sleep.
8540  */
8541 static int tg3_init_rings(struct tg3 *tp)
8542 {
8543 	int i;
8544 
8545 	/* Free up all the SKBs. */
8546 	tg3_free_rings(tp);
8547 
8548 	for (i = 0; i < tp->irq_cnt; i++) {
8549 		struct tg3_napi *tnapi = &tp->napi[i];
8550 
8551 		tnapi->last_tag = 0;
8552 		tnapi->last_irq_tag = 0;
8553 		tnapi->hw_status->status = 0;
8554 		tnapi->hw_status->status_tag = 0;
8555 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8556 
8557 		tnapi->tx_prod = 0;
8558 		tnapi->tx_cons = 0;
8559 		if (tnapi->tx_ring)
8560 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8561 
8562 		tnapi->rx_rcb_ptr = 0;
8563 		if (tnapi->rx_rcb)
8564 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8565 
8566 		if (tnapi->prodring.rx_std &&
8567 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8568 			tg3_free_rings(tp);
8569 			return -ENOMEM;
8570 		}
8571 	}
8572 
8573 	return 0;
8574 }
8575 
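/* Free the tx descriptor ring and tx buffer bookkeeping of every
 * interrupt vector.
 */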
8576 static void tg3_mem_tx_release(struct tg3 *tp)
8577 {
8578 	int i;
8579 
8580 	for (i = 0; i < tp->irq_max; i++) {
8581 		struct tg3_napi *tnapi = &tp->napi[i];
8582 
8583 		if (tnapi->tx_ring) {
8584 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8585 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8586 			tnapi->tx_ring = NULL;
8587 		}
8588 
8589 		kfree(tnapi->tx_buffers);
8590 		tnapi->tx_buffers = NULL;
8591 	}
8592 }
8593 
8594 static int tg3_mem_tx_acquire(struct tg3 *tp)
8595 {
8596 	int i;
8597 	struct tg3_napi *tnapi = &tp->napi[0];
8598 
8599 	/* If multivector TSS is enabled, vector 0 does not handle
8600 	 * tx interrupts.  Don't allocate any resources for it.
8601 	 */
8602 	if (tg3_flag(tp, ENABLE_TSS))
8603 		tnapi++;
8604 
8605 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8606 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8607 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8608 		if (!tnapi->tx_buffers)
8609 			goto err_out;
8610 
8611 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8612 						    TG3_TX_RING_BYTES,
8613 						    &tnapi->tx_desc_mapping,
8614 						    GFP_KERNEL);
8615 		if (!tnapi->tx_ring)
8616 			goto err_out;
8617 	}
8618 
8619 	return 0;
8620 
8621 err_out:
8622 	tg3_mem_tx_release(tp);
8623 	return -ENOMEM;
8624 }
8625 
8626 static void tg3_mem_rx_release(struct tg3 *tp)
8627 {
8628 	int i;
8629 
8630 	for (i = 0; i < tp->irq_max; i++) {
8631 		struct tg3_napi *tnapi = &tp->napi[i];
8632 
8633 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8634 
8635 		if (!tnapi->rx_rcb)
8636 			continue;
8637 
8638 		dma_free_coherent(&tp->pdev->dev,
8639 				  TG3_RX_RCB_RING_BYTES(tp),
8640 				  tnapi->rx_rcb,
8641 				  tnapi->rx_rcb_mapping);
8642 		tnapi->rx_rcb = NULL;
8643 	}
8644 }
8645 
8646 static int tg3_mem_rx_acquire(struct tg3 *tp)
8647 {
8648 	unsigned int i, limit;
8649 
8650 	limit = tp->rxq_cnt;
8651 
8652 	/* If RSS is enabled, we need a (dummy) producer ring
8653 	 * set on vector zero.  This is the true hw prodring.
8654 	 */
8655 	if (tg3_flag(tp, ENABLE_RSS))
8656 		limit++;
8657 
8658 	for (i = 0; i < limit; i++) {
8659 		struct tg3_napi *tnapi = &tp->napi[i];
8660 
8661 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8662 			goto err_out;
8663 
8664 		/* If multivector RSS is enabled, vector 0
8665 		 * does not handle rx or tx interrupts.
8666 		 * Don't allocate any resources for it.
8667 		 */
8668 		if (!i && tg3_flag(tp, ENABLE_RSS))
8669 			continue;
8670 
8671 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8672 						    TG3_RX_RCB_RING_BYTES(tp),
8673 						    &tnapi->rx_rcb_mapping,
8674 						    GFP_KERNEL);
8675 		if (!tnapi->rx_rcb)
8676 			goto err_out;
8677 	}
8678 
8679 	return 0;
8680 
8681 err_out:
8682 	tg3_mem_rx_release(tp);
8683 	return -ENOMEM;
8684 }
8685 
8686 /*
8687  * Must not be invoked with interrupt sources disabled and
8688  * the hardware shut down.
8689  */
8690 static void tg3_free_consistent(struct tg3 *tp)
8691 {
8692 	int i;
8693 
8694 	for (i = 0; i < tp->irq_cnt; i++) {
8695 		struct tg3_napi *tnapi = &tp->napi[i];
8696 
8697 		if (tnapi->hw_status) {
8698 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8699 					  tnapi->hw_status,
8700 					  tnapi->status_mapping);
8701 			tnapi->hw_status = NULL;
8702 		}
8703 	}
8704 
8705 	tg3_mem_rx_release(tp);
8706 	tg3_mem_tx_release(tp);
8707 
8708 	if (tp->hw_stats) {
8709 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8710 				  tp->hw_stats, tp->stats_mapping);
8711 		tp->hw_stats = NULL;
8712 	}
8713 }
8714 
8715 /*
8716  * Must not be invoked with interrupt sources disabled and
8717  * the hardware shut down.  Can sleep.
8718  */
8719 static int tg3_alloc_consistent(struct tg3 *tp)
8720 {
8721 	int i;
8722 
8723 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8724 					   sizeof(struct tg3_hw_stats),
8725 					   &tp->stats_mapping, GFP_KERNEL);
8726 	if (!tp->hw_stats)
8727 		goto err_out;
8728 
8729 	for (i = 0; i < tp->irq_cnt; i++) {
8730 		struct tg3_napi *tnapi = &tp->napi[i];
8731 		struct tg3_hw_status *sblk;
8732 
8733 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8734 						       TG3_HW_STATUS_SIZE,
8735 						       &tnapi->status_mapping,
8736 						       GFP_KERNEL);
8737 		if (!tnapi->hw_status)
8738 			goto err_out;
8739 
8740 		sblk = tnapi->hw_status;
8741 
8742 		if (tg3_flag(tp, ENABLE_RSS)) {
8743 			u16 *prodptr = NULL;
8744 
8745 			/*
8746 			 * When RSS is enabled, the status block format changes
8747 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8748 			 * and "rx_mini_consumer" members get mapped to the
8749 			 * other three rx return ring producer indexes.
8750 			 */
8751 			switch (i) {
8752 			case 1:
8753 				prodptr = &sblk->idx[0].rx_producer;
8754 				break;
8755 			case 2:
8756 				prodptr = &sblk->rx_jumbo_consumer;
8757 				break;
8758 			case 3:
8759 				prodptr = &sblk->reserved;
8760 				break;
8761 			case 4:
8762 				prodptr = &sblk->rx_mini_consumer;
8763 				break;
8764 			}
8765 			tnapi->rx_rcb_prod_idx = prodptr;
8766 		} else {
8767 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8768 		}
8769 	}
8770 
8771 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8772 		goto err_out;
8773 
8774 	return 0;
8775 
8776 err_out:
8777 	tg3_free_consistent(tp);
8778 	return -ENOMEM;
8779 }
8780 
8781 #define MAX_WAIT_CNT 1000
8782 
8783 /* To stop a block, clear the enable bit and poll till it
8784  * clears.  tp->lock is held.
8785  */
8786 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8787 {
8788 	unsigned int i;
8789 	u32 val;
8790 
8791 	if (tg3_flag(tp, 5705_PLUS)) {
8792 		switch (ofs) {
8793 		case RCVLSC_MODE:
8794 		case DMAC_MODE:
8795 		case MBFREE_MODE:
8796 		case BUFMGR_MODE:
8797 		case MEMARB_MODE:
8798 			/* We can't enable/disable these bits on the
8799 			 * 5705/5750, so just say success.
8800 			 */
8801 			return 0;
8802 
8803 		default:
8804 			break;
8805 		}
8806 	}
8807 
8808 	val = tr32(ofs);
8809 	val &= ~enable_bit;
8810 	tw32_f(ofs, val);
8811 
8812 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8813 		if (pci_channel_offline(tp->pdev)) {
8814 			dev_err(&tp->pdev->dev,
8815 				"tg3_stop_block device offline, "
8816 				"ofs=%lx enable_bit=%x\n",
8817 				ofs, enable_bit);
8818 			return -ENODEV;
8819 		}
8820 
8821 		udelay(100);
8822 		val = tr32(ofs);
8823 		if ((val & enable_bit) == 0)
8824 			break;
8825 	}
8826 
8827 	if (i == MAX_WAIT_CNT && !silent) {
8828 		dev_err(&tp->pdev->dev,
8829 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8830 			ofs, enable_bit);
8831 		return -ENODEV;
8832 	}
8833 
8834 	return 0;
8835 }
8836 
8837 /* tp->lock is held. */
8838 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8839 {
8840 	int i, err;
8841 
8842 	tg3_disable_ints(tp);
8843 
8844 	if (pci_channel_offline(tp->pdev)) {
8845 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8846 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8847 		err = -ENODEV;
8848 		goto err_no_dev;
8849 	}
8850 
8851 	tp->rx_mode &= ~RX_MODE_ENABLE;
8852 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8853 	udelay(10);
8854 
8855 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8856 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8857 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8858 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8859 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8860 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8861 
8862 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8863 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8864 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8865 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8866 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8867 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8868 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8869 
8870 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8871 	tw32_f(MAC_MODE, tp->mac_mode);
8872 	udelay(40);
8873 
8874 	tp->tx_mode &= ~TX_MODE_ENABLE;
8875 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8876 
8877 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8878 		udelay(100);
8879 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8880 			break;
8881 	}
8882 	if (i >= MAX_WAIT_CNT) {
8883 		dev_err(&tp->pdev->dev,
8884 			"%s timed out, TX_MODE_ENABLE will not clear "
8885 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8886 		err |= -ENODEV;
8887 	}
8888 
8889 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8890 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8892 
8893 	tw32(FTQ_RESET, 0xffffffff);
8894 	tw32(FTQ_RESET, 0x00000000);
8895 
8896 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8897 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8898 
8899 err_no_dev:
8900 	for (i = 0; i < tp->irq_cnt; i++) {
8901 		struct tg3_napi *tnapi = &tp->napi[i];
8902 		if (tnapi->hw_status)
8903 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8904 	}
8905 
8906 	return err;
8907 }
8908 
8909 /* Save PCI command register before chip reset */
8910 static void tg3_save_pci_state(struct tg3 *tp)
8911 {
8912 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8913 }
8914 
8915 /* Restore PCI state after chip reset */
8916 static void tg3_restore_pci_state(struct tg3 *tp)
8917 {
8918 	u32 val;
8919 
8920 	/* Re-enable indirect register accesses. */
8921 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8922 			       tp->misc_host_ctrl);
8923 
8924 	/* Set MAX PCI retry to zero. */
8925 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8926 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8927 	    tg3_flag(tp, PCIX_MODE))
8928 		val |= PCISTATE_RETRY_SAME_DMA;
8929 	/* Allow reads and writes to the APE register and memory space. */
8930 	if (tg3_flag(tp, ENABLE_APE))
8931 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8932 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8933 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8934 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8935 
8936 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8937 
8938 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8939 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8940 				      tp->pci_cacheline_sz);
8941 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8942 				      tp->pci_lat_timer);
8943 	}
8944 
8945 	/* Make sure PCI-X relaxed ordering bit is clear. */
8946 	if (tg3_flag(tp, PCIX_MODE)) {
8947 		u16 pcix_cmd;
8948 
8949 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8950 				     &pcix_cmd);
8951 		pcix_cmd &= ~PCI_X_CMD_ERO;
8952 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8953 				      pcix_cmd);
8954 	}
8955 
8956 	if (tg3_flag(tp, 5780_CLASS)) {
8957 
8958 		/* Chip reset on 5780 will reset MSI enable bit,
8959 		 * so we need to restore it.
8960 		 */
8961 		if (tg3_flag(tp, USING_MSI)) {
8962 			u16 ctrl;
8963 
8964 			pci_read_config_word(tp->pdev,
8965 					     tp->msi_cap + PCI_MSI_FLAGS,
8966 					     &ctrl);
8967 			pci_write_config_word(tp->pdev,
8968 					      tp->msi_cap + PCI_MSI_FLAGS,
8969 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8970 			val = tr32(MSGINT_MODE);
8971 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8972 		}
8973 	}
8974 }
8975 
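/* Force the MAC clock override so bootcode can finish in time during a
 * chip reset (see the comment in tg3_chip_reset()); undone again by
 * tg3_restore_clk().
 */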
8976 static void tg3_override_clk(struct tg3 *tp)
8977 {
8978 	u32 val;
8979 
8980 	switch (tg3_asic_rev(tp)) {
8981 	case ASIC_REV_5717:
8982 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8983 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8984 		     TG3_CPMU_MAC_ORIDE_ENABLE);
8985 		break;
8986 
8987 	case ASIC_REV_5719:
8988 	case ASIC_REV_5720:
8989 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8990 		break;
8991 
8992 	default:
8993 		return;
8994 	}
8995 }
8996 
8997 static void tg3_restore_clk(struct tg3 *tp)
8998 {
8999 	u32 val;
9000 
9001 	switch (tg3_asic_rev(tp)) {
9002 	case ASIC_REV_5717:
9003 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9004 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9005 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9006 		break;
9007 
9008 	case ASIC_REV_5719:
9009 	case ASIC_REV_5720:
9010 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9011 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9012 		break;
9013 
9014 	default:
9015 		return;
9016 	}
9017 }
9018 
9019 /* tp->lock is held. */
9020 static int tg3_chip_reset(struct tg3 *tp)
9021 {
9022 	u32 val;
9023 	void (*write_op)(struct tg3 *, u32, u32);
9024 	int i, err;
9025 
9026 	if (!pci_device_is_present(tp->pdev))
9027 		return -ENODEV;
9028 
9029 	tg3_nvram_lock(tp);
9030 
9031 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9032 
9033 	/* No matching tg3_nvram_unlock() after this because
9034 	 * chip reset below will undo the nvram lock.
9035 	 */
9036 	tp->nvram_lock_cnt = 0;
9037 
9038 	/* GRC_MISC_CFG core clock reset will clear the memory
9039 	 * enable bit in PCI register 4 and the MSI enable bit
9040 	 * on some chips, so we save relevant registers here.
9041 	 */
9042 	tg3_save_pci_state(tp);
9043 
9044 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9045 	    tg3_flag(tp, 5755_PLUS))
9046 		tw32(GRC_FASTBOOT_PC, 0);
9047 
9048 	/*
9049 	 * We must avoid the readl() that normally takes place.
9050 	 * It locks machines, causes machine checks, and other
9051 	 * fun things.  So temporarily disable the 5701
9052 	 * hardware workaround while we do the reset.
9053 	 */
9054 	write_op = tp->write32;
9055 	if (write_op == tg3_write_flush_reg32)
9056 		tp->write32 = tg3_write32;
9057 
9058 	/* Prevent the irq handler from reading or writing PCI registers
9059 	 * during chip reset when the memory enable bit in the PCI command
9060 	 * register may be cleared.  The chip does not generate interrupts
9061 	 * at this time, but the irq handler may still be called due to irq
9062 	 * sharing or irqpoll.
9063 	 */
9064 	tg3_flag_set(tp, CHIP_RESETTING);
9065 	for (i = 0; i < tp->irq_cnt; i++) {
9066 		struct tg3_napi *tnapi = &tp->napi[i];
9067 		if (tnapi->hw_status) {
9068 			tnapi->hw_status->status = 0;
9069 			tnapi->hw_status->status_tag = 0;
9070 		}
9071 		tnapi->last_tag = 0;
9072 		tnapi->last_irq_tag = 0;
9073 	}
9074 	smp_mb();
9075 
9076 	for (i = 0; i < tp->irq_cnt; i++)
9077 		synchronize_irq(tp->napi[i].irq_vec);
9078 
9079 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9080 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9081 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9082 	}
9083 
9084 	/* do the reset */
9085 	val = GRC_MISC_CFG_CORECLK_RESET;
9086 
9087 	if (tg3_flag(tp, PCI_EXPRESS)) {
9088 		/* Force PCIe 1.0a mode */
9089 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9090 		    !tg3_flag(tp, 57765_PLUS) &&
9091 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9092 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9093 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9094 
9095 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9096 			tw32(GRC_MISC_CFG, (1 << 29));
9097 			val |= (1 << 29);
9098 		}
9099 	}
9100 
9101 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9102 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9103 		tw32(GRC_VCPU_EXT_CTRL,
9104 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9105 	}
9106 
9107 	/* Set the clock to the highest frequency to avoid timeouts. With link
9108 	 * aware mode, the clock speed could be slow and bootcode does not
9109 	 * complete within the expected time. Override the clock to allow the
9110 	 * bootcode to finish sooner and then restore it.
9111 	 */
9112 	tg3_override_clk(tp);
9113 
9114 	/* Manage gphy power for all CPMU absent PCIe devices. */
9115 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9116 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9117 
9118 	tw32(GRC_MISC_CFG, val);
9119 
9120 	/* restore 5701 hardware bug workaround write method */
9121 	tp->write32 = write_op;
9122 
9123 	/* Unfortunately, we have to delay before the PCI read back.
9124 	 * Some 575X chips will not even respond to a PCI cfg access
9125 	 * when the reset command is given to the chip.
9126 	 *
9127 	 * How do these hardware designers expect things to work
9128 	 * properly if the PCI write is posted for a long period
9129 	 * of time?  It is always necessary to have some method by
9130 	 * which a register read back can occur to push out the
9131 	 * write that does the reset.
9132 	 *
9133 	 * For most tg3 variants the trick below was working.
9134 	 * Ho hum...
9135 	 */
9136 	udelay(120);
9137 
9138 	/* Flush PCI posted writes.  The normal MMIO registers
9139 	 * are inaccessible at this time, so this is the only
9140 	 * way to do this reliably (actually, this is no longer
9141 	 * the case, see above).  I tried to use indirect
9142 	 * register read/write but this upset some 5701 variants.
9143 	 */
9144 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9145 
9146 	udelay(120);
9147 
9148 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9149 		u16 val16;
9150 
9151 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9152 			int j;
9153 			u32 cfg_val;
9154 
9155 			/* Wait for link training to complete.  */
9156 			for (j = 0; j < 5000; j++)
9157 				udelay(100);
9158 
9159 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9160 			pci_write_config_dword(tp->pdev, 0xc4,
9161 					       cfg_val | (1 << 15));
9162 		}
9163 
9164 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9165 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9166 		/*
9167 		 * Older PCIe devices only support the 128 byte
9168 		 * MPS setting.  Enforce the restriction.
9169 		 */
9170 		if (!tg3_flag(tp, CPMU_PRESENT))
9171 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9172 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9173 
9174 		/* Clear error status */
9175 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9176 				      PCI_EXP_DEVSTA_CED |
9177 				      PCI_EXP_DEVSTA_NFED |
9178 				      PCI_EXP_DEVSTA_FED |
9179 				      PCI_EXP_DEVSTA_URD);
9180 	}
9181 
9182 	tg3_restore_pci_state(tp);
9183 
9184 	tg3_flag_clear(tp, CHIP_RESETTING);
9185 	tg3_flag_clear(tp, ERROR_PROCESSED);
9186 
9187 	val = 0;
9188 	if (tg3_flag(tp, 5780_CLASS))
9189 		val = tr32(MEMARB_MODE);
9190 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9191 
9192 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9193 		tg3_stop_fw(tp);
9194 		tw32(0x5000, 0x400);
9195 	}
9196 
9197 	if (tg3_flag(tp, IS_SSB_CORE)) {
9198 		/*
9199 		 * BCM4785: In order to avoid repercussions from using
9200 		 * potentially defective internal ROM, stop the Rx RISC CPU
9201 		 * (it is not needed).
9202 		 */
9203 		tg3_stop_fw(tp);
9204 		tg3_halt_cpu(tp, RX_CPU_BASE);
9205 	}
9206 
9207 	err = tg3_poll_fw(tp);
9208 	if (err)
9209 		return err;
9210 
9211 	tw32(GRC_MODE, tp->grc_mode);
9212 
9213 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9214 		val = tr32(0xc4);
9215 
9216 		tw32(0xc4, val | (1 << 15));
9217 	}
9218 
9219 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9220 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9221 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9222 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9223 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9224 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9225 	}
9226 
9227 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9228 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9229 		val = tp->mac_mode;
9230 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9231 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9232 		val = tp->mac_mode;
9233 	} else
9234 		val = 0;
9235 
9236 	tw32_f(MAC_MODE, val);
9237 	udelay(40);
9238 
9239 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9240 
9241 	tg3_mdio_start(tp);
9242 
9243 	if (tg3_flag(tp, PCI_EXPRESS) &&
9244 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9245 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9246 	    !tg3_flag(tp, 57765_PLUS)) {
9247 		val = tr32(0x7c00);
9248 
9249 		tw32(0x7c00, val | (1 << 25));
9250 	}
9251 
9252 	tg3_restore_clk(tp);
9253 
9254 	/* Reprobe ASF enable state.  */
9255 	tg3_flag_clear(tp, ENABLE_ASF);
9256 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9257 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9258 
9259 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9260 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9261 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9262 		u32 nic_cfg;
9263 
9264 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9265 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9266 			tg3_flag_set(tp, ENABLE_ASF);
9267 			tp->last_event_jiffies = jiffies;
9268 			if (tg3_flag(tp, 5750_PLUS))
9269 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9270 
9271 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9272 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9273 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9274 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9275 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9276 		}
9277 	}
9278 
9279 	return 0;
9280 }
9281 
9282 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9283 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9284 static void __tg3_set_rx_mode(struct net_device *);
9285 
9286 /* tp->lock is held. */
9287 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9288 {
9289 	int err;
9290 
9291 	tg3_stop_fw(tp);
9292 
9293 	tg3_write_sig_pre_reset(tp, kind);
9294 
9295 	tg3_abort_hw(tp, silent);
9296 	err = tg3_chip_reset(tp);
9297 
9298 	__tg3_set_mac_addr(tp, false);
9299 
9300 	tg3_write_sig_legacy(tp, kind);
9301 	tg3_write_sig_post_reset(tp, kind);
9302 
9303 	if (tp->hw_stats) {
9304 		/* Save the stats across chip resets... */
9305 		tg3_get_nstats(tp, &tp->net_stats_prev);
9306 		tg3_get_estats(tp, &tp->estats_prev);
9307 
9308 		/* And make sure the next sample is new data */
9309 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9310 	}
9311 
9312 	return err;
9313 }
9314 
9315 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9316 {
9317 	struct tg3 *tp = netdev_priv(dev);
9318 	struct sockaddr *addr = p;
9319 	int err = 0;
9320 	bool skip_mac_1 = false;
9321 
9322 	if (!is_valid_ether_addr(addr->sa_data))
9323 		return -EADDRNOTAVAIL;
9324 
9325 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9326 
9327 	if (!netif_running(dev))
9328 		return 0;
9329 
9330 	if (tg3_flag(tp, ENABLE_ASF)) {
9331 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9332 
9333 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9334 		addr0_low = tr32(MAC_ADDR_0_LOW);
9335 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9336 		addr1_low = tr32(MAC_ADDR_1_LOW);
9337 
9338 		/* Skip MAC addr 1 if ASF is using it. */
9339 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9340 		    !(addr1_high == 0 && addr1_low == 0))
9341 			skip_mac_1 = true;
9342 	}
9343 	spin_lock_bh(&tp->lock);
9344 	__tg3_set_mac_addr(tp, skip_mac_1);
9345 	__tg3_set_rx_mode(dev);
9346 	spin_unlock_bh(&tp->lock);
9347 
9348 	return err;
9349 }
9350 
9351 /* tp->lock is held. */
9352 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9353 			   dma_addr_t mapping, u32 maxlen_flags,
9354 			   u32 nic_addr)
9355 {
9356 	tg3_write_mem(tp,
9357 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9358 		      ((u64) mapping >> 32));
9359 	tg3_write_mem(tp,
9360 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9361 		      ((u64) mapping & 0xffffffff));
9362 	tg3_write_mem(tp,
9363 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9364 		       maxlen_flags);
9365 
9366 	if (!tg3_flag(tp, 5705_PLUS))
9367 		tg3_write_mem(tp,
9368 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9369 			      nic_addr);
9370 }
9371 
9372 
9373 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9374 {
9375 	int i = 0;
9376 
9377 	if (!tg3_flag(tp, ENABLE_TSS)) {
9378 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9379 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9380 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9381 	} else {
9382 		tw32(HOSTCC_TXCOL_TICKS, 0);
9383 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9384 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9385 
9386 		for (; i < tp->txq_cnt; i++) {
9387 			u32 reg;
9388 
9389 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9390 			tw32(reg, ec->tx_coalesce_usecs);
9391 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9392 			tw32(reg, ec->tx_max_coalesced_frames);
9393 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9394 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9395 		}
9396 	}
9397 
9398 	for (; i < tp->irq_max - 1; i++) {
9399 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9400 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9401 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9402 	}
9403 }
9404 
9405 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9406 {
9407 	int i = 0;
9408 	u32 limit = tp->rxq_cnt;
9409 
9410 	if (!tg3_flag(tp, ENABLE_RSS)) {
9411 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9412 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9413 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9414 		limit--;
9415 	} else {
9416 		tw32(HOSTCC_RXCOL_TICKS, 0);
9417 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9418 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9419 	}
9420 
9421 	for (; i < limit; i++) {
9422 		u32 reg;
9423 
9424 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9425 		tw32(reg, ec->rx_coalesce_usecs);
9426 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9427 		tw32(reg, ec->rx_max_coalesced_frames);
9428 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9429 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9430 	}
9431 
9432 	for (; i < tp->irq_max - 1; i++) {
9433 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9434 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9435 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9436 	}
9437 }
9438 
9439 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9440 {
9441 	tg3_coal_tx_init(tp, ec);
9442 	tg3_coal_rx_init(tp, ec);
9443 
9444 	if (!tg3_flag(tp, 5705_PLUS)) {
9445 		u32 val = ec->stats_block_coalesce_usecs;
9446 
9447 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9448 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9449 
9450 		if (!tp->link_up)
9451 			val = 0;
9452 
9453 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9454 	}
9455 }
9456 
9457 /* tp->lock is held. */
9458 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9459 {
9460 	u32 txrcb, limit;
9461 
9462 	/* Disable all transmit rings but the first. */
9463 	if (!tg3_flag(tp, 5705_PLUS))
9464 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9465 	else if (tg3_flag(tp, 5717_PLUS))
9466 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9467 	else if (tg3_flag(tp, 57765_CLASS) ||
9468 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9469 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9470 	else
9471 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9472 
9473 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9474 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9475 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9476 			      BDINFO_FLAGS_DISABLED);
9477 }
9478 
9479 /* tp->lock is held. */
9480 static void tg3_tx_rcbs_init(struct tg3 *tp)
9481 {
9482 	int i = 0;
9483 	u32 txrcb = NIC_SRAM_SEND_RCB;
9484 
9485 	if (tg3_flag(tp, ENABLE_TSS))
9486 		i++;
9487 
9488 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9489 		struct tg3_napi *tnapi = &tp->napi[i];
9490 
9491 		if (!tnapi->tx_ring)
9492 			continue;
9493 
9494 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9495 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9496 			       NIC_SRAM_TX_BUFFER_DESC);
9497 	}
9498 }
9499 
9500 /* tp->lock is held. */
9501 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9502 {
9503 	u32 rxrcb, limit;
9504 
9505 	/* Disable all receive return rings but the first. */
9506 	if (tg3_flag(tp, 5717_PLUS))
9507 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9508 	else if (!tg3_flag(tp, 5705_PLUS))
9509 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9510 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9511 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9512 		 tg3_flag(tp, 57765_CLASS))
9513 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9514 	else
9515 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9516 
9517 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9518 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9519 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9520 			      BDINFO_FLAGS_DISABLED);
9521 }
9522 
9523 /* tp->lock is held. */
9524 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9525 {
9526 	int i = 0;
9527 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9528 
9529 	if (tg3_flag(tp, ENABLE_RSS))
9530 		i++;
9531 
9532 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9533 		struct tg3_napi *tnapi = &tp->napi[i];
9534 
9535 		if (!tnapi->rx_rcb)
9536 			continue;
9537 
9538 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9539 			       (tp->rx_ret_ring_mask + 1) <<
9540 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9541 	}
9542 }
9543 
9544 /* tp->lock is held. */
9545 static void tg3_rings_reset(struct tg3 *tp)
9546 {
9547 	int i;
9548 	u32 stblk;
9549 	struct tg3_napi *tnapi = &tp->napi[0];
9550 
9551 	tg3_tx_rcbs_disable(tp);
9552 
9553 	tg3_rx_ret_rcbs_disable(tp);
9554 
9555 	/* Disable interrupts */
9556 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9557 	tp->napi[0].chk_msi_cnt = 0;
9558 	tp->napi[0].last_rx_cons = 0;
9559 	tp->napi[0].last_tx_cons = 0;
9560 
9561 	/* Zero mailbox registers. */
9562 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9563 		for (i = 1; i < tp->irq_max; i++) {
9564 			tp->napi[i].tx_prod = 0;
9565 			tp->napi[i].tx_cons = 0;
9566 			if (tg3_flag(tp, ENABLE_TSS))
9567 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9568 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9569 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9570 			tp->napi[i].chk_msi_cnt = 0;
9571 			tp->napi[i].last_rx_cons = 0;
9572 			tp->napi[i].last_tx_cons = 0;
9573 		}
9574 		if (!tg3_flag(tp, ENABLE_TSS))
9575 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9576 	} else {
9577 		tp->napi[0].tx_prod = 0;
9578 		tp->napi[0].tx_cons = 0;
9579 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9580 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9581 	}
9582 
9583 	/* Make sure the NIC-based send BD rings are disabled. */
9584 	if (!tg3_flag(tp, 5705_PLUS)) {
9585 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9586 		for (i = 0; i < 16; i++)
9587 			tw32_tx_mbox(mbox + i * 8, 0);
9588 	}
9589 
9590 	/* Clear status block in ram. */
9591 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9592 
9593 	/* Set status block DMA address */
9594 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9595 	     ((u64) tnapi->status_mapping >> 32));
9596 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9597 	     ((u64) tnapi->status_mapping & 0xffffffff));
9598 
9599 	stblk = HOSTCC_STATBLCK_RING1;
9600 
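	/* Program the DMA addresses of the remaining per-vector status
	 * blocks; each high/low register pair is 8 bytes apart.
	 */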
9601 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9602 		u64 mapping = (u64)tnapi->status_mapping;
9603 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9604 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9605 		stblk += 8;
9606 
9607 		/* Clear status block in ram. */
9608 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9609 	}
9610 
9611 	tg3_tx_rcbs_init(tp);
9612 	tg3_rx_ret_rcbs_init(tp);
9613 }
9614 
9615 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9616 {
9617 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9618 
9619 	if (!tg3_flag(tp, 5750_PLUS) ||
9620 	    tg3_flag(tp, 5780_CLASS) ||
9621 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9622 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9623 	    tg3_flag(tp, 57765_PLUS))
9624 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9625 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9626 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9627 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9628 	else
9629 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9630 
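	/* Standard-ring replenish threshold: the smaller of the NIC-side
	 * limit (half the BD cache, capped at rx_std_max_post) and the
	 * host-side limit (one eighth of the ring, at least one BD).
	 */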
9631 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9632 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9633 
9634 	val = min(nic_rep_thresh, host_rep_thresh);
9635 	tw32(RCVBDI_STD_THRESH, val);
9636 
9637 	if (tg3_flag(tp, 57765_PLUS))
9638 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9639 
9640 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9641 		return;
9642 
9643 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9644 
9645 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9646 
9647 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9648 	tw32(RCVBDI_JUMBO_THRESH, val);
9649 
9650 	if (tg3_flag(tp, 57765_PLUS))
9651 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9652 }
9653 
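/* Compute the standard Ethernet CRC-32 (reflected polynomial 0xedb88320)
 * one bit at a time.  __tg3_set_rx_mode() uses the result to index the
 * MAC multicast hash registers.
 */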
9654 static inline u32 calc_crc(unsigned char *buf, int len)
9655 {
9656 	u32 reg;
9657 	u32 tmp;
9658 	int j, k;
9659 
9660 	reg = 0xffffffff;
9661 
9662 	for (j = 0; j < len; j++) {
9663 		reg ^= buf[j];
9664 
9665 		for (k = 0; k < 8; k++) {
9666 			tmp = reg & 0x01;
9667 
9668 			reg >>= 1;
9669 
9670 			if (tmp)
9671 				reg ^= 0xedb88320;
9672 		}
9673 	}
9674 
9675 	return ~reg;
9676 }
9677 
9678 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9679 {
9680 	/* accept or reject all multicast frames */
9681 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9682 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9683 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9684 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9685 }
9686 
9687 static void __tg3_set_rx_mode(struct net_device *dev)
9688 {
9689 	struct tg3 *tp = netdev_priv(dev);
9690 	u32 rx_mode;
9691 
9692 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9693 				  RX_MODE_KEEP_VLAN_TAG);
9694 
9695 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9696 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9697 	 * flag clear.
9698 	 */
9699 	if (!tg3_flag(tp, ENABLE_ASF))
9700 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9701 #endif
9702 
9703 	if (dev->flags & IFF_PROMISC) {
9704 		/* Promiscuous mode. */
9705 		rx_mode |= RX_MODE_PROMISC;
9706 	} else if (dev->flags & IFF_ALLMULTI) {
9707 		/* Accept all multicast. */
9708 		tg3_set_multi(tp, 1);
9709 	} else if (netdev_mc_empty(dev)) {
9710 		/* Reject all multicast. */
9711 		tg3_set_multi(tp, 0);
9712 	} else {
9713 		/* Accept one or more multicast(s). */
9714 		struct netdev_hw_addr *ha;
9715 		u32 mc_filter[4] = { 0, };
9716 		u32 regidx;
9717 		u32 bit;
9718 		u32 crc;
9719 
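		/* Hash each address with CRC-32; the low seven bits of the
		 * complemented CRC select one of 128 filter bits: bits 5-6
		 * pick the hash register, bits 0-4 the bit within it.
		 */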
9720 		netdev_for_each_mc_addr(ha, dev) {
9721 			crc = calc_crc(ha->addr, ETH_ALEN);
9722 			bit = ~crc & 0x7f;
9723 			regidx = (bit & 0x60) >> 5;
9724 			bit &= 0x1f;
9725 			mc_filter[regidx] |= (1 << bit);
9726 		}
9727 
9728 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9729 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9730 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9731 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9732 	}
9733 
9734 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9735 		rx_mode |= RX_MODE_PROMISC;
9736 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
9738 		int i = 0;
9739 		struct netdev_hw_addr *ha;
9740 
9741 		netdev_for_each_uc_addr(ha, dev) {
9742 			__tg3_set_one_mac_addr(tp, ha->addr,
9743 					       i + TG3_UCAST_ADDR_IDX(tp));
9744 			i++;
9745 		}
9746 	}
9747 
9748 	if (rx_mode != tp->rx_mode) {
9749 		tp->rx_mode = rx_mode;
9750 		tw32_f(MAC_RX_MODE, rx_mode);
9751 		udelay(10);
9752 	}
9753 }
9754 
9755 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9756 {
9757 	int i;
9758 
9759 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9760 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9761 }
9762 
9763 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9764 {
9765 	int i;
9766 
9767 	if (!tg3_flag(tp, SUPPORT_MSIX))
9768 		return;
9769 
9770 	if (tp->rxq_cnt == 1) {
9771 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9772 		return;
9773 	}
9774 
9775 	/* Validate table against current IRQ count */
9776 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9777 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9778 			break;
9779 	}
9780 
9781 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9782 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9783 }
9784 
9785 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9786 {
9787 	int i = 0;
9788 	u32 reg = MAC_RSS_INDIR_TBL_0;
9789 
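	/* Each 32-bit indirection register packs eight 4-bit queue indices,
	 * most-significant nibble first.
	 */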
9790 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9791 		u32 val = tp->rss_ind_tbl[i];
9792 		i++;
9793 		for (; i % 8; i++) {
9794 			val <<= 4;
9795 			val |= tp->rss_ind_tbl[i];
9796 		}
9797 		tw32(reg, val);
9798 		reg += 4;
9799 	}
9800 }
9801 
9802 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9803 {
9804 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9805 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9806 	else
9807 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9808 }
9809 
9810 /* tp->lock is held. */
9811 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9812 {
9813 	u32 val, rdmac_mode;
9814 	int i, err, limit;
9815 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9816 
9817 	tg3_disable_ints(tp);
9818 
9819 	tg3_stop_fw(tp);
9820 
9821 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9822 
9823 	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, true);
9825 
9826 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9827 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9828 		tg3_phy_pull_config(tp);
9829 		tg3_eee_pull_config(tp, NULL);
9830 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9831 	}
9832 
9833 	/* Enable MAC control of LPI */
9834 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9835 		tg3_setup_eee(tp);
9836 
9837 	if (reset_phy)
9838 		tg3_phy_reset(tp);
9839 
9840 	err = tg3_chip_reset(tp);
9841 	if (err)
9842 		return err;
9843 
9844 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9845 
9846 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9847 		val = tr32(TG3_CPMU_CTRL);
9848 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9849 		tw32(TG3_CPMU_CTRL, val);
9850 
9851 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9852 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9853 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9854 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9855 
9856 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9857 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9858 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9859 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9860 
9861 		val = tr32(TG3_CPMU_HST_ACC);
9862 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9863 		val |= CPMU_HST_ACC_MACCLK_6_25;
9864 		tw32(TG3_CPMU_HST_ACC, val);
9865 	}
9866 
9867 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9868 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9869 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9870 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9871 		tw32(PCIE_PWR_MGMT_THRESH, val);
9872 
9873 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9874 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9875 
9876 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9877 
9878 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9879 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9880 	}
9881 
9882 	if (tg3_flag(tp, L1PLLPD_EN)) {
9883 		u32 grc_mode = tr32(GRC_MODE);
9884 
9885 		/* Access the lower 1K of PL PCIE block registers. */
9886 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9887 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9888 
9889 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9890 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9891 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9892 
9893 		tw32(GRC_MODE, grc_mode);
9894 	}
9895 
9896 	if (tg3_flag(tp, 57765_CLASS)) {
9897 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9898 			u32 grc_mode = tr32(GRC_MODE);
9899 
9900 			/* Access the lower 1K of PL PCIE block registers. */
9901 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9902 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9903 
9904 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9905 				   TG3_PCIE_PL_LO_PHYCTL5);
9906 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9907 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9908 
9909 			tw32(GRC_MODE, grc_mode);
9910 		}
9911 
9912 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9913 			u32 grc_mode;
9914 
9915 			/* Fix transmit hangs */
9916 			val = tr32(TG3_CPMU_PADRNG_CTL);
9917 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9918 			tw32(TG3_CPMU_PADRNG_CTL, val);
9919 
9920 			grc_mode = tr32(GRC_MODE);
9921 
9922 			/* Access the lower 1K of DL PCIE block registers. */
9923 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9924 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9925 
9926 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9927 				   TG3_PCIE_DL_LO_FTSMAX);
9928 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9929 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9930 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9931 
9932 			tw32(GRC_MODE, grc_mode);
9933 		}
9934 
9935 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9936 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9937 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9938 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9939 	}
9940 
9941 	/* This works around an issue with Athlon chipsets on
9942 	 * B3 tigon3 silicon.  This bit has no effect on any
9943 	 * other revision.  But do not set this on PCI Express
9944 	 * chips and don't even touch the clocks if the CPMU is present.
9945 	 */
9946 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9947 		if (!tg3_flag(tp, PCI_EXPRESS))
9948 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9949 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9950 	}
9951 
9952 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9953 	    tg3_flag(tp, PCIX_MODE)) {
9954 		val = tr32(TG3PCI_PCISTATE);
9955 		val |= PCISTATE_RETRY_SAME_DMA;
9956 		tw32(TG3PCI_PCISTATE, val);
9957 	}
9958 
9959 	if (tg3_flag(tp, ENABLE_APE)) {
9960 		/* Allow reads and writes to the
9961 		 * APE register and memory space.
9962 		 */
9963 		val = tr32(TG3PCI_PCISTATE);
9964 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9965 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9966 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9967 		tw32(TG3PCI_PCISTATE, val);
9968 	}
9969 
9970 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9971 		/* Enable some hw fixes.  */
9972 		val = tr32(TG3PCI_MSI_DATA);
9973 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9974 		tw32(TG3PCI_MSI_DATA, val);
9975 	}
9976 
9977 	/* Descriptor ring init may make accesses to the
9978 	 * NIC SRAM area to setup the TX descriptors, so we
9979 	 * can only do this after the hardware has been
9980 	 * successfully reset.
9981 	 */
9982 	err = tg3_init_rings(tp);
9983 	if (err)
9984 		return err;
9985 
9986 	if (tg3_flag(tp, 57765_PLUS)) {
9987 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9988 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9989 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9990 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9991 		if (!tg3_flag(tp, 57765_CLASS) &&
9992 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9993 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9994 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9995 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9996 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9997 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9998 		/* This value is determined during the probe time DMA
9999 		 * engine test, tg3_test_dma.
10000 		 */
10001 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10002 	}
10003 
10004 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10005 			  GRC_MODE_4X_NIC_SEND_RINGS |
10006 			  GRC_MODE_NO_TX_PHDR_CSUM |
10007 			  GRC_MODE_NO_RX_PHDR_CSUM);
10008 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10009 
10010 	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
10012 	 * header checksums on receive.  For transmit it is more
10013 	 * convenient to do the pseudo-header checksum in software
10014 	 * as Linux does that on transmit for us in all cases.
10015 	 */
10016 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10017 
10018 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10019 	if (tp->rxptpctl)
10020 		tw32(TG3_RX_PTP_CTL,
10021 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10022 
10023 	if (tg3_flag(tp, PTP_CAPABLE))
10024 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10025 
10026 	tw32(GRC_MODE, tp->grc_mode | val);
10027 
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10029 	val = tr32(GRC_MISC_CFG);
10030 	val &= ~0xff;
10031 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10032 	tw32(GRC_MISC_CFG, val);
10033 
10034 	/* Initialize MBUF/DESC pool. */
10035 	if (tg3_flag(tp, 5750_PLUS)) {
10036 		/* Do nothing.  */
10037 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10038 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10039 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10040 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10041 		else
10042 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10043 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10044 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10045 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10046 		int fw_len;
10047 
10048 		fw_len = tp->fw_len;
10049 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10050 		tw32(BUFMGR_MB_POOL_ADDR,
10051 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10052 		tw32(BUFMGR_MB_POOL_SIZE,
10053 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10054 	}
10055 
10056 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10057 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10058 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10059 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10060 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10061 		tw32(BUFMGR_MB_HIGH_WATER,
10062 		     tp->bufmgr_config.mbuf_high_water);
10063 	} else {
10064 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10065 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10066 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10067 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10068 		tw32(BUFMGR_MB_HIGH_WATER,
10069 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10070 	}
10071 	tw32(BUFMGR_DMA_LOW_WATER,
10072 	     tp->bufmgr_config.dma_low_water);
10073 	tw32(BUFMGR_DMA_HIGH_WATER,
10074 	     tp->bufmgr_config.dma_high_water);
10075 
10076 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10077 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10078 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10079 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10080 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10081 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10082 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10083 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10084 	tw32(BUFMGR_MODE, val);
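	/* Poll up to ~20 ms (2000 x 10 us) for the buffer manager to enable. */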
10085 	for (i = 0; i < 2000; i++) {
10086 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10087 			break;
10088 		udelay(10);
10089 	}
10090 	if (i >= 2000) {
10091 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10092 		return -ENODEV;
10093 	}
10094 
10095 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10096 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10097 
10098 	tg3_setup_rxbd_thresholds(tp);
10099 
10100 	/* Initialize TG3_BDINFO's at:
10101 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10102 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10103 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10104 	 *
10105 	 * like so:
10106 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10107 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10108 	 *                              ring attribute flags
10109 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10110 	 *
10111 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10112 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10113 	 *
10114 	 * The size of each ring is fixed in the firmware, but the location is
10115 	 * configurable.
10116 	 */
10117 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10118 	     ((u64) tpr->rx_std_mapping >> 32));
10119 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10120 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10121 	if (!tg3_flag(tp, 5717_PLUS))
10122 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10123 		     NIC_SRAM_RX_BUFFER_DESC);
10124 
10125 	/* Disable the mini ring */
10126 	if (!tg3_flag(tp, 5705_PLUS))
10127 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10128 		     BDINFO_FLAGS_DISABLED);
10129 
10130 	/* Program the jumbo buffer descriptor ring control
10131 	 * blocks on those devices that have them.
10132 	 */
10133 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10134 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10135 
10136 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10137 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10138 			     ((u64) tpr->rx_jmb_mapping >> 32));
10139 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10140 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10141 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10142 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10143 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10144 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10145 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10146 			    tg3_flag(tp, 57765_CLASS) ||
10147 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10148 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10149 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10150 		} else {
10151 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10152 			     BDINFO_FLAGS_DISABLED);
10153 		}
10154 
10155 		if (tg3_flag(tp, 57765_PLUS)) {
10156 			val = TG3_RX_STD_RING_SIZE(tp);
10157 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10158 			val |= (TG3_RX_STD_DMA_SZ << 2);
10159 		} else
10160 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10161 	} else
10162 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10163 
10164 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10165 
10166 	tpr->rx_std_prod_idx = tp->rx_pending;
10167 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10168 
10169 	tpr->rx_jmb_prod_idx =
10170 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10171 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10172 
10173 	tg3_rings_reset(tp);
10174 
10175 	/* Initialize MAC address and backoff seed. */
10176 	__tg3_set_mac_addr(tp, false);
10177 
10178 	/* MTU + ethernet header + FCS + optional VLAN tag */
10179 	tw32(MAC_RX_MTU_SIZE,
10180 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10181 
10182 	/* The slot time is changed by tg3_setup_phy if we
10183 	 * run at gigabit with half duplex.
10184 	 */
10185 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10186 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10187 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10188 
10189 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10190 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10191 		val |= tr32(MAC_TX_LENGTHS) &
10192 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10193 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10194 
10195 	tw32(MAC_TX_LENGTHS, val);
10196 
10197 	/* Receive rules. */
10198 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10199 	tw32(RCVLPC_CONFIG, 0x0181);
10200 
	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
10204 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10205 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10206 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10207 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10208 		      RDMAC_MODE_LNGREAD_ENAB);
10209 
10210 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10211 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10212 
10213 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10214 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10215 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10216 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10217 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10218 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10219 
10220 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10221 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10222 		if (tg3_flag(tp, TSO_CAPABLE) &&
10223 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10224 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10225 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10226 			   !tg3_flag(tp, IS_5788)) {
10227 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10228 		}
10229 	}
10230 
10231 	if (tg3_flag(tp, PCI_EXPRESS))
10232 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10233 
10234 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10235 		tp->dma_limit = 0;
10236 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10237 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10238 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10239 		}
10240 	}
10241 
10242 	if (tg3_flag(tp, HW_TSO_1) ||
10243 	    tg3_flag(tp, HW_TSO_2) ||
10244 	    tg3_flag(tp, HW_TSO_3))
10245 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10246 
10247 	if (tg3_flag(tp, 57765_PLUS) ||
10248 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10249 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10250 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10251 
10252 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10253 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10254 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10255 
10256 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10257 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10258 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10259 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10260 	    tg3_flag(tp, 57765_PLUS)) {
10261 		u32 tgtreg;
10262 
10263 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10264 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10265 		else
10266 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10267 
10268 		val = tr32(tgtreg);
10269 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10270 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10271 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10272 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10273 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10274 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10275 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10276 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10277 		}
10278 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10279 	}
10280 
10281 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10282 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10283 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10284 		u32 tgtreg;
10285 
10286 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10287 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10288 		else
10289 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10290 
10291 		val = tr32(tgtreg);
10292 		tw32(tgtreg, val |
10293 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10294 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10295 	}
10296 
10297 	/* Receive/send statistics. */
10298 	if (tg3_flag(tp, 5750_PLUS)) {
10299 		val = tr32(RCVLPC_STATS_ENABLE);
10300 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10301 		tw32(RCVLPC_STATS_ENABLE, val);
10302 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10303 		   tg3_flag(tp, TSO_CAPABLE)) {
10304 		val = tr32(RCVLPC_STATS_ENABLE);
10305 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10306 		tw32(RCVLPC_STATS_ENABLE, val);
10307 	} else {
10308 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10309 	}
10310 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10311 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10312 	tw32(SNDDATAI_STATSCTRL,
10313 	     (SNDDATAI_SCTRL_ENABLE |
10314 	      SNDDATAI_SCTRL_FASTUPD));
10315 
	/* Set up the host coalescing engine. */
10317 	tw32(HOSTCC_MODE, 0);
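	/* Wait up to ~20 ms for the coalescing engine to report disabled
	 * before programming the new parameters.
	 */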
10318 	for (i = 0; i < 2000; i++) {
10319 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10320 			break;
10321 		udelay(10);
10322 	}
10323 
10324 	__tg3_set_coalesce(tp, &tp->coal);
10325 
10326 	if (!tg3_flag(tp, 5705_PLUS)) {
10327 		/* Status/statistics block address.  See tg3_timer,
10328 		 * the tg3_periodic_fetch_stats call there, and
10329 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10330 		 */
10331 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10332 		     ((u64) tp->stats_mapping >> 32));
10333 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10334 		     ((u64) tp->stats_mapping & 0xffffffff));
10335 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10336 
10337 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10338 
10339 		/* Clear statistics and status block memory areas */
10340 		for (i = NIC_SRAM_STATS_BLK;
10341 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10342 		     i += sizeof(u32)) {
10343 			tg3_write_mem(tp, i, 0);
10344 			udelay(40);
10345 		}
10346 	}
10347 
10348 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10349 
10350 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10351 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10352 	if (!tg3_flag(tp, 5705_PLUS))
10353 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10354 
10355 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10356 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* Reset to prevent intermittently losing the first RX packet. */
10358 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10359 		udelay(10);
10360 	}
10361 
10362 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10363 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10364 			MAC_MODE_FHDE_ENABLE;
10365 	if (tg3_flag(tp, ENABLE_APE))
10366 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10367 	if (!tg3_flag(tp, 5705_PLUS) &&
10368 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10369 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10370 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10371 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10372 	udelay(40);
10373 
10374 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10375 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10376 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10377 	 * whether used as inputs or outputs, are set by boot code after
10378 	 * reset.
10379 	 */
10380 	if (!tg3_flag(tp, IS_NIC)) {
10381 		u32 gpio_mask;
10382 
10383 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10384 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10385 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10386 
10387 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10388 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10389 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10390 
10391 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10392 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10393 
10394 		tp->grc_local_ctrl &= ~gpio_mask;
10395 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10396 
		/* GPIO1 must be driven high for EEPROM write protect. */
10398 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10399 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10400 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10401 	}
10402 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10403 	udelay(100);
10404 
10405 	if (tg3_flag(tp, USING_MSIX)) {
10406 		val = tr32(MSGINT_MODE);
10407 		val |= MSGINT_MODE_ENABLE;
10408 		if (tp->irq_cnt > 1)
10409 			val |= MSGINT_MODE_MULTIVEC_EN;
10410 		if (!tg3_flag(tp, 1SHOT_MSI))
10411 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10412 		tw32(MSGINT_MODE, val);
10413 	}
10414 
10415 	if (!tg3_flag(tp, 5705_PLUS)) {
10416 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10417 		udelay(40);
10418 	}
10419 
10420 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10421 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10422 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10423 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10424 	       WDMAC_MODE_LNGREAD_ENAB);
10425 
10426 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10427 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10428 		if (tg3_flag(tp, TSO_CAPABLE) &&
10429 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10430 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10431 			/* nothing */
10432 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10433 			   !tg3_flag(tp, IS_5788)) {
10434 			val |= WDMAC_MODE_RX_ACCEL;
10435 		}
10436 	}
10437 
10438 	/* Enable host coalescing bug fix */
10439 	if (tg3_flag(tp, 5755_PLUS))
10440 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10441 
10442 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10443 		val |= WDMAC_MODE_BURST_ALL_DATA;
10444 
10445 	tw32_f(WDMAC_MODE, val);
10446 	udelay(40);
10447 
10448 	if (tg3_flag(tp, PCIX_MODE)) {
10449 		u16 pcix_cmd;
10450 
10451 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10452 				     &pcix_cmd);
10453 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10454 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10455 			pcix_cmd |= PCI_X_CMD_READ_2K;
10456 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10457 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10458 			pcix_cmd |= PCI_X_CMD_READ_2K;
10459 		}
10460 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10461 				      pcix_cmd);
10462 	}
10463 
10464 	tw32_f(RDMAC_MODE, rdmac_mode);
10465 	udelay(40);
10466 
10467 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10468 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10469 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10470 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10471 				break;
10472 		}
10473 		if (i < TG3_NUM_RDMA_CHANNELS) {
10474 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10475 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10476 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10477 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10478 		}
10479 	}
10480 
10481 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10482 	if (!tg3_flag(tp, 5705_PLUS))
10483 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10484 
10485 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10486 		tw32(SNDDATAC_MODE,
10487 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10488 	else
10489 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10490 
10491 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10492 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10493 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10494 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10495 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10496 	tw32(RCVDBDI_MODE, val);
10497 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10498 	if (tg3_flag(tp, HW_TSO_1) ||
10499 	    tg3_flag(tp, HW_TSO_2) ||
10500 	    tg3_flag(tp, HW_TSO_3))
10501 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10502 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10503 	if (tg3_flag(tp, ENABLE_TSS))
10504 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10505 	tw32(SNDBDI_MODE, val);
10506 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10507 
10508 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10509 		err = tg3_load_5701_a0_firmware_fix(tp);
10510 		if (err)
10511 			return err;
10512 	}
10513 
10514 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors from the firmware download. If the
		 * download fails, the device will operate with EEE disabled.
		 */
10518 		tg3_load_57766_firmware(tp);
10519 	}
10520 
10521 	if (tg3_flag(tp, TSO_CAPABLE)) {
10522 		err = tg3_load_tso_firmware(tp);
10523 		if (err)
10524 			return err;
10525 	}
10526 
10527 	tp->tx_mode = TX_MODE_ENABLE;
10528 
10529 	if (tg3_flag(tp, 5755_PLUS) ||
10530 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10531 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10532 
10533 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10534 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10535 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10536 		tp->tx_mode &= ~val;
10537 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10538 	}
10539 
10540 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10541 	udelay(100);
10542 
10543 	if (tg3_flag(tp, ENABLE_RSS)) {
10544 		tg3_rss_write_indir_tbl(tp);
10545 
10546 		/* Setup the "secret" hash key. */
10547 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10548 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10549 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10550 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10551 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10552 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10553 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10554 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10555 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10556 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10557 	}
10558 
10559 	tp->rx_mode = RX_MODE_ENABLE;
10560 	if (tg3_flag(tp, 5755_PLUS))
10561 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10562 
10563 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10564 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10565 
10566 	if (tg3_flag(tp, ENABLE_RSS))
10567 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10568 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10569 			       RX_MODE_RSS_IPV6_HASH_EN |
10570 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10571 			       RX_MODE_RSS_IPV4_HASH_EN |
10572 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10573 
10574 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10575 	udelay(10);
10576 
10577 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10578 
10579 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10580 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10581 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10582 		udelay(10);
10583 	}
10584 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10585 	udelay(10);
10586 
10587 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10588 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10589 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set the drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
10592 			val = tr32(MAC_SERDES_CFG);
10593 			val &= 0xfffff000;
10594 			val |= 0x880;
10595 			tw32(MAC_SERDES_CFG, val);
10596 		}
10597 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10598 			tw32(MAC_SERDES_CFG, 0x616000);
10599 	}
10600 
10601 	/* Prevent chip from dropping frames when flow control
10602 	 * is enabled.
10603 	 */
10604 	if (tg3_flag(tp, 57765_CLASS))
10605 		val = 1;
10606 	else
10607 		val = 2;
10608 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10609 
10610 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10611 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10612 		/* Use hardware link auto-negotiation */
10613 		tg3_flag_set(tp, HW_AUTONEG);
10614 	}
10615 
10616 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10617 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10618 		u32 tmp;
10619 
10620 		tmp = tr32(SERDES_RX_CTRL);
10621 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10622 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10623 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10624 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10625 	}
10626 
10627 	if (!tg3_flag(tp, USE_PHYLIB)) {
10628 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10629 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10630 
10631 		err = tg3_setup_phy(tp, false);
10632 		if (err)
10633 			return err;
10634 
10635 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10636 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10637 			u32 tmp;
10638 
10639 			/* Clear CRC stats. */
10640 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10641 				tg3_writephy(tp, MII_TG3_TEST1,
10642 					     tmp | MII_TG3_TEST1_CRC_EN);
10643 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10644 			}
10645 		}
10646 	}
10647 
10648 	__tg3_set_rx_mode(tp->dev);
10649 
10650 	/* Initialize receive rules. */
10651 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10652 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10653 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10654 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10655 
10656 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10657 		limit = 8;
10658 	else
10659 		limit = 16;
10660 	if (tg3_flag(tp, ENABLE_ASF))
10661 		limit -= 4;
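	/* The cases below intentionally fall through so that every receive
	 * rule from (limit - 1) down to 4 is cleared.
	 */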
10662 	switch (limit) {
10663 	case 16:
10664 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10665 	case 15:
10666 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10667 	case 14:
10668 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10669 	case 13:
10670 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10671 	case 12:
10672 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10673 	case 11:
10674 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10675 	case 10:
10676 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10677 	case 9:
10678 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10679 	case 8:
10680 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10681 	case 7:
10682 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10683 	case 6:
10684 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10685 	case 5:
10686 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10687 	case 4:
10688 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10689 	case 3:
10690 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10691 	case 2:
10692 	case 1:
10693 
10694 	default:
10695 		break;
10696 	}
10697 
10698 	if (tg3_flag(tp, ENABLE_APE))
10699 		/* Write our heartbeat update interval to APE. */
10700 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10701 				APE_HOST_HEARTBEAT_INT_DISABLE);
10702 
10703 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10704 
10705 	return 0;
10706 }
10707 
10708 /* Called at device open time to get the chip ready for
10709  * packet processing.  Invoked with tp->lock held.
10710  */
10711 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10712 {
10713 	/* Chip may have been just powered on. If so, the boot code may still
10714 	 * be running initialization. Wait for it to finish to avoid races in
10715 	 * accessing the hardware.
10716 	 */
10717 	tg3_enable_register_access(tp);
10718 	tg3_poll_fw(tp);
10719 
10720 	tg3_switch_clocks(tp);
10721 
10722 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10723 
10724 	return tg3_reset_hw(tp, reset_phy);
10725 }
10726 
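/* Read the sensor data records from the APE scratchpad, zeroing any
 * record whose signature is invalid or that is not marked active.
 */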
10727 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10728 {
10729 	int i;
10730 
10731 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10732 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10733 
10734 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10735 		off += len;
10736 
10737 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10738 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10739 			memset(ocir, 0, TG3_OCIR_LEN);
10740 	}
10741 }
10742 
10743 /* sysfs attributes for hwmon */
10744 static ssize_t tg3_show_temp(struct device *dev,
10745 			     struct device_attribute *devattr, char *buf)
10746 {
10747 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10748 	struct tg3 *tp = dev_get_drvdata(dev);
10749 	u32 temperature;
10750 
10751 	spin_lock_bh(&tp->lock);
10752 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10753 				sizeof(temperature));
10754 	spin_unlock_bh(&tp->lock);
	/* hwmon expects temperatures in millidegrees Celsius */
	return sprintf(buf, "%u\n", temperature * 1000);
10756 }
10757 
10758 
10759 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10760 			  TG3_TEMP_SENSOR_OFFSET);
10761 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10762 			  TG3_TEMP_CAUTION_OFFSET);
10763 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10764 			  TG3_TEMP_MAX_OFFSET);
10765 
10766 static struct attribute *tg3_attrs[] = {
10767 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10768 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10769 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10770 	NULL
10771 };
10772 ATTRIBUTE_GROUPS(tg3);
10773 
10774 static void tg3_hwmon_close(struct tg3 *tp)
10775 {
10776 	if (tp->hwmon_dev) {
10777 		hwmon_device_unregister(tp->hwmon_dev);
10778 		tp->hwmon_dev = NULL;
10779 	}
10780 }
10781 
10782 static void tg3_hwmon_open(struct tg3 *tp)
10783 {
10784 	int i;
10785 	u32 size = 0;
10786 	struct pci_dev *pdev = tp->pdev;
10787 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10788 
10789 	tg3_sd_scan_scratchpad(tp, ocirs);
10790 
10791 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10792 		if (!ocirs[i].src_data_length)
10793 			continue;
10794 
10795 		size += ocirs[i].src_hdr_length;
10796 		size += ocirs[i].src_data_length;
10797 	}
10798 
10799 	if (!size)
10800 		return;
10801 
10802 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10803 							  tp, tg3_groups);
10804 	if (IS_ERR(tp->hwmon_dev)) {
10805 		tp->hwmon_dev = NULL;
10806 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10807 	}
10808 }
10809 
10810 
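/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
 * carrying into the high word whenever the low word wraps.
 */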
10811 #define TG3_STAT_ADD32(PSTAT, REG) \
10812 do {	u32 __val = tr32(REG); \
10813 	(PSTAT)->low += __val; \
10814 	if ((PSTAT)->low < __val) \
10815 		(PSTAT)->high += 1; \
10816 } while (0)
10817 
10818 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10819 {
10820 	struct tg3_hw_stats *sp = tp->hw_stats;
10821 
10822 	if (!tp->link_up)
10823 		return;
10824 
10825 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10826 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10827 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10828 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10829 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10830 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10831 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10832 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10833 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10834 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10835 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10836 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10837 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
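	/* Once more frames than there are RDMA channels have been sent,
	 * undo the 5719/5720 read DMA length workaround that tg3_reset_hw()
	 * may have applied.
	 */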
10838 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10839 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10840 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10841 		u32 val;
10842 
10843 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10844 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10845 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10846 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10847 	}
10848 
10849 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10850 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10851 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10852 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10853 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10854 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10855 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10856 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10857 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10858 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10859 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10860 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10861 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10862 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10863 
10864 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10865 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10866 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10867 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10868 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10869 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10870 	} else {
10871 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10872 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10873 		if (val) {
10874 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10875 			sp->rx_discards.low += val;
10876 			if (sp->rx_discards.low < val)
10877 				sp->rx_discards.high += 1;
10878 		}
10879 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10880 	}
10881 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10882 }
10883 
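/* If a vector still has work pending but its consumer indices have not
 * moved since the last timer tick, assume the MSI was missed and invoke
 * the handler directly after a one-tick grace period.
 */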
10884 static void tg3_chk_missed_msi(struct tg3 *tp)
10885 {
10886 	u32 i;
10887 
10888 	for (i = 0; i < tp->irq_cnt; i++) {
10889 		struct tg3_napi *tnapi = &tp->napi[i];
10890 
10891 		if (tg3_has_work(tnapi)) {
10892 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10893 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10894 				if (tnapi->chk_msi_cnt < 1) {
10895 					tnapi->chk_msi_cnt++;
10896 					return;
10897 				}
10898 				tg3_msi(0, tnapi);
10899 			}
10900 		}
10901 		tnapi->chk_msi_cnt = 0;
10902 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10903 		tnapi->last_tx_cons = tnapi->tx_cons;
10904 	}
10905 }
10906 
10907 static void tg3_timer(unsigned long __opaque)
10908 {
10909 	struct tg3 *tp = (struct tg3 *) __opaque;
10910 
10911 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10912 		goto restart_timer;
10913 
10914 	spin_lock(&tp->lock);
10915 
10916 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10917 	    tg3_flag(tp, 57765_CLASS))
10918 		tg3_chk_missed_msi(tp);
10919 
10920 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10921 		/* BCM4785: Flush posted writes from GbE to host memory. */
10922 		tr32(HOSTCC_MODE);
10923 	}
10924 
10925 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is needed because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
10930 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10931 			tw32(GRC_LOCAL_CTRL,
10932 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10933 		} else {
10934 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10935 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10936 		}
10937 
10938 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10939 			spin_unlock(&tp->lock);
10940 			tg3_reset_task_schedule(tp);
10941 			goto restart_timer;
10942 		}
10943 	}
10944 
10945 	/* This part only runs once per second. */
10946 	if (!--tp->timer_counter) {
10947 		if (tg3_flag(tp, 5705_PLUS))
10948 			tg3_periodic_fetch_stats(tp);
10949 
10950 		if (tp->setlpicnt && !--tp->setlpicnt)
10951 			tg3_phy_eee_enable(tp);
10952 
10953 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10954 			u32 mac_stat;
10955 			int phy_event;
10956 
10957 			mac_stat = tr32(MAC_STATUS);
10958 
10959 			phy_event = 0;
10960 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10961 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10962 					phy_event = 1;
10963 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10964 				phy_event = 1;
10965 
10966 			if (phy_event)
10967 				tg3_setup_phy(tp, false);
10968 		} else if (tg3_flag(tp, POLL_SERDES)) {
10969 			u32 mac_stat = tr32(MAC_STATUS);
10970 			int need_setup = 0;
10971 
10972 			if (tp->link_up &&
10973 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10974 				need_setup = 1;
10975 			}
10976 			if (!tp->link_up &&
10977 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10978 					 MAC_STATUS_SIGNAL_DET))) {
10979 				need_setup = 1;
10980 			}
10981 			if (need_setup) {
10982 				if (!tp->serdes_counter) {
10983 					tw32_f(MAC_MODE,
10984 					     (tp->mac_mode &
10985 					      ~MAC_MODE_PORT_MODE_MASK));
10986 					udelay(40);
10987 					tw32_f(MAC_MODE, tp->mac_mode);
10988 					udelay(40);
10989 				}
10990 				tg3_setup_phy(tp, false);
10991 			}
10992 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10993 			   tg3_flag(tp, 5780_CLASS)) {
10994 			tg3_serdes_parallel_detect(tp);
10995 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
10996 			u32 cpmu = tr32(TG3_CPMU_STATUS);
10997 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
10998 					 TG3_CPMU_STATUS_LINK_MASK);
10999 
11000 			if (link_up != tp->link_up)
11001 				tg3_setup_phy(tp, false);
11002 		}
11003 
11004 		tp->timer_counter = tp->timer_multiplier;
11005 	}
11006 
11007 	/* Heartbeat is only sent once every 2 seconds.
11008 	 *
11009 	 * The heartbeat is to tell the ASF firmware that the host
11010 	 * driver is still alive.  In the event that the OS crashes,
11011 	 * ASF needs to reset the hardware to free up the FIFO space
11012 	 * that may be filled with rx packets destined for the host.
11013 	 * If the FIFO is full, ASF will no longer function properly.
11014 	 *
11015 	 * Unintended resets have been reported on real time kernels
11016 	 * where the timer doesn't run on time.  Netpoll will also have
11017 	 * the same problem.
11018 	 *
11019 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11020 	 * to check the ring condition when the heartbeat is expiring
11021 	 * before doing the reset.  This will prevent most unintended
11022 	 * resets.
11023 	 */
11024 	if (!--tp->asf_counter) {
11025 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11026 			tg3_wait_for_event_ack(tp);
11027 
11028 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11029 				      FWCMD_NICDRV_ALIVE3);
11030 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11031 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11032 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11033 
11034 			tg3_generate_fw_event(tp);
11035 		}
11036 		tp->asf_counter = tp->asf_multiplier;
11037 	}
11038 
11039 	spin_unlock(&tp->lock);
11040 
11041 restart_timer:
11042 	tp->timer.expires = jiffies + tp->timer_offset;
11043 	add_timer(&tp->timer);
11044 }
11045 
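/* Set up the periodic timer.  Chips using tagged status run it once per
 * second, except for the 5717 and 57765-class devices, which need the more
 * frequent missed-MSI check; everything else runs it every 100 ms.  The
 * multipliers convert the once-per-second work and the ASF heartbeat
 * interval into timer ticks.
 */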
11046 static void tg3_timer_init(struct tg3 *tp)
11047 {
11048 	if (tg3_flag(tp, TAGGED_STATUS) &&
11049 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11050 	    !tg3_flag(tp, 57765_CLASS))
11051 		tp->timer_offset = HZ;
11052 	else
11053 		tp->timer_offset = HZ / 10;
11054 
11055 	BUG_ON(tp->timer_offset > HZ);
11056 
11057 	tp->timer_multiplier = (HZ / tp->timer_offset);
11058 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11059 			     TG3_FW_UPDATE_FREQ_SEC;
11060 
11061 	init_timer(&tp->timer);
11062 	tp->timer.data = (unsigned long) tp;
11063 	tp->timer.function = tg3_timer;
11064 }
11065 
11066 static void tg3_timer_start(struct tg3 *tp)
11067 {
11068 	tp->asf_counter   = tp->asf_multiplier;
11069 	tp->timer_counter = tp->timer_multiplier;
11070 
11071 	tp->timer.expires = jiffies + tp->timer_offset;
11072 	add_timer(&tp->timer);
11073 }
11074 
11075 static void tg3_timer_stop(struct tg3 *tp)
11076 {
11077 	del_timer_sync(&tp->timer);
11078 }
11079 
11080 /* Restart hardware after configuration changes, self-test, etc.
11081  * Invoked with tp->lock held.
11082  */
11083 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11084 	__releases(tp->lock)
11085 	__acquires(tp->lock)
11086 {
11087 	int err;
11088 
11089 	err = tg3_init_hw(tp, reset_phy);
11090 	if (err) {
11091 		netdev_err(tp->dev,
11092 			   "Failed to re-initialize device, aborting\n");
11093 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11094 		tg3_full_unlock(tp);
11095 		tg3_timer_stop(tp);
11096 		tp->irq_sync = 0;
11097 		tg3_napi_enable(tp);
11098 		dev_close(tp->dev);
11099 		tg3_full_lock(tp, 0);
11100 	}
11101 	return err;
11102 }
11103 
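/* Deferred error recovery, run from the workqueue via
 * tg3_reset_task_schedule(): stop the PHY and the network interface,
 * switch the mailbox writes to their flushed variants if a TX recovery
 * is pending, then halt and re-initialize the hardware.
 */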
11104 static void tg3_reset_task(struct work_struct *work)
11105 {
11106 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11107 	int err;
11108 
11109 	tg3_full_lock(tp, 0);
11110 
11111 	if (!netif_running(tp->dev)) {
11112 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11113 		tg3_full_unlock(tp);
11114 		return;
11115 	}
11116 
11117 	tg3_full_unlock(tp);
11118 
11119 	tg3_phy_stop(tp);
11120 
11121 	tg3_netif_stop(tp);
11122 
11123 	tg3_full_lock(tp, 1);
11124 
11125 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11126 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11127 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11128 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11129 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11130 	}
11131 
11132 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11133 	err = tg3_init_hw(tp, true);
11134 	if (err)
11135 		goto out;
11136 
11137 	tg3_netif_start(tp);
11138 
11139 out:
11140 	tg3_full_unlock(tp);
11141 
11142 	if (!err)
11143 		tg3_phy_start(tp);
11144 
11145 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11146 }
11147 
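/* Request the interrupt for one vector.  When multiple vectors are in use,
 * each gets a "<dev>-txrx-<n>" style name reflecting the rings it services.
 * MSI/MSI-X vectors use tg3_msi() (or tg3_msi_1shot()), while legacy INTx
 * is requested as a shared interrupt with the tagged or non-tagged handler.
 */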
11148 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11149 {
11150 	irq_handler_t fn;
11151 	unsigned long flags;
11152 	char *name;
11153 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11154 
11155 	if (tp->irq_cnt == 1)
11156 		name = tp->dev->name;
11157 	else {
11158 		name = &tnapi->irq_lbl[0];
11159 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11160 			snprintf(name, IFNAMSIZ,
11161 				 "%s-txrx-%d", tp->dev->name, irq_num);
11162 		else if (tnapi->tx_buffers)
11163 			snprintf(name, IFNAMSIZ,
11164 				 "%s-tx-%d", tp->dev->name, irq_num);
11165 		else if (tnapi->rx_rcb)
11166 			snprintf(name, IFNAMSIZ,
11167 				 "%s-rx-%d", tp->dev->name, irq_num);
11168 		else
11169 			snprintf(name, IFNAMSIZ,
11170 				 "%s-%d", tp->dev->name, irq_num);
11171 		name[IFNAMSIZ-1] = 0;
11172 	}
11173 
11174 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11175 		fn = tg3_msi;
11176 		if (tg3_flag(tp, 1SHOT_MSI))
11177 			fn = tg3_msi_1shot;
11178 		flags = 0;
11179 	} else {
11180 		fn = tg3_interrupt;
11181 		if (tg3_flag(tp, TAGGED_STATUS))
11182 			fn = tg3_interrupt_tagged;
11183 		flags = IRQF_SHARED;
11184 	}
11185 
11186 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11187 }
11188 
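/* Verify that interrupts are actually delivered.  Vector 0's handler is
 * temporarily replaced with tg3_test_isr, an interrupt is forced via the
 * host coalescing "now" bit, and the interrupt mailbox is polled for up to
 * roughly 50 ms.  MSI one-shot mode is disabled on 57765_PLUS chips for
 * the duration so the result is observable; the normal handler is restored
 * before returning.
 */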
11189 static int tg3_test_interrupt(struct tg3 *tp)
11190 {
11191 	struct tg3_napi *tnapi = &tp->napi[0];
11192 	struct net_device *dev = tp->dev;
11193 	int err, i, intr_ok = 0;
11194 	u32 val;
11195 
11196 	if (!netif_running(dev))
11197 		return -ENODEV;
11198 
11199 	tg3_disable_ints(tp);
11200 
11201 	free_irq(tnapi->irq_vec, tnapi);
11202 
11203 	/*
11204 	 * Turn off MSI one shot mode.  Otherwise this test has no way
11205 	 * to observe whether the interrupt was delivered.
11206 	 */
11207 	if (tg3_flag(tp, 57765_PLUS)) {
11208 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11209 		tw32(MSGINT_MODE, val);
11210 	}
11211 
11212 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11213 			  IRQF_SHARED, dev->name, tnapi);
11214 	if (err)
11215 		return err;
11216 
11217 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11218 	tg3_enable_ints(tp);
11219 
11220 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11221 	       tnapi->coal_now);
11222 
11223 	for (i = 0; i < 5; i++) {
11224 		u32 int_mbox, misc_host_ctrl;
11225 
11226 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11227 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11228 
11229 		if ((int_mbox != 0) ||
11230 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11231 			intr_ok = 1;
11232 			break;
11233 		}
11234 
11235 		if (tg3_flag(tp, 57765_PLUS) &&
11236 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11237 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11238 
11239 		msleep(10);
11240 	}
11241 
11242 	tg3_disable_ints(tp);
11243 
11244 	free_irq(tnapi->irq_vec, tnapi);
11245 
11246 	err = tg3_request_irq(tp, 0);
11247 
11248 	if (err)
11249 		return err;
11250 
11251 	if (intr_ok) {
11252 		/* Reenable MSI one shot mode. */
11253 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11254 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11255 			tw32(MSGINT_MODE, val);
11256 		}
11257 		return 0;
11258 	}
11259 
11260 	return -EIO;
11261 }
11262 
11263 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
11264  * mode is successfully restored.
11265  */
11266 static int tg3_test_msi(struct tg3 *tp)
11267 {
11268 	int err;
11269 	u16 pci_cmd;
11270 
11271 	if (!tg3_flag(tp, USING_MSI))
11272 		return 0;
11273 
11274 	/* Turn off SERR reporting in case MSI terminates with Master
11275 	 * Abort.
11276 	 */
11277 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11278 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11279 			      pci_cmd & ~PCI_COMMAND_SERR);
11280 
11281 	err = tg3_test_interrupt(tp);
11282 
11283 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11284 
11285 	if (!err)
11286 		return 0;
11287 
11288 	/* other failures */
11289 	if (err != -EIO)
11290 		return err;
11291 
11292 	/* MSI test failed, go back to INTx mode */
11293 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11294 		    "to INTx mode. Please report this failure to the PCI "
11295 		    "maintainer and include system chipset information\n");
11296 
11297 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11298 
11299 	pci_disable_msi(tp->pdev);
11300 
11301 	tg3_flag_clear(tp, USING_MSI);
11302 	tp->napi[0].irq_vec = tp->pdev->irq;
11303 
11304 	err = tg3_request_irq(tp, 0);
11305 	if (err)
11306 		return err;
11307 
11308 	/* Need to reset the chip because the MSI cycle may have terminated
11309 	 * with Master Abort.
11310 	 */
11311 	tg3_full_lock(tp, 1);
11312 
11313 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11314 	err = tg3_init_hw(tp, true);
11315 
11316 	tg3_full_unlock(tp);
11317 
11318 	if (err)
11319 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11320 
11321 	return err;
11322 }
11323 
11324 static int tg3_request_firmware(struct tg3 *tp)
11325 {
11326 	const struct tg3_firmware_hdr *fw_hdr;
11327 
11328 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11329 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11330 			   tp->fw_needed);
11331 		return -ENOENT;
11332 	}
11333 
11334 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11335 
11336 	/* The firmware blob starts with version numbers, followed by the
11337 	 * start address and the _full_ length including BSS sections
11338 	 * (which must be longer than the actual data, of course).
11339 	 */
11340 
11341 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11342 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11343 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11344 			   tp->fw_len, tp->fw_needed);
11345 		release_firmware(tp->fw);
11346 		tp->fw = NULL;
11347 		return -EINVAL;
11348 	}
11349 
11350 	/* We no longer need firmware; we have it. */
11351 	tp->fw_needed = NULL;
11352 	return 0;
11353 }
11354 
11355 static u32 tg3_irq_count(struct tg3 *tp)
11356 {
11357 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11358 
11359 	if (irq_cnt > 1) {
11360 		/* We want as many rx rings enabled as there are cpus.
11361 		 * In multiqueue MSI-X mode, the first MSI-X vector
11362 		 * only deals with link interrupts, etc, so we add
11363 		 * one to the number of vectors we are requesting.
11364 		 */
11365 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11366 	}
11367 
11368 	return irq_cnt;
11369 }
11370 
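/* Try to switch to MSI-X.  The RX ring count defaults to the stack's
 * default RSS queue count (capped at the hardware maximum), one extra
 * vector is requested for link and other misc interrupts, and the ring
 * counts are scaled back if the PCI core grants fewer vectors than
 * requested.
 */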
11371 static bool tg3_enable_msix(struct tg3 *tp)
11372 {
11373 	int i, rc;
11374 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11375 
11376 	tp->txq_cnt = tp->txq_req;
11377 	tp->rxq_cnt = tp->rxq_req;
11378 	if (!tp->rxq_cnt)
11379 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11380 	if (tp->rxq_cnt > tp->rxq_max)
11381 		tp->rxq_cnt = tp->rxq_max;
11382 
11383 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11384 	 * scheduling of the TX rings can cause starvation of rings with
11385 	 * small packets when other rings have TSO or jumbo packets.
11386 	 */
11387 	if (!tp->txq_req)
11388 		tp->txq_cnt = 1;
11389 
11390 	tp->irq_cnt = tg3_irq_count(tp);
11391 
11392 	for (i = 0; i < tp->irq_max; i++) {
11393 		msix_ent[i].entry  = i;
11394 		msix_ent[i].vector = 0;
11395 	}
11396 
11397 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11398 	if (rc < 0) {
11399 		return false;
11400 	} else if (rc < tp->irq_cnt) {
11401 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11402 			      tp->irq_cnt, rc);
11403 		tp->irq_cnt = rc;
11404 		tp->rxq_cnt = max(rc - 1, 1);
11405 		if (tp->txq_cnt)
11406 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11407 	}
11408 
11409 	for (i = 0; i < tp->irq_max; i++)
11410 		tp->napi[i].irq_vec = msix_ent[i].vector;
11411 
11412 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11413 		pci_disable_msix(tp->pdev);
11414 		return false;
11415 	}
11416 
11417 	if (tp->irq_cnt == 1)
11418 		return true;
11419 
11420 	tg3_flag_set(tp, ENABLE_RSS);
11421 
11422 	if (tp->txq_cnt > 1)
11423 		tg3_flag_set(tp, ENABLE_TSS);
11424 
11425 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11426 
11427 	return true;
11428 }
11429 
11430 static void tg3_ints_init(struct tg3 *tp)
11431 {
11432 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11433 	    !tg3_flag(tp, TAGGED_STATUS)) {
11434 		/* All MSI-supporting chips should support tagged
11435 		 * status; warn and fall back to INTx if this is
11436 		 * not the case.
11437 		 */
11437 		netdev_warn(tp->dev,
11438 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11439 		goto defcfg;
11440 	}
11441 
11442 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11443 		tg3_flag_set(tp, USING_MSIX);
11444 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11445 		tg3_flag_set(tp, USING_MSI);
11446 
11447 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11448 		u32 msi_mode = tr32(MSGINT_MODE);
11449 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11450 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11451 		if (!tg3_flag(tp, 1SHOT_MSI))
11452 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11453 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11454 	}
11455 defcfg:
11456 	if (!tg3_flag(tp, USING_MSIX)) {
11457 		tp->irq_cnt = 1;
11458 		tp->napi[0].irq_vec = tp->pdev->irq;
11459 	}
11460 
11461 	if (tp->irq_cnt == 1) {
11462 		tp->txq_cnt = 1;
11463 		tp->rxq_cnt = 1;
11464 		netif_set_real_num_tx_queues(tp->dev, 1);
11465 		netif_set_real_num_rx_queues(tp->dev, 1);
11466 	}
11467 }
11468 
11469 static void tg3_ints_fini(struct tg3 *tp)
11470 {
11471 	if (tg3_flag(tp, USING_MSIX))
11472 		pci_disable_msix(tp->pdev);
11473 	else if (tg3_flag(tp, USING_MSI))
11474 		pci_disable_msi(tp->pdev);
11475 	tg3_flag_clear(tp, USING_MSI);
11476 	tg3_flag_clear(tp, USING_MSIX);
11477 	tg3_flag_clear(tp, ENABLE_RSS);
11478 	tg3_flag_clear(tp, ENABLE_TSS);
11479 }
11480 
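/* Bring the device up: set up interrupt vectors and DMA-consistent memory,
 * enable NAPI, request the IRQs and initialize the hardware (optionally
 * verifying MSI delivery), then start the PHY, hwmon, timer, PTP clock and
 * the TX queues.  Used by tg3_open() and by paths such as tg3_set_channels()
 * that reconfigure the device while it is up.
 */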
11481 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11482 		     bool init)
11483 {
11484 	struct net_device *dev = tp->dev;
11485 	int i, err;
11486 
11487 	/*
11488 	 * Set up interrupts first so we know how
11489 	 * many NAPI resources to allocate
11490 	 */
11491 	tg3_ints_init(tp);
11492 
11493 	tg3_rss_check_indir_tbl(tp);
11494 
11495 	/* The placement of this call is tied
11496 	 * to the setup and use of Host TX descriptors.
11497 	 */
11498 	err = tg3_alloc_consistent(tp);
11499 	if (err)
11500 		goto out_ints_fini;
11501 
11502 	tg3_napi_init(tp);
11503 
11504 	tg3_napi_enable(tp);
11505 
11506 	for (i = 0; i < tp->irq_cnt; i++) {
11507 		struct tg3_napi *tnapi = &tp->napi[i];
11508 		err = tg3_request_irq(tp, i);
11509 		if (err) {
11510 			for (i--; i >= 0; i--) {
11511 				tnapi = &tp->napi[i];
11512 				free_irq(tnapi->irq_vec, tnapi);
11513 			}
11514 			goto out_napi_fini;
11515 		}
11516 	}
11517 
11518 	tg3_full_lock(tp, 0);
11519 
11520 	if (init)
11521 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11522 
11523 	err = tg3_init_hw(tp, reset_phy);
11524 	if (err) {
11525 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11526 		tg3_free_rings(tp);
11527 	}
11528 
11529 	tg3_full_unlock(tp);
11530 
11531 	if (err)
11532 		goto out_free_irq;
11533 
11534 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11535 		err = tg3_test_msi(tp);
11536 
11537 		if (err) {
11538 			tg3_full_lock(tp, 0);
11539 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11540 			tg3_free_rings(tp);
11541 			tg3_full_unlock(tp);
11542 
11543 			goto out_napi_fini;
11544 		}
11545 
11546 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11547 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11548 
11549 			tw32(PCIE_TRANSACTION_CFG,
11550 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11551 		}
11552 	}
11553 
11554 	tg3_phy_start(tp);
11555 
11556 	tg3_hwmon_open(tp);
11557 
11558 	tg3_full_lock(tp, 0);
11559 
11560 	tg3_timer_start(tp);
11561 	tg3_flag_set(tp, INIT_COMPLETE);
11562 	tg3_enable_ints(tp);
11563 
11564 	if (init)
11565 		tg3_ptp_init(tp);
11566 	else
11567 		tg3_ptp_resume(tp);
11568 
11570 	tg3_full_unlock(tp);
11571 
11572 	netif_tx_start_all_queues(dev);
11573 
11574 	/*
11575 	 * If the loopback feature was turned on while the device was down,
11576 	 * reset it now to make sure that it is installed properly.
11577 	 */
11578 	if (dev->features & NETIF_F_LOOPBACK)
11579 		tg3_set_loopback(dev, dev->features);
11580 
11581 	return 0;
11582 
11583 out_free_irq:
11584 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11585 		struct tg3_napi *tnapi = &tp->napi[i];
11586 		free_irq(tnapi->irq_vec, tnapi);
11587 	}
11588 
11589 out_napi_fini:
11590 	tg3_napi_disable(tp);
11591 	tg3_napi_fini(tp);
11592 	tg3_free_consistent(tp);
11593 
11594 out_ints_fini:
11595 	tg3_ints_fini(tp);
11596 
11597 	return err;
11598 }
11599 
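/* Tear down everything set up by tg3_start(): cancel any pending reset
 * task, stop the interface, timer, hwmon and PHY, halt the hardware, then
 * release the IRQ vectors, NAPI contexts and DMA-consistent memory.
 */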
11600 static void tg3_stop(struct tg3 *tp)
11601 {
11602 	int i;
11603 
11604 	tg3_reset_task_cancel(tp);
11605 	tg3_netif_stop(tp);
11606 
11607 	tg3_timer_stop(tp);
11608 
11609 	tg3_hwmon_close(tp);
11610 
11611 	tg3_phy_stop(tp);
11612 
11613 	tg3_full_lock(tp, 1);
11614 
11615 	tg3_disable_ints(tp);
11616 
11617 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11618 	tg3_free_rings(tp);
11619 	tg3_flag_clear(tp, INIT_COMPLETE);
11620 
11621 	tg3_full_unlock(tp);
11622 
11623 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11624 		struct tg3_napi *tnapi = &tp->napi[i];
11625 		free_irq(tnapi->irq_vec, tnapi);
11626 	}
11627 
11628 	tg3_ints_fini(tp);
11629 
11630 	tg3_napi_fini(tp);
11631 
11632 	tg3_free_consistent(tp);
11633 }
11634 
11635 static int tg3_open(struct net_device *dev)
11636 {
11637 	struct tg3 *tp = netdev_priv(dev);
11638 	int err;
11639 
11640 	if (tp->pcierr_recovery) {
11641 		netdev_err(dev, "Failed to open device. PCI error recovery "
11642 			   "in progress\n");
11643 		return -EAGAIN;
11644 	}
11645 
11646 	if (tp->fw_needed) {
11647 		err = tg3_request_firmware(tp);
11648 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11649 			if (err) {
11650 				netdev_warn(tp->dev, "EEE capability disabled\n");
11651 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11652 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11653 				netdev_warn(tp->dev, "EEE capability restored\n");
11654 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11655 			}
11656 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11657 			if (err)
11658 				return err;
11659 		} else if (err) {
11660 			netdev_warn(tp->dev, "TSO capability disabled\n");
11661 			tg3_flag_clear(tp, TSO_CAPABLE);
11662 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11663 			netdev_notice(tp->dev, "TSO capability restored\n");
11664 			tg3_flag_set(tp, TSO_CAPABLE);
11665 		}
11666 	}
11667 
11668 	tg3_carrier_off(tp);
11669 
11670 	err = tg3_power_up(tp);
11671 	if (err)
11672 		return err;
11673 
11674 	tg3_full_lock(tp, 0);
11675 
11676 	tg3_disable_ints(tp);
11677 	tg3_flag_clear(tp, INIT_COMPLETE);
11678 
11679 	tg3_full_unlock(tp);
11680 
11681 	err = tg3_start(tp,
11682 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11683 			true, true);
11684 	if (err) {
11685 		tg3_frob_aux_power(tp, false);
11686 		pci_set_power_state(tp->pdev, PCI_D3hot);
11687 	}
11688 
11689 	if (tg3_flag(tp, PTP_CAPABLE)) {
11690 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11691 						   &tp->pdev->dev);
11692 		if (IS_ERR(tp->ptp_clock))
11693 			tp->ptp_clock = NULL;
11694 	}
11695 
11696 	return err;
11697 }
11698 
11699 static int tg3_close(struct net_device *dev)
11700 {
11701 	struct tg3 *tp = netdev_priv(dev);
11702 
11703 	if (tp->pcierr_recovery) {
11704 		netdev_err(dev, "Failed to close device. PCI error recovery "
11705 			   "in progress\n");
11706 		return -EAGAIN;
11707 	}
11708 
11709 	tg3_ptp_fini(tp);
11710 
11711 	tg3_stop(tp);
11712 
11713 	/* Clear stats across close / open calls */
11714 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11715 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11716 
11717 	if (pci_device_is_present(tp->pdev)) {
11718 		tg3_power_down_prepare(tp);
11719 
11720 		tg3_carrier_off(tp);
11721 	}
11722 	return 0;
11723 }
11724 
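/* Hardware statistics counters are kept as 32-bit high/low pairs;
 * combine them into a single 64-bit value.
 */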
11725 static inline u64 get_stat64(tg3_stat64_t *val)
11726 {
11727 	return ((u64)val->high << 32) | ((u64)val->low);
11728 }
11729 
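/* On 5700/5701 chips with a copper PHY, CRC errors are read from the PHY
 * (counting is enabled via the TEST1 CRC_EN bit and the count read back
 * through MII_TG3_RXR_COUNTERS) and accumulated in phy_crc_errors.  All
 * other configurations report the MAC's rx_fcs_errors statistic.
 */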
11730 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11731 {
11732 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11733 
11734 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11735 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11736 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11737 		u32 val;
11738 
11739 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11740 			tg3_writephy(tp, MII_TG3_TEST1,
11741 				     val | MII_TG3_TEST1_CRC_EN);
11742 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11743 		} else
11744 			val = 0;
11745 
11746 		tp->phy_crc_errors += val;
11747 
11748 		return tp->phy_crc_errors;
11749 	}
11750 
11751 	return get_stat64(&hw_stats->rx_fcs_errors);
11752 }
11753 
11754 #define ESTAT_ADD(member) \
11755 	estats->member =	old_estats->member + \
11756 				get_stat64(&hw_stats->member)
11757 
11758 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11759 {
11760 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11761 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11762 
11763 	ESTAT_ADD(rx_octets);
11764 	ESTAT_ADD(rx_fragments);
11765 	ESTAT_ADD(rx_ucast_packets);
11766 	ESTAT_ADD(rx_mcast_packets);
11767 	ESTAT_ADD(rx_bcast_packets);
11768 	ESTAT_ADD(rx_fcs_errors);
11769 	ESTAT_ADD(rx_align_errors);
11770 	ESTAT_ADD(rx_xon_pause_rcvd);
11771 	ESTAT_ADD(rx_xoff_pause_rcvd);
11772 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11773 	ESTAT_ADD(rx_xoff_entered);
11774 	ESTAT_ADD(rx_frame_too_long_errors);
11775 	ESTAT_ADD(rx_jabbers);
11776 	ESTAT_ADD(rx_undersize_packets);
11777 	ESTAT_ADD(rx_in_length_errors);
11778 	ESTAT_ADD(rx_out_length_errors);
11779 	ESTAT_ADD(rx_64_or_less_octet_packets);
11780 	ESTAT_ADD(rx_65_to_127_octet_packets);
11781 	ESTAT_ADD(rx_128_to_255_octet_packets);
11782 	ESTAT_ADD(rx_256_to_511_octet_packets);
11783 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11784 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11785 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11786 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11787 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11788 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11789 
11790 	ESTAT_ADD(tx_octets);
11791 	ESTAT_ADD(tx_collisions);
11792 	ESTAT_ADD(tx_xon_sent);
11793 	ESTAT_ADD(tx_xoff_sent);
11794 	ESTAT_ADD(tx_flow_control);
11795 	ESTAT_ADD(tx_mac_errors);
11796 	ESTAT_ADD(tx_single_collisions);
11797 	ESTAT_ADD(tx_mult_collisions);
11798 	ESTAT_ADD(tx_deferred);
11799 	ESTAT_ADD(tx_excessive_collisions);
11800 	ESTAT_ADD(tx_late_collisions);
11801 	ESTAT_ADD(tx_collide_2times);
11802 	ESTAT_ADD(tx_collide_3times);
11803 	ESTAT_ADD(tx_collide_4times);
11804 	ESTAT_ADD(tx_collide_5times);
11805 	ESTAT_ADD(tx_collide_6times);
11806 	ESTAT_ADD(tx_collide_7times);
11807 	ESTAT_ADD(tx_collide_8times);
11808 	ESTAT_ADD(tx_collide_9times);
11809 	ESTAT_ADD(tx_collide_10times);
11810 	ESTAT_ADD(tx_collide_11times);
11811 	ESTAT_ADD(tx_collide_12times);
11812 	ESTAT_ADD(tx_collide_13times);
11813 	ESTAT_ADD(tx_collide_14times);
11814 	ESTAT_ADD(tx_collide_15times);
11815 	ESTAT_ADD(tx_ucast_packets);
11816 	ESTAT_ADD(tx_mcast_packets);
11817 	ESTAT_ADD(tx_bcast_packets);
11818 	ESTAT_ADD(tx_carrier_sense_errors);
11819 	ESTAT_ADD(tx_discards);
11820 	ESTAT_ADD(tx_errors);
11821 
11822 	ESTAT_ADD(dma_writeq_full);
11823 	ESTAT_ADD(dma_write_prioq_full);
11824 	ESTAT_ADD(rxbds_empty);
11825 	ESTAT_ADD(rx_discards);
11826 	ESTAT_ADD(rx_errors);
11827 	ESTAT_ADD(rx_threshold_hit);
11828 
11829 	ESTAT_ADD(dma_readq_full);
11830 	ESTAT_ADD(dma_read_prioq_full);
11831 	ESTAT_ADD(tx_comp_queue_full);
11832 
11833 	ESTAT_ADD(ring_set_send_prod_index);
11834 	ESTAT_ADD(ring_status_update);
11835 	ESTAT_ADD(nic_irqs);
11836 	ESTAT_ADD(nic_avoided_irqs);
11837 	ESTAT_ADD(nic_tx_threshold_hit);
11838 
11839 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11840 }
11841 
11842 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11843 {
11844 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11845 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11846 
11847 	stats->rx_packets = old_stats->rx_packets +
11848 		get_stat64(&hw_stats->rx_ucast_packets) +
11849 		get_stat64(&hw_stats->rx_mcast_packets) +
11850 		get_stat64(&hw_stats->rx_bcast_packets);
11851 
11852 	stats->tx_packets = old_stats->tx_packets +
11853 		get_stat64(&hw_stats->tx_ucast_packets) +
11854 		get_stat64(&hw_stats->tx_mcast_packets) +
11855 		get_stat64(&hw_stats->tx_bcast_packets);
11856 
11857 	stats->rx_bytes = old_stats->rx_bytes +
11858 		get_stat64(&hw_stats->rx_octets);
11859 	stats->tx_bytes = old_stats->tx_bytes +
11860 		get_stat64(&hw_stats->tx_octets);
11861 
11862 	stats->rx_errors = old_stats->rx_errors +
11863 		get_stat64(&hw_stats->rx_errors);
11864 	stats->tx_errors = old_stats->tx_errors +
11865 		get_stat64(&hw_stats->tx_errors) +
11866 		get_stat64(&hw_stats->tx_mac_errors) +
11867 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11868 		get_stat64(&hw_stats->tx_discards);
11869 
11870 	stats->multicast = old_stats->multicast +
11871 		get_stat64(&hw_stats->rx_mcast_packets);
11872 	stats->collisions = old_stats->collisions +
11873 		get_stat64(&hw_stats->tx_collisions);
11874 
11875 	stats->rx_length_errors = old_stats->rx_length_errors +
11876 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11877 		get_stat64(&hw_stats->rx_undersize_packets);
11878 
11879 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11880 		get_stat64(&hw_stats->rx_align_errors);
11881 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11882 		get_stat64(&hw_stats->tx_discards);
11883 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11884 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11885 
11886 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11887 		tg3_calc_crc_errors(tp);
11888 
11889 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11890 		get_stat64(&hw_stats->rx_discards);
11891 
11892 	stats->rx_dropped = tp->rx_dropped;
11893 	stats->tx_dropped = tp->tx_dropped;
11894 }
11895 
11896 static int tg3_get_regs_len(struct net_device *dev)
11897 {
11898 	return TG3_REG_BLK_SIZE;
11899 }
11900 
11901 static void tg3_get_regs(struct net_device *dev,
11902 		struct ethtool_regs *regs, void *_p)
11903 {
11904 	struct tg3 *tp = netdev_priv(dev);
11905 
11906 	regs->version = 0;
11907 
11908 	memset(_p, 0, TG3_REG_BLK_SIZE);
11909 
11910 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11911 		return;
11912 
11913 	tg3_full_lock(tp, 0);
11914 
11915 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11916 
11917 	tg3_full_unlock(tp);
11918 }
11919 
11920 static int tg3_get_eeprom_len(struct net_device *dev)
11921 {
11922 	struct tg3 *tp = netdev_priv(dev);
11923 
11924 	return tp->nvram_size;
11925 }
11926 
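/* ethtool EEPROM read.  NVRAM is accessed through the 4-byte-aligned
 * tg3_nvram_read_be32() interface, so a leading partial word, the aligned
 * middle and a trailing partial word are handled separately.  The core
 * clock and the CPMU link-aware/idle modes are temporarily overridden
 * while the read is in progress.
 */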
11927 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11928 {
11929 	struct tg3 *tp = netdev_priv(dev);
11930 	int ret, cpmu_restore = 0;
11931 	u8  *pd;
11932 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11933 	__be32 val;
11934 
11935 	if (tg3_flag(tp, NO_NVRAM))
11936 		return -EINVAL;
11937 
11938 	offset = eeprom->offset;
11939 	len = eeprom->len;
11940 	eeprom->len = 0;
11941 
11942 	eeprom->magic = TG3_EEPROM_MAGIC;
11943 
11944 	/* Override clock, link aware and link idle modes */
11945 	if (tg3_flag(tp, CPMU_PRESENT)) {
11946 		cpmu_val = tr32(TG3_CPMU_CTRL);
11947 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11948 				CPMU_CTRL_LINK_IDLE_MODE)) {
11949 			tw32(TG3_CPMU_CTRL, cpmu_val &
11950 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
11951 					     CPMU_CTRL_LINK_IDLE_MODE));
11952 			cpmu_restore = 1;
11953 		}
11954 	}
11955 	tg3_override_clk(tp);
11956 
11957 	if (offset & 3) {
11958 		/* adjustments to start on required 4 byte boundary */
11959 		b_offset = offset & 3;
11960 		b_count = 4 - b_offset;
11961 		if (b_count > len) {
11962 			/* i.e. offset=1 len=2 */
11963 			b_count = len;
11964 		}
11965 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11966 		if (ret)
11967 			goto eeprom_done;
11968 		memcpy(data, ((char *)&val) + b_offset, b_count);
11969 		len -= b_count;
11970 		offset += b_count;
11971 		eeprom->len += b_count;
11972 	}
11973 
11974 	/* read bytes up to the last 4 byte boundary */
11975 	pd = &data[eeprom->len];
11976 	for (i = 0; i < (len - (len & 3)); i += 4) {
11977 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11978 		if (ret) {
11979 			if (i)
11980 				i -= 4;
11981 			eeprom->len += i;
11982 			goto eeprom_done;
11983 		}
11984 		memcpy(pd + i, &val, 4);
11985 		if (need_resched()) {
11986 			if (signal_pending(current)) {
11987 				eeprom->len += i;
11988 				ret = -EINTR;
11989 				goto eeprom_done;
11990 			}
11991 			cond_resched();
11992 		}
11993 	}
11994 	eeprom->len += i;
11995 
11996 	if (len & 3) {
11997 		/* read last bytes not ending on 4 byte boundary */
11998 		pd = &data[eeprom->len];
11999 		b_count = len & 3;
12000 		b_offset = offset + len - b_count;
12001 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12002 		if (ret)
12003 			goto eeprom_done;
12004 		memcpy(pd, &val, b_count);
12005 		eeprom->len += b_count;
12006 	}
12007 	ret = 0;
12008 
12009 eeprom_done:
12010 	/* Restore clock, link aware and link idle modes */
12011 	tg3_restore_clk(tp);
12012 	if (cpmu_restore)
12013 		tw32(TG3_CPMU_CTRL, cpmu_val);
12014 
12015 	return ret;
12016 }
12017 
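/* ethtool EEPROM write.  NVRAM writes are 4-byte aligned, so an unaligned
 * start or length is widened by reading back the neighbouring words into a
 * bounce buffer around the caller's data before programming the block.
 */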
12018 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12019 {
12020 	struct tg3 *tp = netdev_priv(dev);
12021 	int ret;
12022 	u32 offset, len, b_offset, odd_len;
12023 	u8 *buf;
12024 	__be32 start, end;
12025 
12026 	if (tg3_flag(tp, NO_NVRAM) ||
12027 	    eeprom->magic != TG3_EEPROM_MAGIC)
12028 		return -EINVAL;
12029 
12030 	offset = eeprom->offset;
12031 	len = eeprom->len;
12032 
12033 	if ((b_offset = (offset & 3))) {
12034 		/* adjustments to start on required 4 byte boundary */
12035 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12036 		if (ret)
12037 			return ret;
12038 		len += b_offset;
12039 		offset &= ~3;
12040 		if (len < 4)
12041 			len = 4;
12042 	}
12043 
12044 	odd_len = 0;
12045 	if (len & 3) {
12046 		/* adjustments to end on required 4 byte boundary */
12047 		odd_len = 1;
12048 		len = (len + 3) & ~3;
12049 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12050 		if (ret)
12051 			return ret;
12052 	}
12053 
12054 	buf = data;
12055 	if (b_offset || odd_len) {
12056 		buf = kmalloc(len, GFP_KERNEL);
12057 		if (!buf)
12058 			return -ENOMEM;
12059 		if (b_offset)
12060 			memcpy(buf, &start, 4);
12061 		if (odd_len)
12062 			memcpy(buf+len-4, &end, 4);
12063 		memcpy(buf + b_offset, data, eeprom->len);
12064 	}
12065 
12066 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12067 
12068 	if (buf != data)
12069 		kfree(buf);
12070 
12071 	return ret;
12072 }
12073 
12074 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12075 {
12076 	struct tg3 *tp = netdev_priv(dev);
12077 
12078 	if (tg3_flag(tp, USE_PHYLIB)) {
12079 		struct phy_device *phydev;
12080 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12081 			return -EAGAIN;
12082 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12083 		return phy_ethtool_gset(phydev, cmd);
12084 	}
12085 
12086 	cmd->supported = (SUPPORTED_Autoneg);
12087 
12088 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12089 		cmd->supported |= (SUPPORTED_1000baseT_Half |
12090 				   SUPPORTED_1000baseT_Full);
12091 
12092 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12093 		cmd->supported |= (SUPPORTED_100baseT_Half |
12094 				  SUPPORTED_100baseT_Full |
12095 				  SUPPORTED_10baseT_Half |
12096 				  SUPPORTED_10baseT_Full |
12097 				  SUPPORTED_TP);
12098 		cmd->port = PORT_TP;
12099 	} else {
12100 		cmd->supported |= SUPPORTED_FIBRE;
12101 		cmd->port = PORT_FIBRE;
12102 	}
12103 
12104 	cmd->advertising = tp->link_config.advertising;
12105 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12106 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12107 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12108 				cmd->advertising |= ADVERTISED_Pause;
12109 			} else {
12110 				cmd->advertising |= ADVERTISED_Pause |
12111 						    ADVERTISED_Asym_Pause;
12112 			}
12113 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12114 			cmd->advertising |= ADVERTISED_Asym_Pause;
12115 		}
12116 	}
12117 	if (netif_running(dev) && tp->link_up) {
12118 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12119 		cmd->duplex = tp->link_config.active_duplex;
12120 		cmd->lp_advertising = tp->link_config.rmt_adv;
12121 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12122 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12123 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
12124 			else
12125 				cmd->eth_tp_mdix = ETH_TP_MDI;
12126 		}
12127 	} else {
12128 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12129 		cmd->duplex = DUPLEX_UNKNOWN;
12130 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12131 	}
12132 	cmd->phy_address = tp->phy_addr;
12133 	cmd->transceiver = XCVR_INTERNAL;
12134 	cmd->autoneg = tp->link_config.autoneg;
12135 	cmd->maxtxpkt = 0;
12136 	cmd->maxrxpkt = 0;
12137 	return 0;
12138 }
12139 
12140 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12141 {
12142 	struct tg3 *tp = netdev_priv(dev);
12143 	u32 speed = ethtool_cmd_speed(cmd);
12144 
12145 	if (tg3_flag(tp, USE_PHYLIB)) {
12146 		struct phy_device *phydev;
12147 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12148 			return -EAGAIN;
12149 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12150 		return phy_ethtool_sset(phydev, cmd);
12151 	}
12152 
12153 	if (cmd->autoneg != AUTONEG_ENABLE &&
12154 	    cmd->autoneg != AUTONEG_DISABLE)
12155 		return -EINVAL;
12156 
12157 	if (cmd->autoneg == AUTONEG_DISABLE &&
12158 	    cmd->duplex != DUPLEX_FULL &&
12159 	    cmd->duplex != DUPLEX_HALF)
12160 		return -EINVAL;
12161 
12162 	if (cmd->autoneg == AUTONEG_ENABLE) {
12163 		u32 mask = ADVERTISED_Autoneg |
12164 			   ADVERTISED_Pause |
12165 			   ADVERTISED_Asym_Pause;
12166 
12167 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12168 			mask |= ADVERTISED_1000baseT_Half |
12169 				ADVERTISED_1000baseT_Full;
12170 
12171 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12172 			mask |= ADVERTISED_100baseT_Half |
12173 				ADVERTISED_100baseT_Full |
12174 				ADVERTISED_10baseT_Half |
12175 				ADVERTISED_10baseT_Full |
12176 				ADVERTISED_TP;
12177 		else
12178 			mask |= ADVERTISED_FIBRE;
12179 
12180 		if (cmd->advertising & ~mask)
12181 			return -EINVAL;
12182 
12183 		mask &= (ADVERTISED_1000baseT_Half |
12184 			 ADVERTISED_1000baseT_Full |
12185 			 ADVERTISED_100baseT_Half |
12186 			 ADVERTISED_100baseT_Full |
12187 			 ADVERTISED_10baseT_Half |
12188 			 ADVERTISED_10baseT_Full);
12189 
12190 		cmd->advertising &= mask;
12191 	} else {
12192 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12193 			if (speed != SPEED_1000)
12194 				return -EINVAL;
12195 
12196 			if (cmd->duplex != DUPLEX_FULL)
12197 				return -EINVAL;
12198 		} else {
12199 			if (speed != SPEED_100 &&
12200 			    speed != SPEED_10)
12201 				return -EINVAL;
12202 		}
12203 	}
12204 
12205 	tg3_full_lock(tp, 0);
12206 
12207 	tp->link_config.autoneg = cmd->autoneg;
12208 	if (cmd->autoneg == AUTONEG_ENABLE) {
12209 		tp->link_config.advertising = (cmd->advertising |
12210 					      ADVERTISED_Autoneg);
12211 		tp->link_config.speed = SPEED_UNKNOWN;
12212 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12213 	} else {
12214 		tp->link_config.advertising = 0;
12215 		tp->link_config.speed = speed;
12216 		tp->link_config.duplex = cmd->duplex;
12217 	}
12218 
12219 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12220 
12221 	tg3_warn_mgmt_link_flap(tp);
12222 
12223 	if (netif_running(dev))
12224 		tg3_setup_phy(tp, true);
12225 
12226 	tg3_full_unlock(tp);
12227 
12228 	return 0;
12229 }
12230 
12231 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12232 {
12233 	struct tg3 *tp = netdev_priv(dev);
12234 
12235 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12236 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12237 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12238 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12239 }
12240 
12241 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12242 {
12243 	struct tg3 *tp = netdev_priv(dev);
12244 
12245 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12246 		wol->supported = WAKE_MAGIC;
12247 	else
12248 		wol->supported = 0;
12249 	wol->wolopts = 0;
12250 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12251 		wol->wolopts = WAKE_MAGIC;
12252 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12253 }
12254 
12255 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12256 {
12257 	struct tg3 *tp = netdev_priv(dev);
12258 	struct device *dp = &tp->pdev->dev;
12259 
12260 	if (wol->wolopts & ~WAKE_MAGIC)
12261 		return -EINVAL;
12262 	if ((wol->wolopts & WAKE_MAGIC) &&
12263 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12264 		return -EINVAL;
12265 
12266 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12267 
12268 	if (device_may_wakeup(dp))
12269 		tg3_flag_set(tp, WOL_ENABLE);
12270 	else
12271 		tg3_flag_clear(tp, WOL_ENABLE);
12272 
12273 	return 0;
12274 }
12275 
12276 static u32 tg3_get_msglevel(struct net_device *dev)
12277 {
12278 	struct tg3 *tp = netdev_priv(dev);
12279 	return tp->msg_enable;
12280 }
12281 
12282 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12283 {
12284 	struct tg3 *tp = netdev_priv(dev);
12285 	tp->msg_enable = value;
12286 }
12287 
12288 static int tg3_nway_reset(struct net_device *dev)
12289 {
12290 	struct tg3 *tp = netdev_priv(dev);
12291 	int r;
12292 
12293 	if (!netif_running(dev))
12294 		return -EAGAIN;
12295 
12296 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12297 		return -EINVAL;
12298 
12299 	tg3_warn_mgmt_link_flap(tp);
12300 
12301 	if (tg3_flag(tp, USE_PHYLIB)) {
12302 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12303 			return -EAGAIN;
12304 		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12305 	} else {
12306 		u32 bmcr;
12307 
12308 		spin_lock_bh(&tp->lock);
12309 		r = -EINVAL;
12310 		tg3_readphy(tp, MII_BMCR, &bmcr);
12311 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12312 		    ((bmcr & BMCR_ANENABLE) ||
12313 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12314 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12315 						   BMCR_ANENABLE);
12316 			r = 0;
12317 		}
12318 		spin_unlock_bh(&tp->lock);
12319 	}
12320 
12321 	return r;
12322 }
12323 
12324 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12325 {
12326 	struct tg3 *tp = netdev_priv(dev);
12327 
12328 	ering->rx_max_pending = tp->rx_std_ring_mask;
12329 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12330 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12331 	else
12332 		ering->rx_jumbo_max_pending = 0;
12333 
12334 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12335 
12336 	ering->rx_pending = tp->rx_pending;
12337 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12338 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12339 	else
12340 		ering->rx_jumbo_pending = 0;
12341 
12342 	ering->tx_pending = tp->napi[0].tx_pending;
12343 }
12344 
12345 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12346 {
12347 	struct tg3 *tp = netdev_priv(dev);
12348 	int i, irq_sync = 0, err = 0;
12349 
12350 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12351 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12352 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12353 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12354 	    (tg3_flag(tp, TSO_BUG) &&
12355 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12356 		return -EINVAL;
12357 
12358 	if (netif_running(dev)) {
12359 		tg3_phy_stop(tp);
12360 		tg3_netif_stop(tp);
12361 		irq_sync = 1;
12362 	}
12363 
12364 	tg3_full_lock(tp, irq_sync);
12365 
12366 	tp->rx_pending = ering->rx_pending;
12367 
12368 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12369 	    tp->rx_pending > 63)
12370 		tp->rx_pending = 63;
12371 
12372 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12373 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12374 
12375 	for (i = 0; i < tp->irq_max; i++)
12376 		tp->napi[i].tx_pending = ering->tx_pending;
12377 
12378 	if (netif_running(dev)) {
12379 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12380 		err = tg3_restart_hw(tp, false);
12381 		if (!err)
12382 			tg3_netif_start(tp);
12383 	}
12384 
12385 	tg3_full_unlock(tp);
12386 
12387 	if (irq_sync && !err)
12388 		tg3_phy_start(tp);
12389 
12390 	return err;
12391 }
12392 
12393 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12394 {
12395 	struct tg3 *tp = netdev_priv(dev);
12396 
12397 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12398 
12399 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12400 		epause->rx_pause = 1;
12401 	else
12402 		epause->rx_pause = 0;
12403 
12404 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12405 		epause->tx_pause = 1;
12406 	else
12407 		epause->tx_pause = 0;
12408 }
12409 
12410 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12411 {
12412 	struct tg3 *tp = netdev_priv(dev);
12413 	int err = 0;
12414 
12415 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12416 		tg3_warn_mgmt_link_flap(tp);
12417 
12418 	if (tg3_flag(tp, USE_PHYLIB)) {
12419 		u32 newadv;
12420 		struct phy_device *phydev;
12421 
12422 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12423 
12424 		if (!(phydev->supported & SUPPORTED_Pause) ||
12425 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12426 		     (epause->rx_pause != epause->tx_pause)))
12427 			return -EINVAL;
12428 
12429 		tp->link_config.flowctrl = 0;
12430 		if (epause->rx_pause) {
12431 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12432 
12433 			if (epause->tx_pause) {
12434 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12435 				newadv = ADVERTISED_Pause;
12436 			} else
12437 				newadv = ADVERTISED_Pause |
12438 					 ADVERTISED_Asym_Pause;
12439 		} else if (epause->tx_pause) {
12440 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12441 			newadv = ADVERTISED_Asym_Pause;
12442 		} else
12443 			newadv = 0;
12444 
12445 		if (epause->autoneg)
12446 			tg3_flag_set(tp, PAUSE_AUTONEG);
12447 		else
12448 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12449 
12450 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12451 			u32 oldadv = phydev->advertising &
12452 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12453 			if (oldadv != newadv) {
12454 				phydev->advertising &=
12455 					~(ADVERTISED_Pause |
12456 					  ADVERTISED_Asym_Pause);
12457 				phydev->advertising |= newadv;
12458 				if (phydev->autoneg) {
12459 					/*
12460 					 * Always renegotiate the link to
12461 					 * inform our link partner of our
12462 					 * flow control settings, even if the
12463 					 * flow control is forced.  Let
12464 					 * tg3_adjust_link() do the final
12465 					 * flow control setup.
12466 					 */
12467 					return phy_start_aneg(phydev);
12468 				}
12469 			}
12470 
12471 			if (!epause->autoneg)
12472 				tg3_setup_flow_control(tp, 0, 0);
12473 		} else {
12474 			tp->link_config.advertising &=
12475 					~(ADVERTISED_Pause |
12476 					  ADVERTISED_Asym_Pause);
12477 			tp->link_config.advertising |= newadv;
12478 		}
12479 	} else {
12480 		int irq_sync = 0;
12481 
12482 		if (netif_running(dev)) {
12483 			tg3_netif_stop(tp);
12484 			irq_sync = 1;
12485 		}
12486 
12487 		tg3_full_lock(tp, irq_sync);
12488 
12489 		if (epause->autoneg)
12490 			tg3_flag_set(tp, PAUSE_AUTONEG);
12491 		else
12492 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12493 		if (epause->rx_pause)
12494 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12495 		else
12496 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12497 		if (epause->tx_pause)
12498 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12499 		else
12500 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12501 
12502 		if (netif_running(dev)) {
12503 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12504 			err = tg3_restart_hw(tp, false);
12505 			if (!err)
12506 				tg3_netif_start(tp);
12507 		}
12508 
12509 		tg3_full_unlock(tp);
12510 	}
12511 
12512 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12513 
12514 	return err;
12515 }
12516 
12517 static int tg3_get_sset_count(struct net_device *dev, int sset)
12518 {
12519 	switch (sset) {
12520 	case ETH_SS_TEST:
12521 		return TG3_NUM_TEST;
12522 	case ETH_SS_STATS:
12523 		return TG3_NUM_STATS;
12524 	default:
12525 		return -EOPNOTSUPP;
12526 	}
12527 }
12528 
12529 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12530 			 u32 *rules __always_unused)
12531 {
12532 	struct tg3 *tp = netdev_priv(dev);
12533 
12534 	if (!tg3_flag(tp, SUPPORT_MSIX))
12535 		return -EOPNOTSUPP;
12536 
12537 	switch (info->cmd) {
12538 	case ETHTOOL_GRXRINGS:
12539 		if (netif_running(tp->dev))
12540 			info->data = tp->rxq_cnt;
12541 		else {
12542 			info->data = num_online_cpus();
12543 			if (info->data > TG3_RSS_MAX_NUM_QS)
12544 				info->data = TG3_RSS_MAX_NUM_QS;
12545 		}
12546 
12547 		/* The first interrupt vector only
12548 		 * handles link interrupts.
12549 		 */
12550 		info->data -= 1;
12551 		return 0;
12552 
12553 	default:
12554 		return -EOPNOTSUPP;
12555 	}
12556 }
12557 
12558 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12559 {
12560 	u32 size = 0;
12561 	struct tg3 *tp = netdev_priv(dev);
12562 
12563 	if (tg3_flag(tp, SUPPORT_MSIX))
12564 		size = TG3_RSS_INDIR_TBL_SIZE;
12565 
12566 	return size;
12567 }
12568 
12569 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
12570 {
12571 	struct tg3 *tp = netdev_priv(dev);
12572 	int i;
12573 
12574 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12575 		indir[i] = tp->rss_ind_tbl[i];
12576 
12577 	return 0;
12578 }
12579 
12580 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
12581 {
12582 	struct tg3 *tp = netdev_priv(dev);
12583 	size_t i;
12584 
12585 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12586 		tp->rss_ind_tbl[i] = indir[i];
12587 
12588 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12589 		return 0;
12590 
12591 	/* It is legal to write the indirection
12592 	 * table while the device is running.
12593 	 */
12594 	tg3_full_lock(tp, 0);
12595 	tg3_rss_write_indir_tbl(tp);
12596 	tg3_full_unlock(tp);
12597 
12598 	return 0;
12599 }
12600 
12601 static void tg3_get_channels(struct net_device *dev,
12602 			     struct ethtool_channels *channel)
12603 {
12604 	struct tg3 *tp = netdev_priv(dev);
12605 	u32 deflt_qs = netif_get_num_default_rss_queues();
12606 
12607 	channel->max_rx = tp->rxq_max;
12608 	channel->max_tx = tp->txq_max;
12609 
12610 	if (netif_running(dev)) {
12611 		channel->rx_count = tp->rxq_cnt;
12612 		channel->tx_count = tp->txq_cnt;
12613 	} else {
12614 		if (tp->rxq_req)
12615 			channel->rx_count = tp->rxq_req;
12616 		else
12617 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12618 
12619 		if (tp->txq_req)
12620 			channel->tx_count = tp->txq_req;
12621 		else
12622 			channel->tx_count = min(deflt_qs, tp->txq_max);
12623 	}
12624 }
12625 
12626 static int tg3_set_channels(struct net_device *dev,
12627 			    struct ethtool_channels *channel)
12628 {
12629 	struct tg3 *tp = netdev_priv(dev);
12630 
12631 	if (!tg3_flag(tp, SUPPORT_MSIX))
12632 		return -EOPNOTSUPP;
12633 
12634 	if (channel->rx_count > tp->rxq_max ||
12635 	    channel->tx_count > tp->txq_max)
12636 		return -EINVAL;
12637 
12638 	tp->rxq_req = channel->rx_count;
12639 	tp->txq_req = channel->tx_count;
12640 
12641 	if (!netif_running(dev))
12642 		return 0;
12643 
12644 	tg3_stop(tp);
12645 
12646 	tg3_carrier_off(tp);
12647 
12648 	tg3_start(tp, true, false, false);
12649 
12650 	return 0;
12651 }
12652 
12653 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12654 {
12655 	switch (stringset) {
12656 	case ETH_SS_STATS:
12657 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12658 		break;
12659 	case ETH_SS_TEST:
12660 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12661 		break;
12662 	default:
12663 		WARN_ON(1);	/* we need a WARN() */
12664 		break;
12665 	}
12666 }
12667 
12668 static int tg3_set_phys_id(struct net_device *dev,
12669 			    enum ethtool_phys_id_state state)
12670 {
12671 	struct tg3 *tp = netdev_priv(dev);
12672 
12673 	if (!netif_running(tp->dev))
12674 		return -EAGAIN;
12675 
12676 	switch (state) {
12677 	case ETHTOOL_ID_ACTIVE:
12678 		return 1;	/* cycle on/off once per second */
12679 
12680 	case ETHTOOL_ID_ON:
12681 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12682 		     LED_CTRL_1000MBPS_ON |
12683 		     LED_CTRL_100MBPS_ON |
12684 		     LED_CTRL_10MBPS_ON |
12685 		     LED_CTRL_TRAFFIC_OVERRIDE |
12686 		     LED_CTRL_TRAFFIC_BLINK |
12687 		     LED_CTRL_TRAFFIC_LED);
12688 		break;
12689 
12690 	case ETHTOOL_ID_OFF:
12691 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12692 		     LED_CTRL_TRAFFIC_OVERRIDE);
12693 		break;
12694 
12695 	case ETHTOOL_ID_INACTIVE:
12696 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12697 		break;
12698 	}
12699 
12700 	return 0;
12701 }
12702 
12703 static void tg3_get_ethtool_stats(struct net_device *dev,
12704 				   struct ethtool_stats *estats, u64 *tmp_stats)
12705 {
12706 	struct tg3 *tp = netdev_priv(dev);
12707 
12708 	if (tp->hw_stats)
12709 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12710 	else
12711 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12712 }
12713 
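/* Read the VPD block.  Images with the standard EEPROM magic are searched
 * through the NVRAM directory for an extended-VPD entry (falling back to
 * the fixed TG3_NVM_VPD_OFF window); anything else is read through the PCI
 * VPD capability.  Returns a kmalloc'ed buffer that the caller must free.
 */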
12714 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12715 {
12716 	int i;
12717 	__be32 *buf;
12718 	u32 offset = 0, len = 0;
12719 	u32 magic, val;
12720 
12721 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12722 		return NULL;
12723 
12724 	if (magic == TG3_EEPROM_MAGIC) {
12725 		for (offset = TG3_NVM_DIR_START;
12726 		     offset < TG3_NVM_DIR_END;
12727 		     offset += TG3_NVM_DIRENT_SIZE) {
12728 			if (tg3_nvram_read(tp, offset, &val))
12729 				return NULL;
12730 
12731 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12732 			    TG3_NVM_DIRTYPE_EXTVPD)
12733 				break;
12734 		}
12735 
12736 		if (offset != TG3_NVM_DIR_END) {
12737 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12738 			if (tg3_nvram_read(tp, offset + 4, &offset))
12739 				return NULL;
12740 
12741 			offset = tg3_nvram_logical_addr(tp, offset);
12742 		}
12743 	}
12744 
12745 	if (!offset || !len) {
12746 		offset = TG3_NVM_VPD_OFF;
12747 		len = TG3_NVM_VPD_LEN;
12748 	}
12749 
12750 	buf = kmalloc(len, GFP_KERNEL);
12751 	if (buf == NULL)
12752 		return NULL;
12753 
12754 	if (magic == TG3_EEPROM_MAGIC) {
12755 		for (i = 0; i < len; i += 4) {
12756 			/* The data is in little-endian format in NVRAM.
12757 			 * Use the big-endian read routines to preserve
12758 			 * the byte order as it exists in NVRAM.
12759 			 */
12760 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12761 				goto error;
12762 		}
12763 	} else {
12764 		u8 *ptr;
12765 		ssize_t cnt;
12766 		unsigned int pos = 0;
12767 
12768 		ptr = (u8 *)&buf[0];
12769 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12770 			cnt = pci_read_vpd(tp->pdev, pos,
12771 					   len - pos, ptr);
12772 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12773 				cnt = 0;
12774 			else if (cnt < 0)
12775 				goto error;
12776 		}
12777 		if (pos != len)
12778 			goto error;
12779 	}
12780 
12781 	*vpdlen = len;
12782 
12783 	return buf;
12784 
12785 error:
12786 	kfree(buf);
12787 	return NULL;
12788 }
12789 
12790 #define NVRAM_TEST_SIZE 0x100
12791 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12792 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12793 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12794 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12795 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12796 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12797 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12798 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12799 
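/* NVRAM portion of the ethtool self-test.  Selfboot firmware images are
 * verified with a simple byte checksum (or per-byte parity bits for the
 * hardware selfboot format), legacy images with the CRCs at offsets 0x10
 * and 0xfc, and finally the checksum keyword in the VPD read-only block
 * is validated.
 */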
12800 static int tg3_test_nvram(struct tg3 *tp)
12801 {
12802 	u32 csum, magic, len;
12803 	__be32 *buf;
12804 	int i, j, k, err = 0, size;
12805 
12806 	if (tg3_flag(tp, NO_NVRAM))
12807 		return 0;
12808 
12809 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12810 		return -EIO;
12811 
12812 	if (magic == TG3_EEPROM_MAGIC)
12813 		size = NVRAM_TEST_SIZE;
12814 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12815 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12816 		    TG3_EEPROM_SB_FORMAT_1) {
12817 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12818 			case TG3_EEPROM_SB_REVISION_0:
12819 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12820 				break;
12821 			case TG3_EEPROM_SB_REVISION_2:
12822 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12823 				break;
12824 			case TG3_EEPROM_SB_REVISION_3:
12825 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12826 				break;
12827 			case TG3_EEPROM_SB_REVISION_4:
12828 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12829 				break;
12830 			case TG3_EEPROM_SB_REVISION_5:
12831 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12832 				break;
12833 			case TG3_EEPROM_SB_REVISION_6:
12834 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12835 				break;
12836 			default:
12837 				return -EIO;
12838 			}
12839 		} else
12840 			return 0;
12841 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12842 		size = NVRAM_SELFBOOT_HW_SIZE;
12843 	else
12844 		return -EIO;
12845 
12846 	buf = kmalloc(size, GFP_KERNEL);
12847 	if (buf == NULL)
12848 		return -ENOMEM;
12849 
12850 	err = -EIO;
12851 	for (i = 0, j = 0; i < size; i += 4, j++) {
12852 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12853 		if (err)
12854 			break;
12855 	}
12856 	if (i < size)
12857 		goto out;
12858 
12859 	/* Selfboot format */
12860 	magic = be32_to_cpu(buf[0]);
12861 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12862 	    TG3_EEPROM_MAGIC_FW) {
12863 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12864 
12865 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12866 		    TG3_EEPROM_SB_REVISION_2) {
12867 			/* For rev 2, the csum doesn't include the MBA. */
12868 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12869 				csum8 += buf8[i];
12870 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12871 				csum8 += buf8[i];
12872 		} else {
12873 			for (i = 0; i < size; i++)
12874 				csum8 += buf8[i];
12875 		}
12876 
12877 		if (csum8 == 0) {
12878 			err = 0;
12879 			goto out;
12880 		}
12881 
12882 		err = -EIO;
12883 		goto out;
12884 	}
12885 
12886 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12887 	    TG3_EEPROM_MAGIC_HW) {
12888 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12889 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12890 		u8 *buf8 = (u8 *) buf;
12891 
12892 		/* Separate the parity bits and the data bytes.  */
12893 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12894 			if ((i == 0) || (i == 8)) {
12895 				int l;
12896 				u8 msk;
12897 
12898 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12899 					parity[k++] = buf8[i] & msk;
12900 				i++;
12901 			} else if (i == 16) {
12902 				int l;
12903 				u8 msk;
12904 
12905 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12906 					parity[k++] = buf8[i] & msk;
12907 				i++;
12908 
12909 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12910 					parity[k++] = buf8[i] & msk;
12911 				i++;
12912 			}
12913 			data[j++] = buf8[i];
12914 		}
12915 
12916 		err = -EIO;
12917 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12918 			u8 hw8 = hweight8(data[i]);
12919 
12920 			if ((hw8 & 0x1) && parity[i])
12921 				goto out;
12922 			else if (!(hw8 & 0x1) && !parity[i])
12923 				goto out;
12924 		}
12925 		err = 0;
12926 		goto out;
12927 	}
12928 
12929 	err = -EIO;
12930 
12931 	/* Bootstrap checksum at offset 0x10 */
12932 	csum = calc_crc((unsigned char *) buf, 0x10);
12933 	if (csum != le32_to_cpu(buf[0x10/4]))
12934 		goto out;
12935 
12936 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12937 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12938 	if (csum != le32_to_cpu(buf[0xfc/4]))
12939 		goto out;
12940 
12941 	kfree(buf);
12942 
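	/* Validate the VPD block: locate the read-only LRDT section and its
	 * CHKSUM keyword; the byte sum from the start of the block up to and
	 * including the checksum byte must be zero.
	 */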
12943 	buf = tg3_vpd_readblock(tp, &len);
12944 	if (!buf)
12945 		return -ENOMEM;
12946 
12947 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12948 	if (i > 0) {
12949 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12950 		if (j < 0)
12951 			goto out;
12952 
12953 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12954 			goto out;
12955 
12956 		i += PCI_VPD_LRDT_TAG_SIZE;
12957 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12958 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12959 		if (j > 0) {
12960 			u8 csum8 = 0;
12961 
12962 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12963 
12964 			for (i = 0; i <= j; i++)
12965 				csum8 += ((u8 *)buf)[i];
12966 
12967 			if (csum8)
12968 				goto out;
12969 		}
12970 	}
12971 
12972 	err = 0;
12973 
12974 out:
12975 	kfree(buf);
12976 	return err;
12977 }
12978 
12979 #define TG3_SERDES_TIMEOUT_SEC	2
12980 #define TG3_COPPER_TIMEOUT_SEC	6
12981 
12982 static int tg3_test_link(struct tg3 *tp)
12983 {
12984 	int i, max;
12985 
12986 	if (!netif_running(tp->dev))
12987 		return -ENODEV;
12988 
12989 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12990 		max = TG3_SERDES_TIMEOUT_SEC;
12991 	else
12992 		max = TG3_COPPER_TIMEOUT_SEC;
12993 
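	/* Poll once per second for link-up, with a shorter timeout for
	 * serdes than for copper; bail out early if interrupted.
	 */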
12994 	for (i = 0; i < max; i++) {
12995 		if (tp->link_up)
12996 			return 0;
12997 
12998 		if (msleep_interruptible(1000))
12999 			break;
13000 	}
13001 
13002 	return -EIO;
13003 }
13004 
13005 /* Only test the commonly used registers */
13006 static int tg3_test_registers(struct tg3 *tp)
13007 {
13008 	int i, is_5705, is_5750;
13009 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13010 	static struct {
13011 		u16 offset;
13012 		u16 flags;
13013 #define TG3_FL_5705	0x1
13014 #define TG3_FL_NOT_5705	0x2
13015 #define TG3_FL_NOT_5788	0x4
13016 #define TG3_FL_NOT_5750	0x8
13017 		u32 read_mask;
13018 		u32 write_mask;
13019 	} reg_tbl[] = {
13020 		/* MAC Control Registers */
13021 		{ MAC_MODE, TG3_FL_NOT_5705,
13022 			0x00000000, 0x00ef6f8c },
13023 		{ MAC_MODE, TG3_FL_5705,
13024 			0x00000000, 0x01ef6b8c },
13025 		{ MAC_STATUS, TG3_FL_NOT_5705,
13026 			0x03800107, 0x00000000 },
13027 		{ MAC_STATUS, TG3_FL_5705,
13028 			0x03800100, 0x00000000 },
13029 		{ MAC_ADDR_0_HIGH, 0x0000,
13030 			0x00000000, 0x0000ffff },
13031 		{ MAC_ADDR_0_LOW, 0x0000,
13032 			0x00000000, 0xffffffff },
13033 		{ MAC_RX_MTU_SIZE, 0x0000,
13034 			0x00000000, 0x0000ffff },
13035 		{ MAC_TX_MODE, 0x0000,
13036 			0x00000000, 0x00000070 },
13037 		{ MAC_TX_LENGTHS, 0x0000,
13038 			0x00000000, 0x00003fff },
13039 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13040 			0x00000000, 0x000007fc },
13041 		{ MAC_RX_MODE, TG3_FL_5705,
13042 			0x00000000, 0x000007dc },
13043 		{ MAC_HASH_REG_0, 0x0000,
13044 			0x00000000, 0xffffffff },
13045 		{ MAC_HASH_REG_1, 0x0000,
13046 			0x00000000, 0xffffffff },
13047 		{ MAC_HASH_REG_2, 0x0000,
13048 			0x00000000, 0xffffffff },
13049 		{ MAC_HASH_REG_3, 0x0000,
13050 			0x00000000, 0xffffffff },
13051 
13052 		/* Receive Data and Receive BD Initiator Control Registers. */
13053 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13054 			0x00000000, 0xffffffff },
13055 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13056 			0x00000000, 0xffffffff },
13057 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13058 			0x00000000, 0x00000003 },
13059 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13060 			0x00000000, 0xffffffff },
13061 		{ RCVDBDI_STD_BD+0, 0x0000,
13062 			0x00000000, 0xffffffff },
13063 		{ RCVDBDI_STD_BD+4, 0x0000,
13064 			0x00000000, 0xffffffff },
13065 		{ RCVDBDI_STD_BD+8, 0x0000,
13066 			0x00000000, 0xffff0002 },
13067 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13068 			0x00000000, 0xffffffff },
13069 
13070 		/* Receive BD Initiator Control Registers. */
13071 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13072 			0x00000000, 0xffffffff },
13073 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13074 			0x00000000, 0x000003ff },
13075 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13076 			0x00000000, 0xffffffff },
13077 
13078 		/* Host Coalescing Control Registers. */
13079 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13080 			0x00000000, 0x00000004 },
13081 		{ HOSTCC_MODE, TG3_FL_5705,
13082 			0x00000000, 0x000000f6 },
13083 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13084 			0x00000000, 0xffffffff },
13085 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13086 			0x00000000, 0x000003ff },
13087 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13088 			0x00000000, 0xffffffff },
13089 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13090 			0x00000000, 0x000003ff },
13091 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13092 			0x00000000, 0xffffffff },
13093 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13094 			0x00000000, 0x000000ff },
13095 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13096 			0x00000000, 0xffffffff },
13097 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13098 			0x00000000, 0x000000ff },
13099 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13100 			0x00000000, 0xffffffff },
13101 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13102 			0x00000000, 0xffffffff },
13103 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13104 			0x00000000, 0xffffffff },
13105 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13106 			0x00000000, 0x000000ff },
13107 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13108 			0x00000000, 0xffffffff },
13109 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13110 			0x00000000, 0x000000ff },
13111 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13112 			0x00000000, 0xffffffff },
13113 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13114 			0x00000000, 0xffffffff },
13115 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13116 			0x00000000, 0xffffffff },
13117 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13118 			0x00000000, 0xffffffff },
13119 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13120 			0x00000000, 0xffffffff },
13121 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13122 			0xffffffff, 0x00000000 },
13123 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13124 			0xffffffff, 0x00000000 },
13125 
13126 		/* Buffer Manager Control Registers. */
13127 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13128 			0x00000000, 0x007fff80 },
13129 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13130 			0x00000000, 0x007fffff },
13131 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13132 			0x00000000, 0x0000003f },
13133 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13134 			0x00000000, 0x000001ff },
13135 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13136 			0x00000000, 0x000001ff },
13137 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13138 			0xffffffff, 0x00000000 },
13139 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13140 			0xffffffff, 0x00000000 },
13141 
13142 		/* Mailbox Registers */
13143 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13144 			0x00000000, 0x000001ff },
13145 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13146 			0x00000000, 0x000001ff },
13147 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13148 			0x00000000, 0x000007ff },
13149 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13150 			0x00000000, 0x000001ff },
13151 
13152 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13153 	};
13154 
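	/* Classify the chip once so the per-entry flags above can select
	 * which rows of reg_tbl apply to this device.
	 */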
13155 	is_5705 = is_5750 = 0;
13156 	if (tg3_flag(tp, 5705_PLUS)) {
13157 		is_5705 = 1;
13158 		if (tg3_flag(tp, 5750_PLUS))
13159 			is_5750 = 1;
13160 	}
13161 
13162 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13163 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13164 			continue;
13165 
13166 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13167 			continue;
13168 
13169 		if (tg3_flag(tp, IS_5788) &&
13170 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13171 			continue;
13172 
13173 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13174 			continue;
13175 
13176 		offset = (u32) reg_tbl[i].offset;
13177 		read_mask = reg_tbl[i].read_mask;
13178 		write_mask = reg_tbl[i].write_mask;
13179 
13180 		/* Save the original register content */
13181 		save_val = tr32(offset);
13182 
13183 		/* Determine the read-only value. */
13184 		read_val = save_val & read_mask;
13185 
13186 		/* Write zero to the register, then make sure the read-only bits
13187 		 * are not changed and the read/write bits are all zeros.
13188 		 */
13189 		tw32(offset, 0);
13190 
13191 		val = tr32(offset);
13192 
13193 		/* Test the read-only and read/write bits. */
13194 		if (((val & read_mask) != read_val) || (val & write_mask))
13195 			goto out;
13196 
13197 		/* Write ones to all the bits defined by RdMask and WrMask, then
13198 		 * make sure the read-only bits are not changed and the
13199 		 * read/write bits are all ones.
13200 		 */
13201 		tw32(offset, read_mask | write_mask);
13202 
13203 		val = tr32(offset);
13204 
13205 		/* Test the read-only bits. */
13206 		if ((val & read_mask) != read_val)
13207 			goto out;
13208 
13209 		/* Test the read/write bits. */
13210 		if ((val & write_mask) != write_mask)
13211 			goto out;
13212 
13213 		tw32(offset, save_val);
13214 	}
13215 
13216 	return 0;
13217 
13218 out:
13219 	if (netif_msg_hw(tp))
13220 		netdev_err(tp->dev,
13221 			   "Register test failed at offset %x\n", offset);
13222 	tw32(offset, save_val);
13223 	return -EIO;
13224 }
13225 
13226 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13227 {
13228 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13229 	int i;
13230 	u32 j;
13231 
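	/* Write each test pattern across the region one word at a time via
	 * tg3_write_mem()/tg3_read_mem(); any read-back mismatch is a failure.
	 */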
13232 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13233 		for (j = 0; j < len; j += 4) {
13234 			u32 val;
13235 
13236 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13237 			tg3_read_mem(tp, offset + j, &val);
13238 			if (val != test_pattern[i])
13239 				return -EIO;
13240 		}
13241 	}
13242 	return 0;
13243 }
13244 
13245 static int tg3_test_memory(struct tg3 *tp)
13246 {
13247 	static struct mem_entry {
13248 		u32 offset;
13249 		u32 len;
13250 	} mem_tbl_570x[] = {
13251 		{ 0x00000000, 0x00b50},
13252 		{ 0x00002000, 0x1c000},
13253 		{ 0xffffffff, 0x00000}
13254 	}, mem_tbl_5705[] = {
13255 		{ 0x00000100, 0x0000c},
13256 		{ 0x00000200, 0x00008},
13257 		{ 0x00004000, 0x00800},
13258 		{ 0x00006000, 0x01000},
13259 		{ 0x00008000, 0x02000},
13260 		{ 0x00010000, 0x0e000},
13261 		{ 0xffffffff, 0x00000}
13262 	}, mem_tbl_5755[] = {
13263 		{ 0x00000200, 0x00008},
13264 		{ 0x00004000, 0x00800},
13265 		{ 0x00006000, 0x00800},
13266 		{ 0x00008000, 0x02000},
13267 		{ 0x00010000, 0x0c000},
13268 		{ 0xffffffff, 0x00000}
13269 	}, mem_tbl_5906[] = {
13270 		{ 0x00000200, 0x00008},
13271 		{ 0x00004000, 0x00400},
13272 		{ 0x00006000, 0x00400},
13273 		{ 0x00008000, 0x01000},
13274 		{ 0x00010000, 0x01000},
13275 		{ 0xffffffff, 0x00000}
13276 	}, mem_tbl_5717[] = {
13277 		{ 0x00000200, 0x00008},
13278 		{ 0x00010000, 0x0a000},
13279 		{ 0x00020000, 0x13c00},
13280 		{ 0xffffffff, 0x00000}
13281 	}, mem_tbl_57765[] = {
13282 		{ 0x00000200, 0x00008},
13283 		{ 0x00004000, 0x00800},
13284 		{ 0x00006000, 0x09800},
13285 		{ 0x00010000, 0x0a000},
13286 		{ 0xffffffff, 0x00000}
13287 	};
13288 	struct mem_entry *mem_tbl;
13289 	int err = 0;
13290 	int i;
13291 
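	/* Pick the internal memory map for this chip family; each table is a
	 * list of { offset, len } regions terminated by offset 0xffffffff.
	 */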
13292 	if (tg3_flag(tp, 5717_PLUS))
13293 		mem_tbl = mem_tbl_5717;
13294 	else if (tg3_flag(tp, 57765_CLASS) ||
13295 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13296 		mem_tbl = mem_tbl_57765;
13297 	else if (tg3_flag(tp, 5755_PLUS))
13298 		mem_tbl = mem_tbl_5755;
13299 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13300 		mem_tbl = mem_tbl_5906;
13301 	else if (tg3_flag(tp, 5705_PLUS))
13302 		mem_tbl = mem_tbl_5705;
13303 	else
13304 		mem_tbl = mem_tbl_570x;
13305 
13306 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13307 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13308 		if (err)
13309 			break;
13310 	}
13311 
13312 	return err;
13313 }
13314 
13315 #define TG3_TSO_MSS		500
13316 
13317 #define TG3_TSO_IP_HDR_LEN	20
13318 #define TG3_TSO_TCP_HDR_LEN	20
13319 #define TG3_TSO_TCP_OPT_LEN	12
13320 
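/* Canned EtherType + IPv4 + TCP header (with a 12-byte TCP timestamp option)
 * used as the frame template for the TSO loopback test.  The IP total length
 * is filled in at run time, and the TCP checksum field is cleared when the
 * hardware will compute it.
 */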
13321 static const u8 tg3_tso_header[] = {
13322 0x08, 0x00,
13323 0x45, 0x00, 0x00, 0x00,
13324 0x00, 0x00, 0x40, 0x00,
13325 0x40, 0x06, 0x00, 0x00,
13326 0x0a, 0x00, 0x00, 0x01,
13327 0x0a, 0x00, 0x00, 0x02,
13328 0x0d, 0x00, 0xe0, 0x00,
13329 0x00, 0x00, 0x01, 0x00,
13330 0x00, 0x00, 0x02, 0x00,
13331 0x80, 0x10, 0x10, 0x00,
13332 0x14, 0x09, 0x00, 0x00,
13333 0x01, 0x01, 0x08, 0x0a,
13334 0x11, 0x11, 0x11, 0x11,
13335 0x11, 0x11, 0x11, 0x11,
13336 };
13337 
13338 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13339 {
13340 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13341 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13342 	u32 budget;
13343 	struct sk_buff *skb;
13344 	u8 *tx_data, *rx_data;
13345 	dma_addr_t map;
13346 	int num_pkts, tx_len, rx_len, i, err;
13347 	struct tg3_rx_buffer_desc *desc;
13348 	struct tg3_napi *tnapi, *rnapi;
13349 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13350 
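	/* Default to vector 0 for both directions; when multiple vectors are
	 * enabled, receive (RSS) and transmit (TSS) completions are reported
	 * on napi[1] instead, so pick the rings accordingly.
	 */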
13351 	tnapi = &tp->napi[0];
13352 	rnapi = &tp->napi[0];
13353 	if (tp->irq_cnt > 1) {
13354 		if (tg3_flag(tp, ENABLE_RSS))
13355 			rnapi = &tp->napi[1];
13356 		if (tg3_flag(tp, ENABLE_TSS))
13357 			tnapi = &tp->napi[1];
13358 	}
13359 	coal_now = tnapi->coal_now | rnapi->coal_now;
13360 
13361 	err = -EIO;
13362 
13363 	tx_len = pktsz;
13364 	skb = netdev_alloc_skb(tp->dev, tx_len);
13365 	if (!skb)
13366 		return -ENOMEM;
13367 
13368 	tx_data = skb_put(skb, tx_len);
13369 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13370 	memset(tx_data + ETH_ALEN, 0x0, 8);
13371 
13372 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13373 
13374 	if (tso_loopback) {
13375 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13376 
13377 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13378 			      TG3_TSO_TCP_OPT_LEN;
13379 
13380 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13381 		       sizeof(tg3_tso_header));
13382 		mss = TG3_TSO_MSS;
13383 
13384 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13385 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13386 
13387 		/* Set the total length field in the IP header */
13388 		iph->tot_len = htons((u16)(mss + hdr_len));
13389 
13390 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13391 			      TXD_FLAG_CPU_POST_DMA);
13392 
13393 		if (tg3_flag(tp, HW_TSO_1) ||
13394 		    tg3_flag(tp, HW_TSO_2) ||
13395 		    tg3_flag(tp, HW_TSO_3)) {
13396 			struct tcphdr *th;
13397 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13398 			th = (struct tcphdr *)&tx_data[val];
13399 			th->check = 0;
13400 		} else
13401 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13402 
13403 		if (tg3_flag(tp, HW_TSO_3)) {
13404 			mss |= (hdr_len & 0xc) << 12;
13405 			if (hdr_len & 0x10)
13406 				base_flags |= 0x00000010;
13407 			base_flags |= (hdr_len & 0x3e0) << 5;
13408 		} else if (tg3_flag(tp, HW_TSO_2))
13409 			mss |= hdr_len << 9;
13410 		else if (tg3_flag(tp, HW_TSO_1) ||
13411 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13412 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13413 		} else {
13414 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13415 		}
13416 
13417 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13418 	} else {
13419 		num_pkts = 1;
13420 		data_off = ETH_HLEN;
13421 
13422 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13423 		    tx_len > VLAN_ETH_FRAME_LEN)
13424 			base_flags |= TXD_FLAG_JMB_PKT;
13425 	}
13426 
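	/* Fill the payload with an incrementing byte pattern so the receive
	 * side of the loopback can be verified byte for byte.
	 */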
13427 	for (i = data_off; i < tx_len; i++)
13428 		tx_data[i] = (u8) (i & 0xff);
13429 
13430 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13431 	if (pci_dma_mapping_error(tp->pdev, map)) {
13432 		dev_kfree_skb(skb);
13433 		return -EIO;
13434 	}
13435 
13436 	val = tnapi->tx_prod;
13437 	tnapi->tx_buffers[val].skb = skb;
13438 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13439 
13440 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13441 	       rnapi->coal_now);
13442 
13443 	udelay(10);
13444 
13445 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13446 
13447 	budget = tg3_tx_avail(tnapi);
13448 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13449 			    base_flags | TXD_FLAG_END, mss, 0)) {
13450 		tnapi->tx_buffers[val].skb = NULL;
13451 		dev_kfree_skb(skb);
13452 		return -EIO;
13453 	}
13454 
13455 	tnapi->tx_prod++;
13456 
13457 	/* Sync BD data before updating mailbox */
13458 	wmb();
13459 
13460 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13461 	tr32_mailbox(tnapi->prodmbox);
13462 
13463 	udelay(10);
13464 
13465 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13466 	for (i = 0; i < 35; i++) {
13467 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13468 		       coal_now);
13469 
13470 		udelay(10);
13471 
13472 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13473 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13474 		if ((tx_idx == tnapi->tx_prod) &&
13475 		    (rx_idx == (rx_start_idx + num_pkts)))
13476 			break;
13477 	}
13478 
13479 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13480 	dev_kfree_skb(skb);
13481 
13482 	if (tx_idx != tnapi->tx_prod)
13483 		goto out;
13484 
13485 	if (rx_idx != rx_start_idx + num_pkts)
13486 		goto out;
13487 
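	/* Walk the completed receive descriptors: check for errors, verify
	 * the length and the producer ring used, then compare the payload
	 * against the transmitted pattern.
	 */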
13488 	val = data_off;
13489 	while (rx_idx != rx_start_idx) {
13490 		desc = &rnapi->rx_rcb[rx_start_idx++];
13491 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13492 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13493 
13494 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13495 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13496 			goto out;
13497 
13498 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13499 			 - ETH_FCS_LEN;
13500 
13501 		if (!tso_loopback) {
13502 			if (rx_len != tx_len)
13503 				goto out;
13504 
13505 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13506 				if (opaque_key != RXD_OPAQUE_RING_STD)
13507 					goto out;
13508 			} else {
13509 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13510 					goto out;
13511 			}
13512 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13513 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13514 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13515 			goto out;
13516 		}
13517 
13518 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13519 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13520 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13521 					     mapping);
13522 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13523 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13524 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13525 					     mapping);
13526 		} else
13527 			goto out;
13528 
13529 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13530 					    PCI_DMA_FROMDEVICE);
13531 
13532 		rx_data += TG3_RX_OFFSET(tp);
13533 		for (i = data_off; i < rx_len; i++, val++) {
13534 			if (*(rx_data + i) != (u8) (val & 0xff))
13535 				goto out;
13536 		}
13537 	}
13538 
13539 	err = 0;
13540 
13541 	/* tg3_free_rings will unmap and free the rx_data */
13542 out:
13543 	return err;
13544 }
13545 
13546 #define TG3_STD_LOOPBACK_FAILED		1
13547 #define TG3_JMB_LOOPBACK_FAILED		2
13548 #define TG3_TSO_LOOPBACK_FAILED		4
13549 #define TG3_LOOPBACK_FAILED \
13550 	(TG3_STD_LOOPBACK_FAILED | \
13551 	 TG3_JMB_LOOPBACK_FAILED | \
13552 	 TG3_TSO_LOOPBACK_FAILED)
13553 
13554 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13555 {
13556 	int err = -EIO;
13557 	u32 eee_cap;
13558 	u32 jmb_pkt_sz = 9000;
13559 
13560 	if (tp->dma_limit)
13561 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13562 
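	/* Stash the EEE capability flag and clear it for the duration of the
	 * tests; it is restored at the 'done' label below.
	 */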
13563 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13564 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13565 
13566 	if (!netif_running(tp->dev)) {
13567 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13568 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13569 		if (do_extlpbk)
13570 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13571 		goto done;
13572 	}
13573 
13574 	err = tg3_reset_hw(tp, true);
13575 	if (err) {
13576 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13577 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13578 		if (do_extlpbk)
13579 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13580 		goto done;
13581 	}
13582 
13583 	if (tg3_flag(tp, ENABLE_RSS)) {
13584 		int i;
13585 
13586 		/* Reroute all rx packets to the 1st queue */
13587 		for (i = MAC_RSS_INDIR_TBL_0;
13588 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13589 			tw32(i, 0x0);
13590 	}
13591 
13592 	/* HW errata - mac loopback fails in some cases on 5780.
13593 	 * Normal traffic and PHY loopback are not affected by
13594 	 * errata.  Also, the MAC loopback test is deprecated for
13595 	 * all newer ASIC revisions.
13596 	 */
13597 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13598 	    !tg3_flag(tp, CPMU_PRESENT)) {
13599 		tg3_mac_loopback(tp, true);
13600 
13601 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13602 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13603 
13604 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13605 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13606 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13607 
13608 		tg3_mac_loopback(tp, false);
13609 	}
13610 
13611 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13612 	    !tg3_flag(tp, USE_PHYLIB)) {
13613 		int i;
13614 
13615 		tg3_phy_lpbk_set(tp, 0, false);
13616 
13617 		/* Wait for link */
13618 		for (i = 0; i < 100; i++) {
13619 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13620 				break;
13621 			mdelay(1);
13622 		}
13623 
13624 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13625 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13626 		if (tg3_flag(tp, TSO_CAPABLE) &&
13627 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13628 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13629 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13630 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13631 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13632 
13633 		if (do_extlpbk) {
13634 			tg3_phy_lpbk_set(tp, 0, true);
13635 
13636 			/* All link indications report up, but the hardware
13637 			 * isn't really ready for about 20 msec.  Double it
13638 			 * to be sure.
13639 			 */
13640 			mdelay(40);
13641 
13642 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13643 				data[TG3_EXT_LOOPB_TEST] |=
13644 							TG3_STD_LOOPBACK_FAILED;
13645 			if (tg3_flag(tp, TSO_CAPABLE) &&
13646 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13647 				data[TG3_EXT_LOOPB_TEST] |=
13648 							TG3_TSO_LOOPBACK_FAILED;
13649 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13650 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13651 				data[TG3_EXT_LOOPB_TEST] |=
13652 							TG3_JMB_LOOPBACK_FAILED;
13653 		}
13654 
13655 		/* Re-enable gphy autopowerdown. */
13656 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13657 			tg3_phy_toggle_apd(tp, true);
13658 	}
13659 
13660 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13661 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13662 
13663 done:
13664 	tp->phy_flags |= eee_cap;
13665 
13666 	return err;
13667 }
13668 
13669 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13670 			  u64 *data)
13671 {
13672 	struct tg3 *tp = netdev_priv(dev);
13673 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13674 
13675 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13676 		if (tg3_power_up(tp)) {
13677 			etest->flags |= ETH_TEST_FL_FAILED;
13678 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13679 			return;
13680 		}
13681 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13682 	}
13683 
13684 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13685 
13686 	if (tg3_test_nvram(tp) != 0) {
13687 		etest->flags |= ETH_TEST_FL_FAILED;
13688 		data[TG3_NVRAM_TEST] = 1;
13689 	}
13690 	if (!doextlpbk && tg3_test_link(tp)) {
13691 		etest->flags |= ETH_TEST_FL_FAILED;
13692 		data[TG3_LINK_TEST] = 1;
13693 	}
13694 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13695 		int err, err2 = 0, irq_sync = 0;
13696 
13697 		if (netif_running(dev)) {
13698 			tg3_phy_stop(tp);
13699 			tg3_netif_stop(tp);
13700 			irq_sync = 1;
13701 		}
13702 
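		/* Offline tests need the chip quiesced: halt it and stop the
		 * RX (and, on non-5705_PLUS chips, TX) on-chip CPUs while
		 * holding the NVRAM lock, before registers and memory are
		 * exercised directly.
		 */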
13703 		tg3_full_lock(tp, irq_sync);
13704 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13705 		err = tg3_nvram_lock(tp);
13706 		tg3_halt_cpu(tp, RX_CPU_BASE);
13707 		if (!tg3_flag(tp, 5705_PLUS))
13708 			tg3_halt_cpu(tp, TX_CPU_BASE);
13709 		if (!err)
13710 			tg3_nvram_unlock(tp);
13711 
13712 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13713 			tg3_phy_reset(tp);
13714 
13715 		if (tg3_test_registers(tp) != 0) {
13716 			etest->flags |= ETH_TEST_FL_FAILED;
13717 			data[TG3_REGISTER_TEST] = 1;
13718 		}
13719 
13720 		if (tg3_test_memory(tp) != 0) {
13721 			etest->flags |= ETH_TEST_FL_FAILED;
13722 			data[TG3_MEMORY_TEST] = 1;
13723 		}
13724 
13725 		if (doextlpbk)
13726 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13727 
13728 		if (tg3_test_loopback(tp, data, doextlpbk))
13729 			etest->flags |= ETH_TEST_FL_FAILED;
13730 
13731 		tg3_full_unlock(tp);
13732 
13733 		if (tg3_test_interrupt(tp) != 0) {
13734 			etest->flags |= ETH_TEST_FL_FAILED;
13735 			data[TG3_INTERRUPT_TEST] = 1;
13736 		}
13737 
13738 		tg3_full_lock(tp, 0);
13739 
13740 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13741 		if (netif_running(dev)) {
13742 			tg3_flag_set(tp, INIT_COMPLETE);
13743 			err2 = tg3_restart_hw(tp, true);
13744 			if (!err2)
13745 				tg3_netif_start(tp);
13746 		}
13747 
13748 		tg3_full_unlock(tp);
13749 
13750 		if (irq_sync && !err2)
13751 			tg3_phy_start(tp);
13752 	}
13753 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13754 		tg3_power_down_prepare(tp);
13756 }
13757 
13758 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13759 {
13760 	struct tg3 *tp = netdev_priv(dev);
13761 	struct hwtstamp_config stmpconf;
13762 
13763 	if (!tg3_flag(tp, PTP_CAPABLE))
13764 		return -EOPNOTSUPP;
13765 
13766 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13767 		return -EFAULT;
13768 
13769 	if (stmpconf.flags)
13770 		return -EINVAL;
13771 
13772 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13773 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13774 		return -ERANGE;
13775 
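	/* Map the requested receive filter onto the chip's RX PTP control
	 * bits; anything not listed below is rejected with -ERANGE.
	 */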
13776 	switch (stmpconf.rx_filter) {
13777 	case HWTSTAMP_FILTER_NONE:
13778 		tp->rxptpctl = 0;
13779 		break;
13780 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13781 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13782 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13783 		break;
13784 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13785 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13786 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13787 		break;
13788 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13789 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13790 			       TG3_RX_PTP_CTL_DELAY_REQ;
13791 		break;
13792 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13793 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13794 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13795 		break;
13796 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13797 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13798 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13799 		break;
13800 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13801 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13802 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13803 		break;
13804 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13805 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13806 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13807 		break;
13808 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13809 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13810 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13811 		break;
13812 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13813 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13814 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13815 		break;
13816 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13817 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13818 			       TG3_RX_PTP_CTL_DELAY_REQ;
13819 		break;
13820 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13821 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13822 			       TG3_RX_PTP_CTL_DELAY_REQ;
13823 		break;
13824 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13825 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13826 			       TG3_RX_PTP_CTL_DELAY_REQ;
13827 		break;
13828 	default:
13829 		return -ERANGE;
13830 	}
13831 
13832 	if (netif_running(dev) && tp->rxptpctl)
13833 		tw32(TG3_RX_PTP_CTL,
13834 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13835 
13836 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13837 		tg3_flag_set(tp, TX_TSTAMP_EN);
13838 	else
13839 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13840 
13841 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13842 		-EFAULT : 0;
13843 }
13844 
13845 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13846 {
13847 	struct tg3 *tp = netdev_priv(dev);
13848 	struct hwtstamp_config stmpconf;
13849 
13850 	if (!tg3_flag(tp, PTP_CAPABLE))
13851 		return -EOPNOTSUPP;
13852 
13853 	stmpconf.flags = 0;
13854 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13855 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13856 
13857 	switch (tp->rxptpctl) {
13858 	case 0:
13859 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13860 		break;
13861 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13862 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13863 		break;
13864 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13865 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13866 		break;
13867 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13868 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13869 		break;
13870 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13871 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13872 		break;
13873 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13874 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13875 		break;
13876 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13877 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13878 		break;
13879 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13880 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13881 		break;
13882 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13883 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13884 		break;
13885 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13886 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13887 		break;
13888 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13889 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13890 		break;
13891 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13892 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13893 		break;
13894 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13895 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13896 		break;
13897 	default:
13898 		WARN_ON_ONCE(1);
13899 		return -ERANGE;
13900 	}
13901 
13902 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13903 		-EFAULT : 0;
13904 }
13905 
13906 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13907 {
13908 	struct mii_ioctl_data *data = if_mii(ifr);
13909 	struct tg3 *tp = netdev_priv(dev);
13910 	int err;
13911 
13912 	if (tg3_flag(tp, USE_PHYLIB)) {
13913 		struct phy_device *phydev;
13914 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13915 			return -EAGAIN;
13916 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13917 		return phy_mii_ioctl(phydev, ifr, cmd);
13918 	}
13919 
13920 	switch (cmd) {
13921 	case SIOCGMIIPHY:
13922 		data->phy_id = tp->phy_addr;
13923 
13924 		/* fallthru */
13925 	case SIOCGMIIREG: {
13926 		u32 mii_regval;
13927 
13928 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13929 			break;			/* We have no PHY */
13930 
13931 		if (!netif_running(dev))
13932 			return -EAGAIN;
13933 
13934 		spin_lock_bh(&tp->lock);
13935 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13936 				    data->reg_num & 0x1f, &mii_regval);
13937 		spin_unlock_bh(&tp->lock);
13938 
13939 		data->val_out = mii_regval;
13940 
13941 		return err;
13942 	}
13943 
13944 	case SIOCSMIIREG:
13945 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13946 			break;			/* We have no PHY */
13947 
13948 		if (!netif_running(dev))
13949 			return -EAGAIN;
13950 
13951 		spin_lock_bh(&tp->lock);
13952 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13953 				     data->reg_num & 0x1f, data->val_in);
13954 		spin_unlock_bh(&tp->lock);
13955 
13956 		return err;
13957 
13958 	case SIOCSHWTSTAMP:
13959 		return tg3_hwtstamp_set(dev, ifr);
13960 
13961 	case SIOCGHWTSTAMP:
13962 		return tg3_hwtstamp_get(dev, ifr);
13963 
13964 	default:
13965 		/* do nothing */
13966 		break;
13967 	}
13968 	return -EOPNOTSUPP;
13969 }
13970 
13971 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13972 {
13973 	struct tg3 *tp = netdev_priv(dev);
13974 
13975 	memcpy(ec, &tp->coal, sizeof(*ec));
13976 	return 0;
13977 }
13978 
13979 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13980 {
13981 	struct tg3 *tp = netdev_priv(dev);
13982 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13983 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13984 
13985 	if (!tg3_flag(tp, 5705_PLUS)) {
13986 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13987 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13988 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13989 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13990 	}
13991 
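	/* On 5705_PLUS chips the IRQ-deferred and statistics coalescing
	 * limits stay at zero, so any nonzero request for those fields is
	 * rejected by the range check below.
	 */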
13992 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13993 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13994 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13995 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13996 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13997 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13998 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13999 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14000 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14001 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14002 		return -EINVAL;
14003 
14004 	/* No rx interrupts will be generated if both are zero */
14005 	if ((ec->rx_coalesce_usecs == 0) &&
14006 	    (ec->rx_max_coalesced_frames == 0))
14007 		return -EINVAL;
14008 
14009 	/* No tx interrupts will be generated if both are zero */
14010 	if ((ec->tx_coalesce_usecs == 0) &&
14011 	    (ec->tx_max_coalesced_frames == 0))
14012 		return -EINVAL;
14013 
14014 	/* Only copy relevant parameters, ignore all others. */
14015 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14016 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14017 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14018 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14019 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14020 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14021 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14022 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14023 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14024 
14025 	if (netif_running(dev)) {
14026 		tg3_full_lock(tp, 0);
14027 		__tg3_set_coalesce(tp, &tp->coal);
14028 		tg3_full_unlock(tp);
14029 	}
14030 	return 0;
14031 }
14032 
14033 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14034 {
14035 	struct tg3 *tp = netdev_priv(dev);
14036 
14037 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14038 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14039 		return -EOPNOTSUPP;
14040 	}
14041 
14042 	if (edata->advertised != tp->eee.advertised) {
14043 		netdev_warn(tp->dev,
14044 			    "Direct manipulation of EEE advertisement is not supported\n");
14045 		return -EINVAL;
14046 	}
14047 
14048 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14049 		netdev_warn(tp->dev,
14050 			    "Maximum supported Tx LPI timer is %#x\n",
14051 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14052 		return -EINVAL;
14053 	}
14054 
14055 	tp->eee = *edata;
14056 
14057 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14058 	tg3_warn_mgmt_link_flap(tp);
14059 
14060 	if (netif_running(tp->dev)) {
14061 		tg3_full_lock(tp, 0);
14062 		tg3_setup_eee(tp);
14063 		tg3_phy_reset(tp);
14064 		tg3_full_unlock(tp);
14065 	}
14066 
14067 	return 0;
14068 }
14069 
14070 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14071 {
14072 	struct tg3 *tp = netdev_priv(dev);
14073 
14074 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14075 		netdev_warn(tp->dev,
14076 			    "Board does not support EEE!\n");
14077 		return -EOPNOTSUPP;
14078 	}
14079 
14080 	*edata = tp->eee;
14081 	return 0;
14082 }
14083 
14084 static const struct ethtool_ops tg3_ethtool_ops = {
14085 	.get_settings		= tg3_get_settings,
14086 	.set_settings		= tg3_set_settings,
14087 	.get_drvinfo		= tg3_get_drvinfo,
14088 	.get_regs_len		= tg3_get_regs_len,
14089 	.get_regs		= tg3_get_regs,
14090 	.get_wol		= tg3_get_wol,
14091 	.set_wol		= tg3_set_wol,
14092 	.get_msglevel		= tg3_get_msglevel,
14093 	.set_msglevel		= tg3_set_msglevel,
14094 	.nway_reset		= tg3_nway_reset,
14095 	.get_link		= ethtool_op_get_link,
14096 	.get_eeprom_len		= tg3_get_eeprom_len,
14097 	.get_eeprom		= tg3_get_eeprom,
14098 	.set_eeprom		= tg3_set_eeprom,
14099 	.get_ringparam		= tg3_get_ringparam,
14100 	.set_ringparam		= tg3_set_ringparam,
14101 	.get_pauseparam		= tg3_get_pauseparam,
14102 	.set_pauseparam		= tg3_set_pauseparam,
14103 	.self_test		= tg3_self_test,
14104 	.get_strings		= tg3_get_strings,
14105 	.set_phys_id		= tg3_set_phys_id,
14106 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14107 	.get_coalesce		= tg3_get_coalesce,
14108 	.set_coalesce		= tg3_set_coalesce,
14109 	.get_sset_count		= tg3_get_sset_count,
14110 	.get_rxnfc		= tg3_get_rxnfc,
14111 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14112 	.get_rxfh		= tg3_get_rxfh,
14113 	.set_rxfh		= tg3_set_rxfh,
14114 	.get_channels		= tg3_get_channels,
14115 	.set_channels		= tg3_set_channels,
14116 	.get_ts_info		= tg3_get_ts_info,
14117 	.get_eee		= tg3_get_eee,
14118 	.set_eee		= tg3_set_eee,
14119 };
14120 
14121 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14122 						struct rtnl_link_stats64 *stats)
14123 {
14124 	struct tg3 *tp = netdev_priv(dev);
14125 
14126 	spin_lock_bh(&tp->lock);
14127 	if (!tp->hw_stats) {
14128 		*stats = tp->net_stats_prev;
14129 		spin_unlock_bh(&tp->lock);
14130 		return stats;
14131 	}
14132 
14133 	tg3_get_nstats(tp, stats);
14134 	spin_unlock_bh(&tp->lock);
14135 
14136 	return stats;
14137 }
14138 
14139 static void tg3_set_rx_mode(struct net_device *dev)
14140 {
14141 	struct tg3 *tp = netdev_priv(dev);
14142 
14143 	if (!netif_running(dev))
14144 		return;
14145 
14146 	tg3_full_lock(tp, 0);
14147 	__tg3_set_rx_mode(dev);
14148 	tg3_full_unlock(tp);
14149 }
14150 
14151 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14152 			       int new_mtu)
14153 {
14154 	dev->mtu = new_mtu;
14155 
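	/* On 5780-class chips a jumbo MTU disables TSO (re-evaluating the
	 * feature flags) rather than enabling the jumbo ring; all other
	 * chips simply toggle the jumbo ring on or off.
	 */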
14156 	if (new_mtu > ETH_DATA_LEN) {
14157 		if (tg3_flag(tp, 5780_CLASS)) {
14158 			netdev_update_features(dev);
14159 			tg3_flag_clear(tp, TSO_CAPABLE);
14160 		} else {
14161 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14162 		}
14163 	} else {
14164 		if (tg3_flag(tp, 5780_CLASS)) {
14165 			tg3_flag_set(tp, TSO_CAPABLE);
14166 			netdev_update_features(dev);
14167 		}
14168 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14169 	}
14170 }
14171 
14172 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14173 {
14174 	struct tg3 *tp = netdev_priv(dev);
14175 	int err;
14176 	bool reset_phy = false;
14177 
14178 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14179 		return -EINVAL;
14180 
14181 	if (!netif_running(dev)) {
14182 		/* We'll just catch it later when the
14183 		 * device is brought up.
14184 		 */
14185 		tg3_set_mtu(dev, tp, new_mtu);
14186 		return 0;
14187 	}
14188 
14189 	tg3_phy_stop(tp);
14190 
14191 	tg3_netif_stop(tp);
14192 
14193 	tg3_set_mtu(dev, tp, new_mtu);
14194 
14195 	tg3_full_lock(tp, 1);
14196 
14197 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14198 
14199 	/* Reset the PHY, otherwise the read DMA engine will be left in a
14200 	 * mode that breaks all DMA read requests into 256-byte chunks.
14201 	 */
14202 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
14203 		reset_phy = true;
14204 
14205 	err = tg3_restart_hw(tp, reset_phy);
14206 
14207 	if (!err)
14208 		tg3_netif_start(tp);
14209 
14210 	tg3_full_unlock(tp);
14211 
14212 	if (!err)
14213 		tg3_phy_start(tp);
14214 
14215 	return err;
14216 }
14217 
14218 static const struct net_device_ops tg3_netdev_ops = {
14219 	.ndo_open		= tg3_open,
14220 	.ndo_stop		= tg3_close,
14221 	.ndo_start_xmit		= tg3_start_xmit,
14222 	.ndo_get_stats64	= tg3_get_stats64,
14223 	.ndo_validate_addr	= eth_validate_addr,
14224 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14225 	.ndo_set_mac_address	= tg3_set_mac_addr,
14226 	.ndo_do_ioctl		= tg3_ioctl,
14227 	.ndo_tx_timeout		= tg3_tx_timeout,
14228 	.ndo_change_mtu		= tg3_change_mtu,
14229 	.ndo_fix_features	= tg3_fix_features,
14230 	.ndo_set_features	= tg3_set_features,
14231 #ifdef CONFIG_NET_POLL_CONTROLLER
14232 	.ndo_poll_controller	= tg3_poll_controller,
14233 #endif
14234 };
14235 
14236 static void tg3_get_eeprom_size(struct tg3 *tp)
14237 {
14238 	u32 cursize, val, magic;
14239 
14240 	tp->nvram_size = EEPROM_CHIP_SIZE;
14241 
14242 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14243 		return;
14244 
14245 	if ((magic != TG3_EEPROM_MAGIC) &&
14246 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14247 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14248 		return;
14249 
14250 	/*
14251 	 * Size the chip by reading offsets at increasing powers of two.
14252 	 * When we encounter our validation signature, we know the addressing
14253 	 * has wrapped around, and thus have our chip size.
14254 	 */
14255 	cursize = 0x10;
14256 
14257 	while (cursize < tp->nvram_size) {
14258 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14259 			return;
14260 
14261 		if (val == magic)
14262 			break;
14263 
14264 		cursize <<= 1;
14265 	}
14266 
14267 	tp->nvram_size = cursize;
14268 }
14269 
14270 static void tg3_get_nvram_size(struct tg3 *tp)
14271 {
14272 	u32 val;
14273 
14274 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14275 		return;
14276 
14277 	/* Selfboot format */
14278 	if (val != TG3_EEPROM_MAGIC) {
14279 		tg3_get_eeprom_size(tp);
14280 		return;
14281 	}
14282 
14283 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14284 		if (val != 0) {
14285 			/* This is confusing.  We want to operate on the
14286 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14287 			 * call will read from NVRAM and byteswap the data
14288 			 * according to the byteswapping settings for all
14289 			 * other register accesses.  This ensures the data we
14290 			 * want will always reside in the lower 16-bits.
14291 			 * However, the data in NVRAM is in LE format, which
14292 			 * means the data from the NVRAM read will always be
14293 			 * opposite the endianness of the CPU.  The 16-bit
14294 			 * byteswap then brings the data to CPU endianness.
14295 			 */
14296 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14297 			return;
14298 		}
14299 	}
14300 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14301 }
14302 
14303 static void tg3_get_nvram_info(struct tg3 *tp)
14304 {
14305 	u32 nvcfg1;
14306 
14307 	nvcfg1 = tr32(NVRAM_CFG1);
14308 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14309 		tg3_flag_set(tp, FLASH);
14310 	} else {
14311 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14312 		tw32(NVRAM_CFG1, nvcfg1);
14313 	}
14314 
14315 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14316 	    tg3_flag(tp, 5780_CLASS)) {
14317 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14318 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14319 			tp->nvram_jedecnum = JEDEC_ATMEL;
14320 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14321 			tg3_flag_set(tp, NVRAM_BUFFERED);
14322 			break;
14323 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14324 			tp->nvram_jedecnum = JEDEC_ATMEL;
14325 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14326 			break;
14327 		case FLASH_VENDOR_ATMEL_EEPROM:
14328 			tp->nvram_jedecnum = JEDEC_ATMEL;
14329 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14330 			tg3_flag_set(tp, NVRAM_BUFFERED);
14331 			break;
14332 		case FLASH_VENDOR_ST:
14333 			tp->nvram_jedecnum = JEDEC_ST;
14334 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14335 			tg3_flag_set(tp, NVRAM_BUFFERED);
14336 			break;
14337 		case FLASH_VENDOR_SAIFUN:
14338 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14339 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14340 			break;
14341 		case FLASH_VENDOR_SST_SMALL:
14342 		case FLASH_VENDOR_SST_LARGE:
14343 			tp->nvram_jedecnum = JEDEC_SST;
14344 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14345 			break;
14346 		}
14347 	} else {
14348 		tp->nvram_jedecnum = JEDEC_ATMEL;
14349 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14350 		tg3_flag_set(tp, NVRAM_BUFFERED);
14351 	}
14352 }
14353 
14354 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14355 {
14356 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14357 	case FLASH_5752PAGE_SIZE_256:
14358 		tp->nvram_pagesize = 256;
14359 		break;
14360 	case FLASH_5752PAGE_SIZE_512:
14361 		tp->nvram_pagesize = 512;
14362 		break;
14363 	case FLASH_5752PAGE_SIZE_1K:
14364 		tp->nvram_pagesize = 1024;
14365 		break;
14366 	case FLASH_5752PAGE_SIZE_2K:
14367 		tp->nvram_pagesize = 2048;
14368 		break;
14369 	case FLASH_5752PAGE_SIZE_4K:
14370 		tp->nvram_pagesize = 4096;
14371 		break;
14372 	case FLASH_5752PAGE_SIZE_264:
14373 		tp->nvram_pagesize = 264;
14374 		break;
14375 	case FLASH_5752PAGE_SIZE_528:
14376 		tp->nvram_pagesize = 528;
14377 		break;
14378 	}
14379 }
14380 
14381 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14382 {
14383 	u32 nvcfg1;
14384 
14385 	nvcfg1 = tr32(NVRAM_CFG1);
14386 
14387 	/* NVRAM protection for TPM */
14388 	if (nvcfg1 & (1 << 27))
14389 		tg3_flag_set(tp, PROTECTED_NVRAM);
14390 
14391 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14392 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14393 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14394 		tp->nvram_jedecnum = JEDEC_ATMEL;
14395 		tg3_flag_set(tp, NVRAM_BUFFERED);
14396 		break;
14397 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14398 		tp->nvram_jedecnum = JEDEC_ATMEL;
14399 		tg3_flag_set(tp, NVRAM_BUFFERED);
14400 		tg3_flag_set(tp, FLASH);
14401 		break;
14402 	case FLASH_5752VENDOR_ST_M45PE10:
14403 	case FLASH_5752VENDOR_ST_M45PE20:
14404 	case FLASH_5752VENDOR_ST_M45PE40:
14405 		tp->nvram_jedecnum = JEDEC_ST;
14406 		tg3_flag_set(tp, NVRAM_BUFFERED);
14407 		tg3_flag_set(tp, FLASH);
14408 		break;
14409 	}
14410 
14411 	if (tg3_flag(tp, FLASH)) {
14412 		tg3_nvram_get_pagesize(tp, nvcfg1);
14413 	} else {
14414 		/* For eeprom, set pagesize to maximum eeprom size */
14415 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14416 
14417 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14418 		tw32(NVRAM_CFG1, nvcfg1);
14419 	}
14420 }
14421 
14422 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14423 {
14424 	u32 nvcfg1, protect = 0;
14425 
14426 	nvcfg1 = tr32(NVRAM_CFG1);
14427 
14428 	/* NVRAM protection for TPM */
14429 	if (nvcfg1 & (1 << 27)) {
14430 		tg3_flag_set(tp, PROTECTED_NVRAM);
14431 		protect = 1;
14432 	}
14433 
14434 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14435 	switch (nvcfg1) {
14436 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14437 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14438 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14439 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14440 		tp->nvram_jedecnum = JEDEC_ATMEL;
14441 		tg3_flag_set(tp, NVRAM_BUFFERED);
14442 		tg3_flag_set(tp, FLASH);
14443 		tp->nvram_pagesize = 264;
14444 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14445 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14446 			tp->nvram_size = (protect ? 0x3e200 :
14447 					  TG3_NVRAM_SIZE_512KB);
14448 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14449 			tp->nvram_size = (protect ? 0x1f200 :
14450 					  TG3_NVRAM_SIZE_256KB);
14451 		else
14452 			tp->nvram_size = (protect ? 0x1f200 :
14453 					  TG3_NVRAM_SIZE_128KB);
14454 		break;
14455 	case FLASH_5752VENDOR_ST_M45PE10:
14456 	case FLASH_5752VENDOR_ST_M45PE20:
14457 	case FLASH_5752VENDOR_ST_M45PE40:
14458 		tp->nvram_jedecnum = JEDEC_ST;
14459 		tg3_flag_set(tp, NVRAM_BUFFERED);
14460 		tg3_flag_set(tp, FLASH);
14461 		tp->nvram_pagesize = 256;
14462 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14463 			tp->nvram_size = (protect ?
14464 					  TG3_NVRAM_SIZE_64KB :
14465 					  TG3_NVRAM_SIZE_128KB);
14466 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14467 			tp->nvram_size = (protect ?
14468 					  TG3_NVRAM_SIZE_64KB :
14469 					  TG3_NVRAM_SIZE_256KB);
14470 		else
14471 			tp->nvram_size = (protect ?
14472 					  TG3_NVRAM_SIZE_128KB :
14473 					  TG3_NVRAM_SIZE_512KB);
14474 		break;
14475 	}
14476 }
14477 
14478 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14479 {
14480 	u32 nvcfg1;
14481 
14482 	nvcfg1 = tr32(NVRAM_CFG1);
14483 
14484 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14485 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14486 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14487 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14488 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14489 		tp->nvram_jedecnum = JEDEC_ATMEL;
14490 		tg3_flag_set(tp, NVRAM_BUFFERED);
14491 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14492 
14493 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14494 		tw32(NVRAM_CFG1, nvcfg1);
14495 		break;
14496 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14497 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14498 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14499 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14500 		tp->nvram_jedecnum = JEDEC_ATMEL;
14501 		tg3_flag_set(tp, NVRAM_BUFFERED);
14502 		tg3_flag_set(tp, FLASH);
14503 		tp->nvram_pagesize = 264;
14504 		break;
14505 	case FLASH_5752VENDOR_ST_M45PE10:
14506 	case FLASH_5752VENDOR_ST_M45PE20:
14507 	case FLASH_5752VENDOR_ST_M45PE40:
14508 		tp->nvram_jedecnum = JEDEC_ST;
14509 		tg3_flag_set(tp, NVRAM_BUFFERED);
14510 		tg3_flag_set(tp, FLASH);
14511 		tp->nvram_pagesize = 256;
14512 		break;
14513 	}
14514 }
14515 
14516 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14517 {
14518 	u32 nvcfg1, protect = 0;
14519 
14520 	nvcfg1 = tr32(NVRAM_CFG1);
14521 
14522 	/* NVRAM protection for TPM */
14523 	if (nvcfg1 & (1 << 27)) {
14524 		tg3_flag_set(tp, PROTECTED_NVRAM);
14525 		protect = 1;
14526 	}
14527 
14528 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14529 	switch (nvcfg1) {
14530 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14531 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14532 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14533 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14534 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14535 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14536 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14537 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14538 		tp->nvram_jedecnum = JEDEC_ATMEL;
14539 		tg3_flag_set(tp, NVRAM_BUFFERED);
14540 		tg3_flag_set(tp, FLASH);
14541 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14542 		tp->nvram_pagesize = 256;
14543 		break;
14544 	case FLASH_5761VENDOR_ST_A_M45PE20:
14545 	case FLASH_5761VENDOR_ST_A_M45PE40:
14546 	case FLASH_5761VENDOR_ST_A_M45PE80:
14547 	case FLASH_5761VENDOR_ST_A_M45PE16:
14548 	case FLASH_5761VENDOR_ST_M_M45PE20:
14549 	case FLASH_5761VENDOR_ST_M_M45PE40:
14550 	case FLASH_5761VENDOR_ST_M_M45PE80:
14551 	case FLASH_5761VENDOR_ST_M_M45PE16:
14552 		tp->nvram_jedecnum = JEDEC_ST;
14553 		tg3_flag_set(tp, NVRAM_BUFFERED);
14554 		tg3_flag_set(tp, FLASH);
14555 		tp->nvram_pagesize = 256;
14556 		break;
14557 	}
14558 
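	/* With TPM protection active, take the NVRAM size from the
	 * address-lockout register instead of deriving it from the flash
	 * part selected above.
	 */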
14559 	if (protect) {
14560 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14561 	} else {
14562 		switch (nvcfg1) {
14563 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14564 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14565 		case FLASH_5761VENDOR_ST_A_M45PE16:
14566 		case FLASH_5761VENDOR_ST_M_M45PE16:
14567 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14568 			break;
14569 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14570 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14571 		case FLASH_5761VENDOR_ST_A_M45PE80:
14572 		case FLASH_5761VENDOR_ST_M_M45PE80:
14573 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14574 			break;
14575 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14576 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14577 		case FLASH_5761VENDOR_ST_A_M45PE40:
14578 		case FLASH_5761VENDOR_ST_M_M45PE40:
14579 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14580 			break;
14581 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14582 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14583 		case FLASH_5761VENDOR_ST_A_M45PE20:
14584 		case FLASH_5761VENDOR_ST_M_M45PE20:
14585 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14586 			break;
14587 		}
14588 	}
14589 }
14590 
14591 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14592 {
14593 	tp->nvram_jedecnum = JEDEC_ATMEL;
14594 	tg3_flag_set(tp, NVRAM_BUFFERED);
14595 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14596 }
14597 
14598 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14599 {
14600 	u32 nvcfg1;
14601 
14602 	nvcfg1 = tr32(NVRAM_CFG1);
14603 
14604 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14605 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14606 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14607 		tp->nvram_jedecnum = JEDEC_ATMEL;
14608 		tg3_flag_set(tp, NVRAM_BUFFERED);
14609 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14610 
14611 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14612 		tw32(NVRAM_CFG1, nvcfg1);
14613 		return;
14614 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14615 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14616 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14617 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14618 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14619 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14620 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14621 		tp->nvram_jedecnum = JEDEC_ATMEL;
14622 		tg3_flag_set(tp, NVRAM_BUFFERED);
14623 		tg3_flag_set(tp, FLASH);
14624 
14625 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14626 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14627 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14628 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14629 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14630 			break;
14631 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14632 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14633 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14634 			break;
14635 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14636 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14637 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14638 			break;
14639 		}
14640 		break;
14641 	case FLASH_5752VENDOR_ST_M45PE10:
14642 	case FLASH_5752VENDOR_ST_M45PE20:
14643 	case FLASH_5752VENDOR_ST_M45PE40:
14644 		tp->nvram_jedecnum = JEDEC_ST;
14645 		tg3_flag_set(tp, NVRAM_BUFFERED);
14646 		tg3_flag_set(tp, FLASH);
14647 
14648 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14649 		case FLASH_5752VENDOR_ST_M45PE10:
14650 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14651 			break;
14652 		case FLASH_5752VENDOR_ST_M45PE20:
14653 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14654 			break;
14655 		case FLASH_5752VENDOR_ST_M45PE40:
14656 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14657 			break;
14658 		}
14659 		break;
14660 	default:
14661 		tg3_flag_set(tp, NO_NVRAM);
14662 		return;
14663 	}
14664 
14665 	tg3_nvram_get_pagesize(tp, nvcfg1);
14666 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14667 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14668 }
14669 
14670 
14671 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14672 {
14673 	u32 nvcfg1;
14674 
14675 	nvcfg1 = tr32(NVRAM_CFG1);
14676 
14677 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14678 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14679 	case FLASH_5717VENDOR_MICRO_EEPROM:
14680 		tp->nvram_jedecnum = JEDEC_ATMEL;
14681 		tg3_flag_set(tp, NVRAM_BUFFERED);
14682 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14683 
14684 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14685 		tw32(NVRAM_CFG1, nvcfg1);
14686 		return;
14687 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14688 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14689 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14690 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14691 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14692 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14693 	case FLASH_5717VENDOR_ATMEL_45USPT:
14694 		tp->nvram_jedecnum = JEDEC_ATMEL;
14695 		tg3_flag_set(tp, NVRAM_BUFFERED);
14696 		tg3_flag_set(tp, FLASH);
14697 
14698 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14699 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14700 			/* Detect size with tg3_nvram_get_size() */
14701 			break;
14702 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14703 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14704 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14705 			break;
14706 		default:
14707 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14708 			break;
14709 		}
14710 		break;
14711 	case FLASH_5717VENDOR_ST_M_M25PE10:
14712 	case FLASH_5717VENDOR_ST_A_M25PE10:
14713 	case FLASH_5717VENDOR_ST_M_M45PE10:
14714 	case FLASH_5717VENDOR_ST_A_M45PE10:
14715 	case FLASH_5717VENDOR_ST_M_M25PE20:
14716 	case FLASH_5717VENDOR_ST_A_M25PE20:
14717 	case FLASH_5717VENDOR_ST_M_M45PE20:
14718 	case FLASH_5717VENDOR_ST_A_M45PE20:
14719 	case FLASH_5717VENDOR_ST_25USPT:
14720 	case FLASH_5717VENDOR_ST_45USPT:
14721 		tp->nvram_jedecnum = JEDEC_ST;
14722 		tg3_flag_set(tp, NVRAM_BUFFERED);
14723 		tg3_flag_set(tp, FLASH);
14724 
14725 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14726 		case FLASH_5717VENDOR_ST_M_M25PE20:
14727 		case FLASH_5717VENDOR_ST_M_M45PE20:
14728 			/* Detect size with tg3_nvram_get_size() */
14729 			break;
14730 		case FLASH_5717VENDOR_ST_A_M25PE20:
14731 		case FLASH_5717VENDOR_ST_A_M45PE20:
14732 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14733 			break;
14734 		default:
14735 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14736 			break;
14737 		}
14738 		break;
14739 	default:
14740 		tg3_flag_set(tp, NO_NVRAM);
14741 		return;
14742 	}
14743 
14744 	tg3_nvram_get_pagesize(tp, nvcfg1);
14745 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14746 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14747 }
14748 
14749 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14750 {
14751 	u32 nvcfg1, nvmpinstrp;
14752 
14753 	nvcfg1 = tr32(NVRAM_CFG1);
14754 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14755 
14756 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14757 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14758 			tg3_flag_set(tp, NO_NVRAM);
14759 			return;
14760 		}
14761 
14762 		switch (nvmpinstrp) {
14763 		case FLASH_5762_EEPROM_HD:
14764 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14765 			break;
14766 		case FLASH_5762_EEPROM_LD:
14767 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14768 			break;
14769 		case FLASH_5720VENDOR_M_ST_M45PE20:
14770 			/* This pinstrap supports multiple sizes, so force it
14771 			 * to read the actual size from location 0xf0.
14772 			 */
14773 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14774 			break;
14775 		}
14776 	}
14777 
14778 	switch (nvmpinstrp) {
14779 	case FLASH_5720_EEPROM_HD:
14780 	case FLASH_5720_EEPROM_LD:
14781 		tp->nvram_jedecnum = JEDEC_ATMEL;
14782 		tg3_flag_set(tp, NVRAM_BUFFERED);
14783 
14784 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14785 		tw32(NVRAM_CFG1, nvcfg1);
14786 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14787 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14788 		else
14789 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14790 		return;
14791 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14792 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14793 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14794 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14795 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14796 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14797 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14798 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14799 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14800 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14801 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14802 	case FLASH_5720VENDOR_ATMEL_45USPT:
14803 		tp->nvram_jedecnum = JEDEC_ATMEL;
14804 		tg3_flag_set(tp, NVRAM_BUFFERED);
14805 		tg3_flag_set(tp, FLASH);
14806 
14807 		switch (nvmpinstrp) {
14808 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14809 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14810 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14811 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14812 			break;
14813 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14814 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14815 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14816 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14817 			break;
14818 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14819 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14820 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14821 			break;
14822 		default:
14823 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14824 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14825 			break;
14826 		}
14827 		break;
14828 	case FLASH_5720VENDOR_M_ST_M25PE10:
14829 	case FLASH_5720VENDOR_M_ST_M45PE10:
14830 	case FLASH_5720VENDOR_A_ST_M25PE10:
14831 	case FLASH_5720VENDOR_A_ST_M45PE10:
14832 	case FLASH_5720VENDOR_M_ST_M25PE20:
14833 	case FLASH_5720VENDOR_M_ST_M45PE20:
14834 	case FLASH_5720VENDOR_A_ST_M25PE20:
14835 	case FLASH_5720VENDOR_A_ST_M45PE20:
14836 	case FLASH_5720VENDOR_M_ST_M25PE40:
14837 	case FLASH_5720VENDOR_M_ST_M45PE40:
14838 	case FLASH_5720VENDOR_A_ST_M25PE40:
14839 	case FLASH_5720VENDOR_A_ST_M45PE40:
14840 	case FLASH_5720VENDOR_M_ST_M25PE80:
14841 	case FLASH_5720VENDOR_M_ST_M45PE80:
14842 	case FLASH_5720VENDOR_A_ST_M25PE80:
14843 	case FLASH_5720VENDOR_A_ST_M45PE80:
14844 	case FLASH_5720VENDOR_ST_25USPT:
14845 	case FLASH_5720VENDOR_ST_45USPT:
14846 		tp->nvram_jedecnum = JEDEC_ST;
14847 		tg3_flag_set(tp, NVRAM_BUFFERED);
14848 		tg3_flag_set(tp, FLASH);
14849 
14850 		switch (nvmpinstrp) {
14851 		case FLASH_5720VENDOR_M_ST_M25PE20:
14852 		case FLASH_5720VENDOR_M_ST_M45PE20:
14853 		case FLASH_5720VENDOR_A_ST_M25PE20:
14854 		case FLASH_5720VENDOR_A_ST_M45PE20:
14855 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14856 			break;
14857 		case FLASH_5720VENDOR_M_ST_M25PE40:
14858 		case FLASH_5720VENDOR_M_ST_M45PE40:
14859 		case FLASH_5720VENDOR_A_ST_M25PE40:
14860 		case FLASH_5720VENDOR_A_ST_M45PE40:
14861 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14862 			break;
14863 		case FLASH_5720VENDOR_M_ST_M25PE80:
14864 		case FLASH_5720VENDOR_M_ST_M45PE80:
14865 		case FLASH_5720VENDOR_A_ST_M25PE80:
14866 		case FLASH_5720VENDOR_A_ST_M45PE80:
14867 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14868 			break;
14869 		default:
14870 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14871 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14872 			break;
14873 		}
14874 		break;
14875 	default:
14876 		tg3_flag_set(tp, NO_NVRAM);
14877 		return;
14878 	}
14879 
14880 	tg3_nvram_get_pagesize(tp, nvcfg1);
14881 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14882 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14883 
14884 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14885 		u32 val;
14886 
14887 		if (tg3_nvram_read(tp, 0, &val))
14888 			return;
14889 
14890 		if (val != TG3_EEPROM_MAGIC &&
14891 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14892 			tg3_flag_set(tp, NO_NVRAM);
14893 	}
14894 }
14895 
14896 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14897 static void tg3_nvram_init(struct tg3 *tp)
14898 {
14899 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14901 		tg3_flag_clear(tp, NVRAM);
14902 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14903 		tg3_flag_set(tp, NO_NVRAM);
14904 		return;
14905 	}
14906 
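	/* Reset the EEPROM access state machine and program the default
	 * serial clock period before touching the interface.
	 */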
14907 	tw32_f(GRC_EEPROM_ADDR,
14908 	     (EEPROM_ADDR_FSM_RESET |
14909 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14910 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14911 
14912 	msleep(1);
14913 
14914 	/* Enable seeprom accesses. */
14915 	tw32_f(GRC_LOCAL_CTRL,
14916 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14917 	udelay(100);
14918 
14919 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14920 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14921 		tg3_flag_set(tp, NVRAM);
14922 
14923 		if (tg3_nvram_lock(tp)) {
14924 			netdev_warn(tp->dev,
14925 				    "Cannot get nvram lock, %s failed\n",
14926 				    __func__);
14927 			return;
14928 		}
14929 		tg3_enable_nvram_access(tp);
14930 
14931 		tp->nvram_size = 0;
14932 
14933 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14934 			tg3_get_5752_nvram_info(tp);
14935 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14936 			tg3_get_5755_nvram_info(tp);
14937 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14938 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14939 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14940 			tg3_get_5787_nvram_info(tp);
14941 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14942 			tg3_get_5761_nvram_info(tp);
14943 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14944 			tg3_get_5906_nvram_info(tp);
14945 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14946 			 tg3_flag(tp, 57765_CLASS))
14947 			tg3_get_57780_nvram_info(tp);
14948 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14949 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14950 			tg3_get_5717_nvram_info(tp);
14951 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14952 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14953 			tg3_get_5720_nvram_info(tp);
14954 		else
14955 			tg3_get_nvram_info(tp);
14956 
14957 		if (tp->nvram_size == 0)
14958 			tg3_get_nvram_size(tp);
14959 
14960 		tg3_disable_nvram_access(tp);
14961 		tg3_nvram_unlock(tp);
14962 
14963 	} else {
14964 		tg3_flag_clear(tp, NVRAM);
14965 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14966 
14967 		tg3_get_eeprom_size(tp);
14968 	}
14969 }
14970 
14971 struct subsys_tbl_ent {
14972 	u16 subsys_vendor, subsys_devid;
14973 	u32 phy_id;
14974 };
14975 
14976 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14977 	/* Broadcom boards. */
14978 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14979 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14980 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14981 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14982 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14983 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14984 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14985 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14986 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14987 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14988 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14989 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14990 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14991 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14992 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14993 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14994 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14995 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14996 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14997 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14998 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14999 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15000 
15001 	/* 3com boards. */
15002 	{ TG3PCI_SUBVENDOR_ID_3COM,
15003 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15004 	{ TG3PCI_SUBVENDOR_ID_3COM,
15005 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15006 	{ TG3PCI_SUBVENDOR_ID_3COM,
15007 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15008 	{ TG3PCI_SUBVENDOR_ID_3COM,
15009 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15010 	{ TG3PCI_SUBVENDOR_ID_3COM,
15011 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15012 
15013 	/* DELL boards. */
15014 	{ TG3PCI_SUBVENDOR_ID_DELL,
15015 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15016 	{ TG3PCI_SUBVENDOR_ID_DELL,
15017 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15018 	{ TG3PCI_SUBVENDOR_ID_DELL,
15019 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15020 	{ TG3PCI_SUBVENDOR_ID_DELL,
15021 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15022 
15023 	/* Compaq boards. */
15024 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15025 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15026 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15027 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15028 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15029 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15030 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15031 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15032 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15033 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15034 
15035 	/* IBM boards. */
15036 	{ TG3PCI_SUBVENDOR_ID_IBM,
15037 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15038 };
15039 
15040 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15041 {
15042 	int i;
15043 
15044 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15045 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15046 		     tp->pdev->subsystem_vendor) &&
15047 		    (subsys_id_to_phy_id[i].subsys_devid ==
15048 		     tp->pdev->subsystem_device))
15049 			return &subsys_id_to_phy_id[i];
15050 	}
15051 	return NULL;
15052 }
15053 
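/* Pull the hardware configuration that the bootcode leaves in NIC SRAM
 * (valid only when the NIC_SRAM_DATA_SIG magic is present) and derive the
 * PHY ID, LED mode, WOL capability and the ASF/APE related flags from it.
 */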
15054 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15055 {
15056 	u32 val;
15057 
15058 	tp->phy_id = TG3_PHY_ID_INVALID;
15059 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15060 
	/* Assume an onboard, WOL-capable device by default.  */
15062 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15063 	tg3_flag_set(tp, WOL_CAP);
15064 
15065 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15066 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15067 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15068 			tg3_flag_set(tp, IS_NIC);
15069 		}
15070 		val = tr32(VCPU_CFGSHDW);
15071 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15072 			tg3_flag_set(tp, ASPM_WORKAROUND);
15073 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15074 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15075 			tg3_flag_set(tp, WOL_ENABLE);
15076 			device_set_wakeup_enable(&tp->pdev->dev, true);
15077 		}
15078 		goto done;
15079 	}
15080 
15081 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15082 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15083 		u32 nic_cfg, led_cfg;
15084 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15085 		u32 nic_phy_id, ver, eeprom_phy_id;
15086 		int eeprom_phy_serdes = 0;
15087 
15088 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15089 		tp->nic_sram_data_cfg = nic_cfg;
15090 
15091 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15092 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15093 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15094 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15095 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15096 		    (ver > 0) && (ver < 0x100))
15097 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15098 
15099 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15100 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15101 
15102 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15103 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15104 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15105 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15106 
15107 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15108 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15109 			eeprom_phy_serdes = 1;
15110 
15111 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15112 		if (nic_phy_id != 0) {
15113 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15114 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15115 
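			/* Rebuild the PHY ID using the same bit layout that
			 * tg3_phy_probe() derives from MII_PHYSID1/2.
			 */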
15116 			eeprom_phy_id  = (id1 >> 16) << 10;
15117 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15118 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15119 		} else
15120 			eeprom_phy_id = 0;
15121 
15122 		tp->phy_id = eeprom_phy_id;
15123 		if (eeprom_phy_serdes) {
15124 			if (!tg3_flag(tp, 5705_PLUS))
15125 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15126 			else
15127 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15128 		}
15129 
15130 		if (tg3_flag(tp, 5750_PLUS))
15131 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15132 				    SHASTA_EXT_LED_MODE_MASK);
15133 		else
15134 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15135 
15136 		switch (led_cfg) {
15137 		default:
15138 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15139 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15140 			break;
15141 
15142 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15143 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15144 			break;
15145 
15146 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15147 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15148 
			/* Some older 5700/5701 bootcode reports 0
			 * (MAC_MODE) here; default to PHY_1_MODE in
			 * that case.
			 */
15152 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15153 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15154 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15155 
15156 			break;
15157 
15158 		case SHASTA_EXT_LED_SHARED:
15159 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15160 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15161 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15162 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15163 						 LED_CTRL_MODE_PHY_2);
15164 
15165 			if (tg3_flag(tp, 5717_PLUS) ||
15166 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15167 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15168 						LED_CTRL_BLINK_RATE_MASK;
15169 
15170 			break;
15171 
15172 		case SHASTA_EXT_LED_MAC:
15173 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15174 			break;
15175 
15176 		case SHASTA_EXT_LED_COMBO:
15177 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15178 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15179 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15180 						 LED_CTRL_MODE_PHY_2);
15181 			break;
15182 
15183 		}
15184 
15185 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15186 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15187 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15188 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15189 
15190 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15191 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15192 
15193 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15194 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15195 			if ((tp->pdev->subsystem_vendor ==
15196 			     PCI_VENDOR_ID_ARIMA) &&
15197 			    (tp->pdev->subsystem_device == 0x205a ||
15198 			     tp->pdev->subsystem_device == 0x2063))
15199 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15200 		} else {
15201 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15202 			tg3_flag_set(tp, IS_NIC);
15203 		}
15204 
15205 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15206 			tg3_flag_set(tp, ENABLE_ASF);
15207 			if (tg3_flag(tp, 5750_PLUS))
15208 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15209 		}
15210 
15211 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15212 		    tg3_flag(tp, 5750_PLUS))
15213 			tg3_flag_set(tp, ENABLE_APE);
15214 
15215 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15216 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15217 			tg3_flag_clear(tp, WOL_CAP);
15218 
15219 		if (tg3_flag(tp, WOL_CAP) &&
15220 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15221 			tg3_flag_set(tp, WOL_ENABLE);
15222 			device_set_wakeup_enable(&tp->pdev->dev, true);
15223 		}
15224 
15225 		if (cfg2 & (1 << 17))
15226 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15227 
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
15230 		if (cfg2 & (1 << 18))
15231 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15232 
15233 		if ((tg3_flag(tp, 57765_PLUS) ||
15234 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15235 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15236 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15237 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15238 
15239 		if (tg3_flag(tp, PCI_EXPRESS)) {
15240 			u32 cfg3;
15241 
15242 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15243 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15244 			    !tg3_flag(tp, 57765_PLUS) &&
15245 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15246 				tg3_flag_set(tp, ASPM_WORKAROUND);
15247 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15248 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15249 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15250 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15251 		}
15252 
15253 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15254 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15255 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15256 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15257 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15258 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15259 
15260 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15261 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15262 	}
15263 done:
15264 	if (tg3_flag(tp, WOL_CAP))
15265 		device_set_wakeup_enable(&tp->pdev->dev,
15266 					 tg3_flag(tp, WOL_ENABLE));
15267 	else
15268 		device_set_wakeup_capable(&tp->pdev->dev, false);
15269 }
15270 
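/* Read one 32-bit word from the chip's OTP through the APE registers:
 * grab the NVRAM lock, program the OTP address, issue a read command and
 * poll OTP_STATUS for completion (roughly 1 ms worst case) before
 * releasing the lock.
 */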
15271 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15272 {
15273 	int i, err;
15274 	u32 val2, off = offset * 8;
15275 
15276 	err = tg3_nvram_lock(tp);
15277 	if (err)
15278 		return err;
15279 
15280 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15281 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15282 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15283 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15284 	udelay(10);
15285 
15286 	for (i = 0; i < 100; i++) {
15287 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15288 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15289 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15290 			break;
15291 		}
15292 		udelay(10);
15293 	}
15294 
15295 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15296 
15297 	tg3_nvram_unlock(tp);
15298 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15299 		return 0;
15300 
15301 	return -EBUSY;
15302 }
15303 
15304 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15305 {
15306 	int i;
15307 	u32 val;
15308 
15309 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15310 	tw32(OTP_CTRL, cmd);
15311 
15312 	/* Wait for up to 1 ms for command to execute. */
15313 	for (i = 0; i < 100; i++) {
15314 		val = tr32(OTP_STATUS);
15315 		if (val & OTP_STATUS_CMD_DONE)
15316 			break;
15317 		udelay(10);
15318 	}
15319 
15320 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15321 }
15322 
15323 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15324  * configuration is a 32-bit value that straddles the alignment boundary.
15325  * We do two 32-bit reads and then shift and merge the results.
15326  */
15327 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15328 {
15329 	u32 bhalf_otp, thalf_otp;
15330 
15331 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15332 
15333 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15334 		return 0;
15335 
15336 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15337 
15338 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15339 		return 0;
15340 
15341 	thalf_otp = tr32(OTP_READ_DATA);
15342 
15343 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15344 
15345 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15346 		return 0;
15347 
15348 	bhalf_otp = tr32(OTP_READ_DATA);
15349 
15350 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15351 }
15352 
15353 static void tg3_phy_init_link_config(struct tg3 *tp)
15354 {
15355 	u32 adv = ADVERTISED_Autoneg;
15356 
15357 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15358 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15359 			adv |= ADVERTISED_1000baseT_Half;
15360 		adv |= ADVERTISED_1000baseT_Full;
15361 	}
15362 
15363 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15364 		adv |= ADVERTISED_100baseT_Half |
15365 		       ADVERTISED_100baseT_Full |
15366 		       ADVERTISED_10baseT_Half |
15367 		       ADVERTISED_10baseT_Full |
15368 		       ADVERTISED_TP;
15369 	else
15370 		adv |= ADVERTISED_FIBRE;
15371 
15372 	tp->link_config.advertising = adv;
15373 	tp->link_config.speed = SPEED_UNKNOWN;
15374 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15375 	tp->link_config.autoneg = AUTONEG_ENABLE;
15376 	tp->link_config.active_speed = SPEED_UNKNOWN;
15377 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15378 
15379 	tp->old_link = -1;
15380 }
15381 
15382 static int tg3_phy_probe(struct tg3 *tp)
15383 {
15384 	u32 hw_phy_id_1, hw_phy_id_2;
15385 	u32 hw_phy_id, hw_phy_id_masked;
15386 	int err;
15387 
15388 	/* flow control autonegotiation is default behavior */
15389 	tg3_flag_set(tp, PAUSE_AUTONEG);
15390 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15391 
15392 	if (tg3_flag(tp, ENABLE_APE)) {
15393 		switch (tp->pci_fn) {
15394 		case 0:
15395 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15396 			break;
15397 		case 1:
15398 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15399 			break;
15400 		case 2:
15401 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15402 			break;
15403 		case 3:
15404 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15405 			break;
15406 		}
15407 	}
15408 
15409 	if (!tg3_flag(tp, ENABLE_ASF) &&
15410 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15411 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15412 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15413 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15414 
15415 	if (tg3_flag(tp, USE_PHYLIB))
15416 		return tg3_phy_init(tp);
15417 
15418 	/* Reading the PHY ID register can conflict with ASF
15419 	 * firmware access to the PHY hardware.
15420 	 */
15421 	err = 0;
15422 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15423 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15424 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the value found in the EEPROM area and, failing that,
		 * the hard-coded subsystem-ID table.
		 */
15430 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15431 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15432 
15433 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15434 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15435 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15436 
15437 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15438 	}
15439 
15440 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15441 		tp->phy_id = hw_phy_id;
15442 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15443 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15444 		else
15445 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15446 	} else {
15447 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15448 			/* Do nothing, phy ID already set up in
15449 			 * tg3_get_eeprom_hw_cfg().
15450 			 */
15451 		} else {
15452 			struct subsys_tbl_ent *p;
15453 
15454 			/* No eeprom signature?  Try the hardcoded
15455 			 * subsys device table.
15456 			 */
15457 			p = tg3_lookup_by_subsys(tp);
15458 			if (p) {
15459 				tp->phy_id = p->phy_id;
15460 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
15468 				return -ENODEV;
15469 			}
15470 
15471 			if (!tp->phy_id ||
15472 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15473 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15474 		}
15475 	}
15476 
15477 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15478 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15479 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15480 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15481 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15482 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15483 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15484 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15485 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15486 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15487 
15488 		tp->eee.supported = SUPPORTED_100baseT_Full |
15489 				    SUPPORTED_1000baseT_Full;
15490 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15491 				     ADVERTISED_1000baseT_Full;
15492 		tp->eee.eee_enabled = 1;
15493 		tp->eee.tx_lpi_enabled = 1;
15494 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15495 	}
15496 
15497 	tg3_phy_init_link_config(tp);
15498 
15499 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15500 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15501 	    !tg3_flag(tp, ENABLE_APE) &&
15502 	    !tg3_flag(tp, ENABLE_ASF)) {
15503 		u32 bmsr, dummy;
15504 
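		/* BMSR link status is latched low, so read it twice; the
		 * first read clears any stale link-down indication before
		 * BMSR_LSTATUS is tested.
		 */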
15505 		tg3_readphy(tp, MII_BMSR, &bmsr);
15506 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15507 		    (bmsr & BMSR_LSTATUS))
15508 			goto skip_phy_reset;
15509 
15510 		err = tg3_phy_reset(tp);
15511 		if (err)
15512 			return err;
15513 
15514 		tg3_phy_set_wirespeed(tp);
15515 
15516 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15517 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15518 					    tp->link_config.flowctrl);
15519 
15520 			tg3_writephy(tp, MII_BMCR,
15521 				     BMCR_ANENABLE | BMCR_ANRESTART);
15522 		}
15523 	}
15524 
15525 skip_phy_reset:
15526 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15527 		err = tg3_init_5401phy_dsp(tp);
15528 		if (err)
15529 			return err;
15530 
15531 		err = tg3_init_5401phy_dsp(tp);
15532 	}
15533 
15534 	return err;
15535 }
15536 
15537 static void tg3_read_vpd(struct tg3 *tp)
15538 {
15539 	u8 *vpd_data;
15540 	unsigned int block_end, rosize, len;
15541 	u32 vpdlen;
15542 	int j, i = 0;
15543 
15544 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15545 	if (!vpd_data)
15546 		goto out_no_vpd;
15547 
15548 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15549 	if (i < 0)
15550 		goto out_not_found;
15551 
15552 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15553 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15554 	i += PCI_VPD_LRDT_TAG_SIZE;
15555 
15556 	if (block_end > vpdlen)
15557 		goto out_not_found;
15558 
15559 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15560 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15561 	if (j > 0) {
15562 		len = pci_vpd_info_field_size(&vpd_data[j]);
15563 
15564 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15565 		if (j + len > block_end || len != 4 ||
15566 		    memcmp(&vpd_data[j], "1028", 4))
15567 			goto partno;
15568 
15569 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15570 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15571 		if (j < 0)
15572 			goto partno;
15573 
15574 		len = pci_vpd_info_field_size(&vpd_data[j]);
15575 
15576 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15577 		if (j + len > block_end)
15578 			goto partno;
15579 
15580 		if (len >= sizeof(tp->fw_ver))
15581 			len = sizeof(tp->fw_ver) - 1;
15582 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15583 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15584 			 &vpd_data[j]);
15585 	}
15586 
15587 partno:
15588 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15589 				      PCI_VPD_RO_KEYWORD_PARTNO);
15590 	if (i < 0)
15591 		goto out_not_found;
15592 
15593 	len = pci_vpd_info_field_size(&vpd_data[i]);
15594 
15595 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15596 	if (len > TG3_BPN_SIZE ||
15597 	    (len + i) > vpdlen)
15598 		goto out_not_found;
15599 
15600 	memcpy(tp->board_part_number, &vpd_data[i], len);
15601 
15602 out_not_found:
15603 	kfree(vpd_data);
15604 	if (tp->board_part_number[0])
15605 		return;
15606 
15607 out_no_vpd:
15608 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15609 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15610 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15611 			strcpy(tp->board_part_number, "BCM5717");
15612 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15613 			strcpy(tp->board_part_number, "BCM5718");
15614 		else
15615 			goto nomatch;
15616 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15617 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15618 			strcpy(tp->board_part_number, "BCM57780");
15619 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15620 			strcpy(tp->board_part_number, "BCM57760");
15621 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15622 			strcpy(tp->board_part_number, "BCM57790");
15623 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15624 			strcpy(tp->board_part_number, "BCM57788");
15625 		else
15626 			goto nomatch;
15627 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15628 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15629 			strcpy(tp->board_part_number, "BCM57761");
15630 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15631 			strcpy(tp->board_part_number, "BCM57765");
15632 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15633 			strcpy(tp->board_part_number, "BCM57781");
15634 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15635 			strcpy(tp->board_part_number, "BCM57785");
15636 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15637 			strcpy(tp->board_part_number, "BCM57791");
15638 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15639 			strcpy(tp->board_part_number, "BCM57795");
15640 		else
15641 			goto nomatch;
15642 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15643 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15644 			strcpy(tp->board_part_number, "BCM57762");
15645 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15646 			strcpy(tp->board_part_number, "BCM57766");
15647 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15648 			strcpy(tp->board_part_number, "BCM57782");
15649 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15650 			strcpy(tp->board_part_number, "BCM57786");
15651 		else
15652 			goto nomatch;
15653 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15654 		strcpy(tp->board_part_number, "BCM95906");
15655 	} else {
15656 nomatch:
15657 		strcpy(tp->board_part_number, "none");
15658 	}
15659 }
15660 
15661 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15662 {
15663 	u32 val;
15664 
15665 	if (tg3_nvram_read(tp, offset, &val) ||
15666 	    (val & 0xfc000000) != 0x0c000000 ||
15667 	    tg3_nvram_read(tp, offset + 4, &val) ||
15668 	    val != 0)
15669 		return 0;
15670 
15671 	return 1;
15672 }
15673 
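/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte ASCII version string; older ones only store a packed
 * major/minor word, which is printed as "vM.mm".
 */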
15674 static void tg3_read_bc_ver(struct tg3 *tp)
15675 {
15676 	u32 val, offset, start, ver_offset;
15677 	int i, dst_off;
15678 	bool newver = false;
15679 
15680 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15681 	    tg3_nvram_read(tp, 0x4, &start))
15682 		return;
15683 
15684 	offset = tg3_nvram_logical_addr(tp, offset);
15685 
15686 	if (tg3_nvram_read(tp, offset, &val))
15687 		return;
15688 
15689 	if ((val & 0xfc000000) == 0x0c000000) {
15690 		if (tg3_nvram_read(tp, offset + 4, &val))
15691 			return;
15692 
15693 		if (val == 0)
15694 			newver = true;
15695 	}
15696 
15697 	dst_off = strlen(tp->fw_ver);
15698 
15699 	if (newver) {
15700 		if (TG3_VER_SIZE - dst_off < 16 ||
15701 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15702 			return;
15703 
15704 		offset = offset + ver_offset - start;
15705 		for (i = 0; i < 16; i += 4) {
15706 			__be32 v;
15707 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15708 				return;
15709 
15710 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15711 		}
15712 	} else {
15713 		u32 major, minor;
15714 
15715 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15716 			return;
15717 
15718 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15719 			TG3_NVM_BCVER_MAJSFT;
15720 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15721 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15722 			 "v%d.%02d", major, minor);
15723 	}
15724 }
15725 
15726 static void tg3_read_hwsb_ver(struct tg3 *tp)
15727 {
15728 	u32 val, major, minor;
15729 
15730 	/* Use native endian representation */
15731 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15732 		return;
15733 
15734 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15735 		TG3_NVM_HWSB_CFG1_MAJSFT;
15736 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15737 		TG3_NVM_HWSB_CFG1_MINSFT;
15738 
15739 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15740 }
15741 
15742 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15743 {
15744 	u32 offset, major, minor, build;
15745 
15746 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15747 
15748 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15749 		return;
15750 
15751 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15752 	case TG3_EEPROM_SB_REVISION_0:
15753 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15754 		break;
15755 	case TG3_EEPROM_SB_REVISION_2:
15756 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15757 		break;
15758 	case TG3_EEPROM_SB_REVISION_3:
15759 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15760 		break;
15761 	case TG3_EEPROM_SB_REVISION_4:
15762 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15763 		break;
15764 	case TG3_EEPROM_SB_REVISION_5:
15765 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15766 		break;
15767 	case TG3_EEPROM_SB_REVISION_6:
15768 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15769 		break;
15770 	default:
15771 		return;
15772 	}
15773 
15774 	if (tg3_nvram_read(tp, offset, &val))
15775 		return;
15776 
15777 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15778 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15779 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15780 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15781 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15782 
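	/* The minor number prints as two digits and builds 1-26 map to an
	 * 'a'-'z' suffix below, so anything outside those ranges cannot be
	 * represented.
	 */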
15783 	if (minor > 99 || build > 26)
15784 		return;
15785 
15786 	offset = strlen(tp->fw_ver);
15787 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15788 		 " v%d.%02d", major, minor);
15789 
15790 	if (build > 0) {
15791 		offset = strlen(tp->fw_ver);
15792 		if (offset < TG3_VER_SIZE - 1)
15793 			tp->fw_ver[offset] = 'a' + build - 1;
15794 	}
15795 }
15796 
15797 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15798 {
15799 	u32 val, offset, start;
15800 	int i, vlen;
15801 
15802 	for (offset = TG3_NVM_DIR_START;
15803 	     offset < TG3_NVM_DIR_END;
15804 	     offset += TG3_NVM_DIRENT_SIZE) {
15805 		if (tg3_nvram_read(tp, offset, &val))
15806 			return;
15807 
15808 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15809 			break;
15810 	}
15811 
15812 	if (offset == TG3_NVM_DIR_END)
15813 		return;
15814 
15815 	if (!tg3_flag(tp, 5705_PLUS))
15816 		start = 0x08000000;
15817 	else if (tg3_nvram_read(tp, offset - 4, &start))
15818 		return;
15819 
15820 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15821 	    !tg3_fw_img_is_valid(tp, offset) ||
15822 	    tg3_nvram_read(tp, offset + 8, &val))
15823 		return;
15824 
15825 	offset += val - start;
15826 
15827 	vlen = strlen(tp->fw_ver);
15828 
15829 	tp->fw_ver[vlen++] = ',';
15830 	tp->fw_ver[vlen++] = ' ';
15831 
15832 	for (i = 0; i < 4; i++) {
15833 		__be32 v;
15834 		if (tg3_nvram_read_be32(tp, offset, &v))
15835 			return;
15836 
15837 		offset += sizeof(v);
15838 
15839 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15840 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15841 			break;
15842 		}
15843 
15844 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15845 		vlen += sizeof(v);
15846 	}
15847 }
15848 
15849 static void tg3_probe_ncsi(struct tg3 *tp)
15850 {
15851 	u32 apedata;
15852 
15853 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15854 	if (apedata != APE_SEG_SIG_MAGIC)
15855 		return;
15856 
15857 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15858 	if (!(apedata & APE_FW_STATUS_READY))
15859 		return;
15860 
15861 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15862 		tg3_flag_set(tp, APE_HAS_NCSI);
15863 }
15864 
15865 static void tg3_read_dash_ver(struct tg3 *tp)
15866 {
15867 	int vlen;
15868 	u32 apedata;
15869 	char *fwtype;
15870 
15871 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15872 
15873 	if (tg3_flag(tp, APE_HAS_NCSI))
15874 		fwtype = "NCSI";
15875 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15876 		fwtype = "SMASH";
15877 	else
15878 		fwtype = "DASH";
15879 
15880 	vlen = strlen(tp->fw_ver);
15881 
15882 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15883 		 fwtype,
15884 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15885 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15886 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15887 		 (apedata & APE_FW_VERSION_BLDMSK));
15888 }
15889 
15890 static void tg3_read_otp_ver(struct tg3 *tp)
15891 {
15892 	u32 val, val2;
15893 
15894 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15895 		return;
15896 
15897 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15898 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15899 	    TG3_OTP_MAGIC0_VALID(val)) {
15900 		u64 val64 = (u64) val << 32 | val2;
15901 		u32 ver = 0;
15902 		int i, vlen;
15903 
15904 		for (i = 0; i < 7; i++) {
15905 			if ((val64 & 0xff) == 0)
15906 				break;
15907 			ver = val64 & 0xff;
15908 			val64 >>= 8;
15909 		}
15910 		vlen = strlen(tp->fw_ver);
15911 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15912 	}
15913 }
15914 
15915 static void tg3_read_fw_ver(struct tg3 *tp)
15916 {
15917 	u32 val;
15918 	bool vpd_vers = false;
15919 
15920 	if (tp->fw_ver[0] != 0)
15921 		vpd_vers = true;
15922 
15923 	if (tg3_flag(tp, NO_NVRAM)) {
15924 		strcat(tp->fw_ver, "sb");
15925 		tg3_read_otp_ver(tp);
15926 		return;
15927 	}
15928 
15929 	if (tg3_nvram_read(tp, 0, &val))
15930 		return;
15931 
15932 	if (val == TG3_EEPROM_MAGIC)
15933 		tg3_read_bc_ver(tp);
15934 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15935 		tg3_read_sb_ver(tp, val);
15936 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15937 		tg3_read_hwsb_ver(tp);
15938 
15939 	if (tg3_flag(tp, ENABLE_ASF)) {
15940 		if (tg3_flag(tp, ENABLE_APE)) {
15941 			tg3_probe_ncsi(tp);
15942 			if (!vpd_vers)
15943 				tg3_read_dash_ver(tp);
15944 		} else if (!vpd_vers) {
15945 			tg3_read_mgmtfw_ver(tp);
15946 		}
15947 	}
15948 
15949 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15950 }
15951 
15952 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15953 {
15954 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15955 		return TG3_RX_RET_MAX_SIZE_5717;
15956 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15957 		return TG3_RX_RET_MAX_SIZE_5700;
15958 	else
15959 		return TG3_RX_RET_MAX_SIZE_5705;
15960 }
15961 
15962 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15963 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15964 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15965 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15966 	{ },
15967 };
15968 
15969 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15970 {
15971 	struct pci_dev *peer;
15972 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15973 
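	/* devfn packs the PCI slot and function number; clearing the low
	 * three bits gives function 0 of this slot, so all eight functions
	 * can be scanned for the companion port.
	 */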
15974 	for (func = 0; func < 8; func++) {
15975 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15976 		if (peer && peer != tp->pdev)
15977 			break;
15978 		pci_dev_put(peer);
15979 	}
	/* A 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
15983 	if (!peer) {
15984 		peer = tp->pdev;
15985 		return peer;
15986 	}
15987 
15988 	/*
15989 	 * We don't need to keep the refcount elevated; there's no way
15990 	 * to remove one half of this device without removing the other
15991 	 */
15992 	pci_dev_put(peer);
15993 
15994 	return peer;
15995 }
15996 
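/* The chip revision normally comes from the upper bits of
 * TG3PCI_MISC_HOST_CTRL.  Devices that report ASIC_REV_USE_PROD_ID_REG
 * there instead expose the real revision through one of the product ID
 * registers, so read it from the appropriate config space location.
 */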
15997 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15998 {
15999 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16000 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16001 		u32 reg;
16002 
16003 		/* All devices that use the alternate
16004 		 * ASIC REV location have a CPMU.
16005 		 */
16006 		tg3_flag_set(tp, CPMU_PRESENT);
16007 
16008 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16009 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16010 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16011 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16012 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16013 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16014 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16015 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16016 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16017 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16018 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16019 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16020 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16021 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16022 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16023 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16024 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16025 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16026 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16027 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16028 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16029 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16030 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16031 		else
16032 			reg = TG3PCI_PRODID_ASICREV;
16033 
16034 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16035 	}
16036 
16037 	/* Wrong chip ID in 5752 A0. This code can be removed later
16038 	 * as A0 is not in production.
16039 	 */
16040 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16041 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16042 
16043 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16044 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16045 
16046 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16047 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16048 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16049 		tg3_flag_set(tp, 5717_PLUS);
16050 
16051 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16052 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16053 		tg3_flag_set(tp, 57765_CLASS);
16054 
16055 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16056 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16057 		tg3_flag_set(tp, 57765_PLUS);
16058 
16059 	/* Intentionally exclude ASIC_REV_5906 */
16060 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16061 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16062 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16063 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16064 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16065 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16066 	    tg3_flag(tp, 57765_PLUS))
16067 		tg3_flag_set(tp, 5755_PLUS);
16068 
16069 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16070 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16071 		tg3_flag_set(tp, 5780_CLASS);
16072 
16073 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16074 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16075 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16076 	    tg3_flag(tp, 5755_PLUS) ||
16077 	    tg3_flag(tp, 5780_CLASS))
16078 		tg3_flag_set(tp, 5750_PLUS);
16079 
16080 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16081 	    tg3_flag(tp, 5750_PLUS))
16082 		tg3_flag_set(tp, 5705_PLUS);
16083 }
16084 
16085 static bool tg3_10_100_only_device(struct tg3 *tp,
16086 				   const struct pci_device_id *ent)
16087 {
16088 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16089 
16090 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16091 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16092 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16093 		return true;
16094 
16095 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16096 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16097 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16098 				return true;
16099 		} else {
16100 			return true;
16101 		}
16102 	}
16103 
16104 	return false;
16105 }
16106 
16107 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16108 {
16109 	u32 misc_ctrl_reg;
16110 	u32 pci_state_reg, grc_misc_cfg;
16111 	u32 val;
16112 	u16 pci_cmd;
16113 	int err;
16114 
16115 	/* Force memory write invalidate off.  If we leave it on,
16116 	 * then on 5700_BX chips we have to enable a workaround.
16117 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
16120 	 * it.  This seems to suggest that the workaround is insufficient.
16121 	 */
16122 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16123 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16124 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16125 
16126 	/* Important! -- Make sure register accesses are byteswapped
16127 	 * correctly.  Also, for those chips that require it, make
16128 	 * sure that indirect register accesses are enabled before
16129 	 * the first operation.
16130 	 */
16131 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16132 			      &misc_ctrl_reg);
16133 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16134 			       MISC_HOST_CTRL_CHIPREV);
16135 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16136 			       tp->misc_host_ctrl);
16137 
16138 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16139 
16140 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16141 	 * we need to disable memory and use config. cycles
16142 	 * only to access all registers. The 5702/03 chips
16143 	 * can mistakenly decode the special cycles from the
16144 	 * ICH chipsets as memory write cycles, causing corruption
16145 	 * of register and memory space. Only certain ICH bridges
16146 	 * will drive special cycles with non-zero data during the
16147 	 * address phase which can fall within the 5703's address
16148 	 * range. This is not an ICH bug as the PCI spec allows
16149 	 * non-zero address during special cycles. However, only
16150 	 * these ICH bridges are known to drive non-zero addresses
16151 	 * during special cycles.
16152 	 *
16153 	 * Since special cycles do not cross PCI bridges, we only
16154 	 * enable this workaround if the 5703 is on the secondary
16155 	 * bus of these ICH bridges.
16156 	 */
16157 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16158 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16159 		static struct tg3_dev_id {
16160 			u32	vendor;
16161 			u32	device;
16162 			u32	rev;
16163 		} ich_chipsets[] = {
16164 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16165 			  PCI_ANY_ID },
16166 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16167 			  PCI_ANY_ID },
16168 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16169 			  0xa },
16170 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16171 			  PCI_ANY_ID },
16172 			{ },
16173 		};
16174 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16175 		struct pci_dev *bridge = NULL;
16176 
16177 		while (pci_id->vendor != 0) {
16178 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16179 						bridge);
16180 			if (!bridge) {
16181 				pci_id++;
16182 				continue;
16183 			}
16184 			if (pci_id->rev != PCI_ANY_ID) {
16185 				if (bridge->revision > pci_id->rev)
16186 					continue;
16187 			}
16188 			if (bridge->subordinate &&
16189 			    (bridge->subordinate->number ==
16190 			     tp->pdev->bus->number)) {
16191 				tg3_flag_set(tp, ICH_WORKAROUND);
16192 				pci_dev_put(bridge);
16193 				break;
16194 			}
16195 		}
16196 	}
16197 
16198 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16199 		static struct tg3_dev_id {
16200 			u32	vendor;
16201 			u32	device;
16202 		} bridge_chipsets[] = {
16203 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16204 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16205 			{ },
16206 		};
16207 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16208 		struct pci_dev *bridge = NULL;
16209 
16210 		while (pci_id->vendor != 0) {
16211 			bridge = pci_get_device(pci_id->vendor,
16212 						pci_id->device,
16213 						bridge);
16214 			if (!bridge) {
16215 				pci_id++;
16216 				continue;
16217 			}
16218 			if (bridge->subordinate &&
16219 			    (bridge->subordinate->number <=
16220 			     tp->pdev->bus->number) &&
16221 			    (bridge->subordinate->busn_res.end >=
16222 			     tp->pdev->bus->number)) {
16223 				tg3_flag_set(tp, 5701_DMA_BUG);
16224 				pci_dev_put(bridge);
16225 				break;
16226 			}
16227 		}
16228 	}
16229 
16230 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses wider than 40 bits.  This bridge may have additional
	 * 57xx devices behind it, for example in some 4-port NIC designs.
16233 	 * Any tg3 device found behind the bridge will also need the 40-bit
16234 	 * DMA workaround.
16235 	 */
16236 	if (tg3_flag(tp, 5780_CLASS)) {
16237 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16238 		tp->msi_cap = tp->pdev->msi_cap;
16239 	} else {
16240 		struct pci_dev *bridge = NULL;
16241 
16242 		do {
16243 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16244 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16245 						bridge);
16246 			if (bridge && bridge->subordinate &&
16247 			    (bridge->subordinate->number <=
16248 			     tp->pdev->bus->number) &&
16249 			    (bridge->subordinate->busn_res.end >=
16250 			     tp->pdev->bus->number)) {
16251 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16252 				pci_dev_put(bridge);
16253 				break;
16254 			}
16255 		} while (bridge);
16256 	}
16257 
16258 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16259 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16260 		tp->pdev_peer = tg3_find_peer(tp);
16261 
16262 	/* Determine TSO capabilities */
16263 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16264 		; /* Do nothing. HW bug. */
16265 	else if (tg3_flag(tp, 57765_PLUS))
16266 		tg3_flag_set(tp, HW_TSO_3);
16267 	else if (tg3_flag(tp, 5755_PLUS) ||
16268 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16269 		tg3_flag_set(tp, HW_TSO_2);
16270 	else if (tg3_flag(tp, 5750_PLUS)) {
16271 		tg3_flag_set(tp, HW_TSO_1);
16272 		tg3_flag_set(tp, TSO_BUG);
16273 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16274 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16275 			tg3_flag_clear(tp, TSO_BUG);
16276 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16277 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16278 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16279 		tg3_flag_set(tp, FW_TSO);
16280 		tg3_flag_set(tp, TSO_BUG);
16281 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16282 			tp->fw_needed = FIRMWARE_TG3TSO5;
16283 		else
16284 			tp->fw_needed = FIRMWARE_TG3TSO;
16285 	}
16286 
16287 	/* Selectively allow TSO based on operating conditions */
16288 	if (tg3_flag(tp, HW_TSO_1) ||
16289 	    tg3_flag(tp, HW_TSO_2) ||
16290 	    tg3_flag(tp, HW_TSO_3) ||
16291 	    tg3_flag(tp, FW_TSO)) {
16292 		/* For firmware TSO, assume ASF is disabled.
16293 		 * We'll disable TSO later if we discover ASF
16294 		 * is enabled in tg3_get_eeprom_hw_cfg().
16295 		 */
16296 		tg3_flag_set(tp, TSO_CAPABLE);
16297 	} else {
16298 		tg3_flag_clear(tp, TSO_CAPABLE);
16299 		tg3_flag_clear(tp, TSO_BUG);
16300 		tp->fw_needed = NULL;
16301 	}
16302 
16303 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16304 		tp->fw_needed = FIRMWARE_TG3;
16305 
16306 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16307 		tp->fw_needed = FIRMWARE_TG357766;
16308 
16309 	tp->irq_max = 1;
16310 
16311 	if (tg3_flag(tp, 5750_PLUS)) {
16312 		tg3_flag_set(tp, SUPPORT_MSI);
16313 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16314 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16315 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16316 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16317 		     tp->pdev_peer == tp->pdev))
16318 			tg3_flag_clear(tp, SUPPORT_MSI);
16319 
16320 		if (tg3_flag(tp, 5755_PLUS) ||
16321 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16322 			tg3_flag_set(tp, 1SHOT_MSI);
16323 		}
16324 
16325 		if (tg3_flag(tp, 57765_PLUS)) {
16326 			tg3_flag_set(tp, SUPPORT_MSIX);
16327 			tp->irq_max = TG3_IRQ_MAX_VECS;
16328 		}
16329 	}
16330 
16331 	tp->txq_max = 1;
16332 	tp->rxq_max = 1;
16333 	if (tp->irq_max > 1) {
16334 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16335 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16336 
16337 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16338 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16339 			tp->txq_max = tp->irq_max - 1;
16340 	}
16341 
16342 	if (tg3_flag(tp, 5755_PLUS) ||
16343 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16344 		tg3_flag_set(tp, SHORT_DMA_BUG);
16345 
16346 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16347 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16348 
16349 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16350 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16351 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16352 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16353 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16354 
16355 	if (tg3_flag(tp, 57765_PLUS) &&
16356 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16357 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16358 
16359 	if (!tg3_flag(tp, 5705_PLUS) ||
16360 	    tg3_flag(tp, 5780_CLASS) ||
16361 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16362 		tg3_flag_set(tp, JUMBO_CAPABLE);
16363 
16364 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16365 			      &pci_state_reg);
16366 
16367 	if (pci_is_pcie(tp->pdev)) {
16368 		u16 lnkctl;
16369 
16370 		tg3_flag_set(tp, PCI_EXPRESS);
16371 
16372 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16373 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16374 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16375 				tg3_flag_clear(tp, HW_TSO_2);
16376 				tg3_flag_clear(tp, TSO_CAPABLE);
16377 			}
16378 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16379 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16380 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16381 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16382 				tg3_flag_set(tp, CLKREQ_BUG);
16383 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16384 			tg3_flag_set(tp, L1PLLPD_EN);
16385 		}
16386 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16387 		/* BCM5785 devices are effectively PCIe devices, and should
16388 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16389 		 * section.
16390 		 */
16391 		tg3_flag_set(tp, PCI_EXPRESS);
16392 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16393 		   tg3_flag(tp, 5780_CLASS)) {
16394 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16395 		if (!tp->pcix_cap) {
16396 			dev_err(&tp->pdev->dev,
16397 				"Cannot find PCI-X capability, aborting\n");
16398 			return -EIO;
16399 		}
16400 
16401 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16402 			tg3_flag_set(tp, PCIX_MODE);
16403 	}
16404 
16405 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16406 	 * reordering to the mailbox registers done by the host
16407 	 * controller can cause major trouble.  We read back from
16408 	 * every mailbox register write to force the writes to be
16409 	 * posted to the chip in order.
16410 	 */
16411 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16412 	    !tg3_flag(tp, PCI_EXPRESS))
16413 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16414 
16415 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16416 			     &tp->pci_cacheline_sz);
16417 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16418 			     &tp->pci_lat_timer);
16419 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16420 	    tp->pci_lat_timer < 64) {
16421 		tp->pci_lat_timer = 64;
16422 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16423 				      tp->pci_lat_timer);
16424 	}
16425 
16426 	/* Important! -- It is critical that the PCI-X hw workaround
16427 	 * situation is decided before the first MMIO register access.
16428 	 */
16429 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16430 		/* 5700 BX chips need to have their TX producer index
16431 		 * mailboxes written twice to work around a bug.
16432 		 */
16433 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16434 
16435 		/* If we are in PCI-X mode, enable register write workaround.
16436 		 *
16437 		 * The workaround is to use indirect register accesses
16438 		 * for all chip writes not to mailbox registers.
16439 		 */
16440 		if (tg3_flag(tp, PCIX_MODE)) {
16441 			u32 pm_reg;
16442 
16443 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16444 
16445 			/* The chip can have its power management PCI config
16446 			 * space registers clobbered due to this bug.
16447 			 * So explicitly force the chip into D0 here.
16448 			 */
16449 			pci_read_config_dword(tp->pdev,
16450 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16451 					      &pm_reg);
16452 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16453 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16454 			pci_write_config_dword(tp->pdev,
16455 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16456 					       pm_reg);
16457 
16458 			/* Also, force SERR#/PERR# in PCI command. */
16459 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16460 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16461 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16462 		}
16463 	}
16464 
16465 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16466 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16467 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16468 		tg3_flag_set(tp, PCI_32BIT);
16469 
16470 	/* Chip-specific fixup from Broadcom driver */
16471 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16472 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16473 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16474 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16475 	}
16476 
16477 	/* Default fast path register access methods */
16478 	tp->read32 = tg3_read32;
16479 	tp->write32 = tg3_write32;
16480 	tp->read32_mbox = tg3_read32;
16481 	tp->write32_mbox = tg3_write32;
16482 	tp->write32_tx_mbox = tg3_write32;
16483 	tp->write32_rx_mbox = tg3_write32;
16484 
16485 	/* Various workaround register access methods */
16486 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16487 		tp->write32 = tg3_write_indirect_reg32;
16488 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16489 		 (tg3_flag(tp, PCI_EXPRESS) &&
16490 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16491 		/*
16492 		 * Back to back register writes can cause problems on these
16493 		 * chips, the workaround is to read back all reg writes
16494 		 * except those to mailbox regs.
16495 		 *
16496 		 * See tg3_write_indirect_reg32().
16497 		 */
16498 		tp->write32 = tg3_write_flush_reg32;
16499 	}
16500 
16501 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16502 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16503 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16504 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16505 	}
16506 
16507 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16508 		tp->read32 = tg3_read_indirect_reg32;
16509 		tp->write32 = tg3_write_indirect_reg32;
16510 		tp->read32_mbox = tg3_read_indirect_mbox;
16511 		tp->write32_mbox = tg3_write_indirect_mbox;
16512 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16513 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16514 
16515 		iounmap(tp->regs);
16516 		tp->regs = NULL;
16517 
16518 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16519 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16520 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16521 	}
16522 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16523 		tp->read32_mbox = tg3_read32_mbox_5906;
16524 		tp->write32_mbox = tg3_write32_mbox_5906;
16525 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16526 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16527 	}
16528 
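	/* Chips that need indirect register writes, and 5700/5701 in PCI-X
	 * mode, also have to use the PCI config window for SRAM accesses.
	 */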
16529 	if (tp->write32 == tg3_write_indirect_reg32 ||
16530 	    (tg3_flag(tp, PCIX_MODE) &&
16531 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16532 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16533 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16534 
16535 	/* The memory arbiter has to be enabled in order for SRAM accesses
16536 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16537 	 * sure it is enabled, but other entities such as system netboot
16538 	 * code might disable it.
16539 	 */
16540 	val = tr32(MEMARB_MODE);
16541 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16542 
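	/* Work out which PCI function this port is; some multi-port chips
	 * report it via the PCI-X status or CPMU status registers rather
	 * than devfn.
	 */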
16543 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16544 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16545 	    tg3_flag(tp, 5780_CLASS)) {
16546 		if (tg3_flag(tp, PCIX_MODE)) {
16547 			pci_read_config_dword(tp->pdev,
16548 					      tp->pcix_cap + PCI_X_STATUS,
16549 					      &val);
16550 			tp->pci_fn = val & 0x7;
16551 		}
16552 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16553 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16554 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16555 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16556 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16557 			val = tr32(TG3_CPMU_STATUS);
16558 
16559 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16560 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16561 		else
16562 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16563 				     TG3_CPMU_STATUS_FSHFT_5719;
16564 	}
16565 
16566 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16567 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16568 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16569 	}
16570 
16571 	/* Get eeprom hw config before calling tg3_set_power_state().
16572 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16573 	 * determined before calling tg3_set_power_state() so that
16574 	 * we know whether or not to switch out of Vaux power.
16575 	 * When the flag is set, it means that GPIO1 is used for eeprom
16576 	 * write protect and also implies that it is a LOM where GPIOs
16577 	 * are not used to switch power.
16578 	 */
16579 	tg3_get_eeprom_hw_cfg(tp);
16580 
16581 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16582 		tg3_flag_clear(tp, TSO_CAPABLE);
16583 		tg3_flag_clear(tp, TSO_BUG);
16584 		tp->fw_needed = NULL;
16585 	}
16586 
16587 	if (tg3_flag(tp, ENABLE_APE)) {
16588 		/* Allow reads and writes to the
16589 		 * APE register and memory space.
16590 		 */
16591 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16592 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16593 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16594 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16595 				       pci_state_reg);
16596 
16597 		tg3_ape_lock_init(tp);
16598 	}
16599 
16600 	/* Set up tp->grc_local_ctrl before calling
16601 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16602 	 * will bring 5700's external PHY out of reset.
16603 	 * It is also used as eeprom write protect on LOMs.
16604 	 */
16605 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16606 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16607 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16608 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16609 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16610 	/* Unused GPIO3 must be driven as output on 5752 because there
16611 	 * are no pull-up resistors on unused GPIO pins.
16612 	 */
16613 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16614 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16615 
16616 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16617 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16618 	    tg3_flag(tp, 57765_CLASS))
16619 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16620 
16621 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16622 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16623 		/* Turn off the debug UART. */
16624 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16625 		if (tg3_flag(tp, IS_NIC))
16626 			/* Keep VMain power. */
16627 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16628 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16629 	}
16630 
16631 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16632 		tp->grc_local_ctrl |=
16633 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16634 
16635 	/* Switch out of Vaux if it is a NIC */
16636 	tg3_pwrsrc_switch_to_vmain(tp);
16637 
16638 	/* Derive initial jumbo mode from MTU assigned in
16639 	 * ether_setup() via the alloc_etherdev() call
16640 	 */
16641 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16642 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16643 
16644 	/* Determine WakeOnLan speed to use. */
16645 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16646 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16647 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16648 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16649 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16650 	} else {
16651 		tg3_flag_set(tp, WOL_SPEED_100MB);
16652 	}
16653 
16654 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16655 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16656 
16657 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16658 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16659 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16660 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16661 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16662 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16663 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16664 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16665 
16666 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16667 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16668 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16669 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16670 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16671 
16672 	if (tg3_flag(tp, 5705_PLUS) &&
16673 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16674 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16675 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16676 	    !tg3_flag(tp, 57765_PLUS)) {
16677 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16678 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16679 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16680 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16681 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16682 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16683 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16684 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16685 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16686 		} else
16687 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16688 	}
16689 
16690 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16691 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16692 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16693 		if (tp->phy_otp == 0)
16694 			tp->phy_otp = TG3_OTP_DEFAULT;
16695 	}
16696 
16697 	if (tg3_flag(tp, CPMU_PRESENT))
16698 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16699 	else
16700 		tp->mi_mode = MAC_MI_MODE_BASE;
16701 
16702 	tp->coalesce_mode = 0;
16703 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16704 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16705 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16706 
16707 	/* Set these bits to enable statistics workaround. */
16708 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16709 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16710 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16711 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16712 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16713 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16714 	}
16715 
16716 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16717 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16718 		tg3_flag_set(tp, USE_PHYLIB);
16719 
16720 	err = tg3_mdio_init(tp);
16721 	if (err)
16722 		return err;
16723 
16724 	/* Initialize data/descriptor byte/word swapping. */
16725 	val = tr32(GRC_MODE);
16726 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16727 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16728 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16729 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16730 			GRC_MODE_B2HRX_ENABLE |
16731 			GRC_MODE_HTX2B_ENABLE |
16732 			GRC_MODE_HOST_STACKUP);
16733 	else
16734 		val &= GRC_MODE_HOST_STACKUP;
16735 
16736 	tw32(GRC_MODE, val | tp->grc_mode);
16737 
16738 	tg3_switch_clocks(tp);
16739 
16740 	/* Clear this out for sanity. */
16741 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16742 
16743 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16744 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16745 
16746 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16747 			      &pci_state_reg);
16748 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16749 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16750 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16751 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16752 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16753 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16754 			void __iomem *sram_base;
16755 
16756 			/* Write some dummy words into the SRAM status block
16757 			 * area and see if they read back correctly.  If the return
16758 			 * value is bad, force enable the PCIX workaround.
16759 			 */
16760 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16761 
16762 			writel(0x00000000, sram_base);
16763 			writel(0x00000000, sram_base + 4);
16764 			writel(0xffffffff, sram_base + 4);
16765 			if (readl(sram_base) != 0x00000000)
16766 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16767 		}
16768 	}
16769 
16770 	udelay(50);
16771 	tg3_nvram_init(tp);
16772 
16773 	/* If the device has an NVRAM, no need to load patch firmware */
16774 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16775 	    !tg3_flag(tp, NO_NVRAM))
16776 		tp->fw_needed = NULL;
16777 
16778 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16779 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16780 
16781 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16782 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16783 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16784 		tg3_flag_set(tp, IS_5788);
16785 
16786 	if (!tg3_flag(tp, IS_5788) &&
16787 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16788 		tg3_flag_set(tp, TAGGED_STATUS);
16789 	if (tg3_flag(tp, TAGGED_STATUS)) {
16790 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16791 				      HOSTCC_MODE_CLRTICK_TXBD);
16792 
16793 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16794 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16795 				       tp->misc_host_ctrl);
16796 	}
16797 
16798 	/* Preserve the APE MAC_MODE bits */
16799 	if (tg3_flag(tp, ENABLE_APE))
16800 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16801 	else
16802 		tp->mac_mode = 0;
16803 
16804 	if (tg3_10_100_only_device(tp, ent))
16805 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16806 
16807 	err = tg3_phy_probe(tp);
16808 	if (err) {
16809 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16810 		/* ... but do not return immediately ... */
16811 		tg3_mdio_fini(tp);
16812 	}
16813 
16814 	tg3_read_vpd(tp);
16815 	tg3_read_fw_ver(tp);
16816 
16817 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16818 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16819 	} else {
16820 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16821 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16822 		else
16823 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16824 	}
16825 
16826 	/* 5700 {AX,BX} chips have a broken status block link
16827 	 * change bit implementation, so we must use the
16828 	 * status register in those cases.
16829 	 */
16830 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16831 		tg3_flag_set(tp, USE_LINKCHG_REG);
16832 	else
16833 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16834 
16835 	/* The led_ctrl is set during tg3_phy_probe, here we might
16836 	 * have to force the link status polling mechanism based
16837 	 * upon subsystem IDs.
16838 	 */
16839 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16840 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16841 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16842 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16843 		tg3_flag_set(tp, USE_LINKCHG_REG);
16844 	}
16845 
16846 	/* For all SERDES we poll the MAC status register. */
16847 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16848 		tg3_flag_set(tp, POLL_SERDES);
16849 	else
16850 		tg3_flag_clear(tp, POLL_SERDES);
16851 
16852 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16853 		tg3_flag_set(tp, POLL_CPMU_LINK);
16854 
16855 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16856 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16857 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16858 	    tg3_flag(tp, PCIX_MODE)) {
16859 		tp->rx_offset = NET_SKB_PAD;
16860 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16861 		tp->rx_copy_thresh = ~(u16)0;
16862 #endif
16863 	}
16864 
16865 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16866 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16867 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16868 
16869 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16870 
16871 	/* Increment the rx prod index on the rx std ring by at most
16872 	 * 8 for these chips to workaround hw errata.
16873 	 */
16874 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16875 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16876 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16877 		tp->rx_std_max_post = 8;
16878 
16879 	if (tg3_flag(tp, ASPM_WORKAROUND))
16880 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16881 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16882 
16883 	return err;
16884 }
16885 
16886 #ifdef CONFIG_SPARC
16887 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16888 {
16889 	struct net_device *dev = tp->dev;
16890 	struct pci_dev *pdev = tp->pdev;
16891 	struct device_node *dp = pci_device_to_OF_node(pdev);
16892 	const unsigned char *addr;
16893 	int len;
16894 
16895 	addr = of_get_property(dp, "local-mac-address", &len);
16896 	if (addr && len == ETH_ALEN) {
16897 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16898 		return 0;
16899 	}
16900 	return -ENODEV;
16901 }
16902 
16903 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16904 {
16905 	struct net_device *dev = tp->dev;
16906 
16907 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16908 	return 0;
16909 }
16910 #endif
16911 
16912 static int tg3_get_device_address(struct tg3 *tp)
16913 {
16914 	struct net_device *dev = tp->dev;
16915 	u32 hi, lo, mac_offset;
16916 	int addr_ok = 0;
16917 	int err;
16918 
16919 #ifdef CONFIG_SPARC
16920 	if (!tg3_get_macaddr_sparc(tp))
16921 		return 0;
16922 #endif
16923 
16924 	if (tg3_flag(tp, IS_SSB_CORE)) {
16925 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16926 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16927 			return 0;
16928 	}
16929 
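	/* Default NVRAM offset of the MAC address; adjusted below for
	 * dual-MAC 5704/5780-class parts, 5717+ multi-function devices and
	 * the 5906.
	 */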
16930 	mac_offset = 0x7c;
16931 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16932 	    tg3_flag(tp, 5780_CLASS)) {
16933 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16934 			mac_offset = 0xcc;
16935 		if (tg3_nvram_lock(tp))
16936 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16937 		else
16938 			tg3_nvram_unlock(tp);
16939 	} else if (tg3_flag(tp, 5717_PLUS)) {
16940 		if (tp->pci_fn & 1)
16941 			mac_offset = 0xcc;
16942 		if (tp->pci_fn > 1)
16943 			mac_offset += 0x18c;
16944 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16945 		mac_offset = 0x10;
16946 
16947 	/* First try to get it from MAC address mailbox. */
16948 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
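	/* Bootcode leaves an "HK" (0x484b) signature in the upper 16 bits
	 * when the mailbox holds a valid MAC address.
	 */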
16949 	if ((hi >> 16) == 0x484b) {
16950 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16951 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16952 
16953 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16954 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16955 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16956 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16957 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16958 
16959 		/* Some old bootcode may report a 0 MAC address in SRAM */
16960 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16961 	}
16962 	if (!addr_ok) {
16963 		/* Next, try NVRAM. */
16964 		if (!tg3_flag(tp, NO_NVRAM) &&
16965 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16966 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16967 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16968 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16969 		}
16970 		/* Finally just fetch it out of the MAC control regs. */
16971 		else {
16972 			hi = tr32(MAC_ADDR_0_HIGH);
16973 			lo = tr32(MAC_ADDR_0_LOW);
16974 
16975 			dev->dev_addr[5] = lo & 0xff;
16976 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16977 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16978 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16979 			dev->dev_addr[1] = hi & 0xff;
16980 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16981 		}
16982 	}
16983 
16984 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16985 #ifdef CONFIG_SPARC
16986 		if (!tg3_get_default_macaddr_sparc(tp))
16987 			return 0;
16988 #endif
16989 		return -EINVAL;
16990 	}
16991 	return 0;
16992 }
16993 
16994 #define BOUNDARY_SINGLE_CACHELINE	1
16995 #define BOUNDARY_MULTI_CACHELINE	2
16996 
16997 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16998 {
16999 	int cacheline_size;
17000 	u8 byte;
17001 	int goal;
17002 
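	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of zero
	 * means it was never programmed, so fall back to the 1024-byte case.
	 */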
17003 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17004 	if (byte == 0)
17005 		cacheline_size = 1024;
17006 	else
17007 		cacheline_size = (int) byte * 4;
17008 
17009 	/* On 5703 and later chips, the boundary bits have no
17010 	 * effect.
17011 	 */
17012 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17013 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17014 	    !tg3_flag(tp, PCI_EXPRESS))
17015 		goto out;
17016 
17017 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17018 	goal = BOUNDARY_MULTI_CACHELINE;
17019 #else
17020 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17021 	goal = BOUNDARY_SINGLE_CACHELINE;
17022 #else
17023 	goal = 0;
17024 #endif
17025 #endif
17026 
17027 	if (tg3_flag(tp, 57765_PLUS)) {
17028 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17029 		goto out;
17030 	}
17031 
17032 	if (!goal)
17033 		goto out;
17034 
17035 	/* PCI controllers on most RISC systems tend to disconnect
17036 	 * when a device tries to burst across a cache-line boundary.
17037 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17038 	 *
17039 	 * Unfortunately, for PCI-E there are only limited
17040 	 * write-side controls for this, and thus for reads
17041 	 * we will still get the disconnects.  We'll also waste
17042 	 * these PCI cycles for both read and write for chips
17043 	 * other than 5700 and 5701 which do not implement the
17044 	 * boundary bits.
17045 	 */
17046 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17047 		switch (cacheline_size) {
17048 		case 16:
17049 		case 32:
17050 		case 64:
17051 		case 128:
17052 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17053 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17054 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17055 			} else {
17056 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17057 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17058 			}
17059 			break;
17060 
17061 		case 256:
17062 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17063 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17064 			break;
17065 
17066 		default:
17067 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17068 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17069 			break;
17070 		}
17071 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17072 		switch (cacheline_size) {
17073 		case 16:
17074 		case 32:
17075 		case 64:
17076 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17077 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17078 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17079 				break;
17080 			}
17081 			/* fallthrough */
17082 		case 128:
17083 		default:
17084 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17085 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17086 			break;
17087 		}
17088 	} else {
17089 		switch (cacheline_size) {
17090 		case 16:
17091 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17092 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17093 					DMA_RWCTRL_WRITE_BNDRY_16);
17094 				break;
17095 			}
17096 			/* fallthrough */
17097 		case 32:
17098 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17099 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17100 					DMA_RWCTRL_WRITE_BNDRY_32);
17101 				break;
17102 			}
17103 			/* fallthrough */
17104 		case 64:
17105 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17106 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17107 					DMA_RWCTRL_WRITE_BNDRY_64);
17108 				break;
17109 			}
17110 			/* fallthrough */
17111 		case 128:
17112 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17113 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17114 					DMA_RWCTRL_WRITE_BNDRY_128);
17115 				break;
17116 			}
17117 			/* fallthrough */
17118 		case 256:
17119 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17120 				DMA_RWCTRL_WRITE_BNDRY_256);
17121 			break;
17122 		case 512:
17123 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17124 				DMA_RWCTRL_WRITE_BNDRY_512);
17125 			break;
17126 		case 1024:
17127 		default:
17128 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17129 				DMA_RWCTRL_WRITE_BNDRY_1024);
17130 			break;
17131 		}
17132 	}
17133 
17134 out:
17135 	return val;
17136 }
17137 
17138 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17139 			   int size, bool to_device)
17140 {
17141 	struct tg3_internal_buffer_desc test_desc;
17142 	u32 sram_dma_descs;
17143 	int i, ret;
17144 
17145 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17146 
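	/* Quiesce the completion FIFOs, DMA status and buffer manager before
	 * staging the test descriptor in NIC SRAM.
	 */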
17147 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17148 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17149 	tw32(RDMAC_STATUS, 0);
17150 	tw32(WDMAC_STATUS, 0);
17151 
17152 	tw32(BUFMGR_MODE, 0);
17153 	tw32(FTQ_RESET, 0);
17154 
17155 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17156 	test_desc.addr_lo = buf_dma & 0xffffffff;
17157 	test_desc.nic_mbuf = 0x00002100;
17158 	test_desc.len = size;
17159 
17160 	/*
17161 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17162 	 * the *second* time the tg3 driver was getting loaded after an
17163 	 * initial scan.
17164 	 *
17165 	 * Broadcom tells me:
17166 	 *   ...the DMA engine is connected to the GRC block and a DMA
17167 	 *   reset may affect the GRC block in some unpredictable way...
17168 	 *   The behavior of resets to individual blocks has not been tested.
17169 	 *
17170 	 * Broadcom noted the GRC reset will also reset all sub-components.
17171 	 */
17172 	if (to_device) {
17173 		test_desc.cqid_sqid = (13 << 8) | 2;
17174 
17175 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17176 		udelay(40);
17177 	} else {
17178 		test_desc.cqid_sqid = (16 << 8) | 7;
17179 
17180 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17181 		udelay(40);
17182 	}
17183 	test_desc.flags = 0x00000005;
17184 
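	/* Copy the test descriptor into NIC SRAM one word at a time through
	 * the PCI memory window.
	 */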
17185 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17186 		u32 val;
17187 
17188 		val = *(((u32 *)&test_desc) + i);
17189 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17190 				       sram_dma_descs + (i * sizeof(u32)));
17191 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17192 	}
17193 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17194 
17195 	if (to_device)
17196 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17197 	else
17198 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17199 
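	/* Poll the completion FIFO for up to 4 ms (40 x 100 us) for the
	 * descriptor to be handed back.
	 */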
17200 	ret = -ENODEV;
17201 	for (i = 0; i < 40; i++) {
17202 		u32 val;
17203 
17204 		if (to_device)
17205 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17206 		else
17207 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17208 		if ((val & 0xffff) == sram_dma_descs) {
17209 			ret = 0;
17210 			break;
17211 		}
17212 
17213 		udelay(100);
17214 	}
17215 
17216 	return ret;
17217 }
17218 
17219 #define TEST_BUFFER_SIZE	0x2000
17220 
17221 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17222 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17223 	{ },
17224 };
17225 
17226 static int tg3_test_dma(struct tg3 *tp)
17227 {
17228 	dma_addr_t buf_dma;
17229 	u32 *buf, saved_dma_rwctrl;
17230 	int ret = 0;
17231 
17232 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17233 				 &buf_dma, GFP_KERNEL);
17234 	if (!buf) {
17235 		ret = -ENOMEM;
17236 		goto out_nofree;
17237 	}
17238 
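	/* Seed DMA_RWCTRL with the default PCI read/write command codes,
	 * then fold in the DMA boundary settings for this platform.
	 */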
17239 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17240 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17241 
17242 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17243 
17244 	if (tg3_flag(tp, 57765_PLUS))
17245 		goto out;
17246 
17247 	if (tg3_flag(tp, PCI_EXPRESS)) {
17248 		/* DMA read watermark not used on PCIE */
17249 		tp->dma_rwctrl |= 0x00180000;
17250 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17251 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17252 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17253 			tp->dma_rwctrl |= 0x003f0000;
17254 		else
17255 			tp->dma_rwctrl |= 0x003f000f;
17256 	} else {
17257 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17258 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17259 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17260 			u32 read_water = 0x7;
17261 
17262 			/* If the 5704 is behind the EPB bridge, we can
17263 			 * do the less restrictive ONE_DMA workaround for
17264 			 * better performance.
17265 			 */
17266 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17267 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17268 				tp->dma_rwctrl |= 0x8000;
17269 			else if (ccval == 0x6 || ccval == 0x7)
17270 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17271 
17272 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17273 				read_water = 4;
17274 			/* Set bit 23 to enable PCIX hw bug fix */
17275 			tp->dma_rwctrl |=
17276 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17277 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17278 				(1 << 23);
17279 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17280 			/* 5780 always in PCIX mode */
17281 			tp->dma_rwctrl |= 0x00144000;
17282 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17283 			/* 5714 always in PCIX mode */
17284 			tp->dma_rwctrl |= 0x00148000;
17285 		} else {
17286 			tp->dma_rwctrl |= 0x001b000f;
17287 		}
17288 	}
17289 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17290 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17291 
17292 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17293 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17294 		tp->dma_rwctrl &= 0xfffffff0;
17295 
17296 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17297 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17298 		/* Remove this if it causes problems for some boards. */
17299 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17300 
17301 		/* On 5700/5701 chips, we need to set this bit.
17302 		 * Otherwise the chip will issue cacheline transactions
17303 		 * to streamable DMA memory with not all the byte
17304 		 * enables turned on.  This is an error on several
17305 		 * RISC PCI controllers, in particular sparc64.
17306 		 *
17307 		 * On 5703/5704 chips, this bit has been reassigned
17308 		 * a different meaning.  In particular, it is used
17309 		 * on those chips to enable a PCI-X workaround.
17310 		 */
17311 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17312 	}
17313 
17314 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17315 
17316 
17317 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17318 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17319 		goto out;
17320 
17321 	/* It is best to perform DMA test with maximum write burst size
17322 	 * to expose the 5700/5701 write DMA bug.
17323 	 */
17324 	saved_dma_rwctrl = tp->dma_rwctrl;
17325 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17326 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17327 
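	/* Write a known pattern to the chip and read it back; if corruption
	 * shows up, drop the write boundary to 16 bytes and retry.
	 */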
17328 	while (1) {
17329 		u32 *p = buf, i;
17330 
17331 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17332 			p[i] = i;
17333 
17334 		/* Send the buffer to the chip. */
17335 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17336 		if (ret) {
17337 			dev_err(&tp->pdev->dev,
17338 				"%s: Buffer write failed. err = %d\n",
17339 				__func__, ret);
17340 			break;
17341 		}
17342 
17343 		/* Now read it back. */
17344 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17345 		if (ret) {
17346 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17347 				"err = %d\n", __func__, ret);
17348 			break;
17349 		}
17350 
17351 		/* Verify it. */
17352 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17353 			if (p[i] == i)
17354 				continue;
17355 
17356 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17357 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17358 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17359 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17360 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17361 				break;
17362 			} else {
17363 				dev_err(&tp->pdev->dev,
17364 					"%s: Buffer corrupted on read back! "
17365 					"(%d != %d)\n", __func__, p[i], i);
17366 				ret = -ENODEV;
17367 				goto out;
17368 			}
17369 		}
17370 
17371 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17372 			/* Success. */
17373 			ret = 0;
17374 			break;
17375 		}
17376 	}
17377 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17378 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17379 		/* DMA test passed without adjusting DMA boundary,
17380 		 * now look for chipsets that are known to expose the
17381 		 * DMA bug without failing the test.
17382 		 */
17383 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17384 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17385 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17386 		} else {
17387 			/* Safe to use the calculated DMA boundary. */
17388 			tp->dma_rwctrl = saved_dma_rwctrl;
17389 		}
17390 
17391 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17392 	}
17393 
17394 out:
17395 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17396 out_nofree:
17397 	return ret;
17398 }
17399 
17400 static void tg3_init_bufmgr_config(struct tg3 *tp)
17401 {
17402 	if (tg3_flag(tp, 57765_PLUS)) {
17403 		tp->bufmgr_config.mbuf_read_dma_low_water =
17404 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17405 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17406 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17407 		tp->bufmgr_config.mbuf_high_water =
17408 			DEFAULT_MB_HIGH_WATER_57765;
17409 
17410 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17411 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17412 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17413 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17414 		tp->bufmgr_config.mbuf_high_water_jumbo =
17415 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17416 	} else if (tg3_flag(tp, 5705_PLUS)) {
17417 		tp->bufmgr_config.mbuf_read_dma_low_water =
17418 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17419 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17420 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17421 		tp->bufmgr_config.mbuf_high_water =
17422 			DEFAULT_MB_HIGH_WATER_5705;
17423 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17424 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17425 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17426 			tp->bufmgr_config.mbuf_high_water =
17427 				DEFAULT_MB_HIGH_WATER_5906;
17428 		}
17429 
17430 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17431 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17432 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17433 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17434 		tp->bufmgr_config.mbuf_high_water_jumbo =
17435 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17436 	} else {
17437 		tp->bufmgr_config.mbuf_read_dma_low_water =
17438 			DEFAULT_MB_RDMA_LOW_WATER;
17439 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17440 			DEFAULT_MB_MACRX_LOW_WATER;
17441 		tp->bufmgr_config.mbuf_high_water =
17442 			DEFAULT_MB_HIGH_WATER;
17443 
17444 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17445 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17446 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17447 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17448 		tp->bufmgr_config.mbuf_high_water_jumbo =
17449 			DEFAULT_MB_HIGH_WATER_JUMBO;
17450 	}
17451 
17452 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17453 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17454 }
17455 
17456 static char *tg3_phy_string(struct tg3 *tp)
17457 {
17458 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17459 	case TG3_PHY_ID_BCM5400:	return "5400";
17460 	case TG3_PHY_ID_BCM5401:	return "5401";
17461 	case TG3_PHY_ID_BCM5411:	return "5411";
17462 	case TG3_PHY_ID_BCM5701:	return "5701";
17463 	case TG3_PHY_ID_BCM5703:	return "5703";
17464 	case TG3_PHY_ID_BCM5704:	return "5704";
17465 	case TG3_PHY_ID_BCM5705:	return "5705";
17466 	case TG3_PHY_ID_BCM5750:	return "5750";
17467 	case TG3_PHY_ID_BCM5752:	return "5752";
17468 	case TG3_PHY_ID_BCM5714:	return "5714";
17469 	case TG3_PHY_ID_BCM5780:	return "5780";
17470 	case TG3_PHY_ID_BCM5755:	return "5755";
17471 	case TG3_PHY_ID_BCM5787:	return "5787";
17472 	case TG3_PHY_ID_BCM5784:	return "5784";
17473 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17474 	case TG3_PHY_ID_BCM5906:	return "5906";
17475 	case TG3_PHY_ID_BCM5761:	return "5761";
17476 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17477 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17478 	case TG3_PHY_ID_BCM57765:	return "57765";
17479 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17480 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17481 	case TG3_PHY_ID_BCM5762:	return "5762C";
17482 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17483 	case 0:			return "serdes";
17484 	default:		return "unknown";
17485 	}
17486 }
17487 
17488 static char *tg3_bus_string(struct tg3 *tp, char *str)
17489 {
17490 	if (tg3_flag(tp, PCI_EXPRESS)) {
17491 		strcpy(str, "PCI Express");
17492 		return str;
17493 	} else if (tg3_flag(tp, PCIX_MODE)) {
17494 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17495 
17496 		strcpy(str, "PCIX:");
17497 
17498 		if ((clock_ctrl == 7) ||
17499 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17500 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17501 			strcat(str, "133MHz");
17502 		else if (clock_ctrl == 0)
17503 			strcat(str, "33MHz");
17504 		else if (clock_ctrl == 2)
17505 			strcat(str, "50MHz");
17506 		else if (clock_ctrl == 4)
17507 			strcat(str, "66MHz");
17508 		else if (clock_ctrl == 6)
17509 			strcat(str, "100MHz");
17510 	} else {
17511 		strcpy(str, "PCI:");
17512 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17513 			strcat(str, "66MHz");
17514 		else
17515 			strcat(str, "33MHz");
17516 	}
17517 	if (tg3_flag(tp, PCI_32BIT))
17518 		strcat(str, ":32-bit");
17519 	else
17520 		strcat(str, ":64-bit");
17521 	return str;
17522 }
17523 
17524 static void tg3_init_coal(struct tg3 *tp)
17525 {
17526 	struct ethtool_coalesce *ec = &tp->coal;
17527 
17528 	memset(ec, 0, sizeof(*ec));
17529 	ec->cmd = ETHTOOL_GCOALESCE;
17530 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17531 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17532 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17533 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17534 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17535 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17536 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17537 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17538 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17539 
17540 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17541 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17542 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17543 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17544 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17545 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17546 	}
17547 
17548 	if (tg3_flag(tp, 5705_PLUS)) {
17549 		ec->rx_coalesce_usecs_irq = 0;
17550 		ec->tx_coalesce_usecs_irq = 0;
17551 		ec->stats_block_coalesce_usecs = 0;
17552 	}
17553 }
17554 
17555 static int tg3_init_one(struct pci_dev *pdev,
17556 				  const struct pci_device_id *ent)
17557 {
17558 	struct net_device *dev;
17559 	struct tg3 *tp;
17560 	int i, err;
17561 	u32 sndmbx, rcvmbx, intmbx;
17562 	char str[40];
17563 	u64 dma_mask, persist_dma_mask;
17564 	netdev_features_t features = 0;
17565 
17566 	printk_once(KERN_INFO "%s\n", version);
17567 
17568 	err = pci_enable_device(pdev);
17569 	if (err) {
17570 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17571 		return err;
17572 	}
17573 
17574 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17575 	if (err) {
17576 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17577 		goto err_out_disable_pdev;
17578 	}
17579 
17580 	pci_set_master(pdev);
17581 
17582 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17583 	if (!dev) {
17584 		err = -ENOMEM;
17585 		goto err_out_free_res;
17586 	}
17587 
17588 	SET_NETDEV_DEV(dev, &pdev->dev);
17589 
17590 	tp = netdev_priv(dev);
17591 	tp->pdev = pdev;
17592 	tp->dev = dev;
17593 	tp->rx_mode = TG3_DEF_RX_MODE;
17594 	tp->tx_mode = TG3_DEF_TX_MODE;
17595 	tp->irq_sync = 1;
17596 	tp->pcierr_recovery = false;
17597 
17598 	if (tg3_debug > 0)
17599 		tp->msg_enable = tg3_debug;
17600 	else
17601 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17602 
17603 	if (pdev_is_ssb_gige_core(pdev)) {
17604 		tg3_flag_set(tp, IS_SSB_CORE);
17605 		if (ssb_gige_must_flush_posted_writes(pdev))
17606 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17607 		if (ssb_gige_one_dma_at_once(pdev))
17608 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17609 		if (ssb_gige_have_roboswitch(pdev)) {
17610 			tg3_flag_set(tp, USE_PHYLIB);
17611 			tg3_flag_set(tp, ROBOSWITCH);
17612 		}
17613 		if (ssb_gige_is_rgmii(pdev))
17614 			tg3_flag_set(tp, RGMII_MODE);
17615 	}
17616 
17617 	/* The word/byte swap controls here control register access byte
17618 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17619 	 * setting below.
17620 	 */
17621 	tp->misc_host_ctrl =
17622 		MISC_HOST_CTRL_MASK_PCI_INT |
17623 		MISC_HOST_CTRL_WORD_SWAP |
17624 		MISC_HOST_CTRL_INDIR_ACCESS |
17625 		MISC_HOST_CTRL_PCISTATE_RW;
17626 
17627 	/* The NONFRM (non-frame) byte/word swap controls take effect
17628 	 * on descriptor entries, anything which isn't packet data.
17629 	 *
17630 	 * The StrongARM chips on the board (one for tx, one for rx)
17631 	 * are running in big-endian mode.
17632 	 */
17633 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17634 			GRC_MODE_WSWAP_NONFRM_DATA);
17635 #ifdef __BIG_ENDIAN
17636 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17637 #endif
17638 	spin_lock_init(&tp->lock);
17639 	spin_lock_init(&tp->indirect_lock);
17640 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17641 
17642 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17643 	if (!tp->regs) {
17644 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17645 		err = -ENOMEM;
17646 		goto err_out_free_dev;
17647 	}
17648 
17649 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17650 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17651 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17652 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17653 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17654 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17655 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17656 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17657 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17658 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17659 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17660 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17661 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17662 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17663 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17664 		tg3_flag_set(tp, ENABLE_APE);
17665 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17666 		if (!tp->aperegs) {
17667 			dev_err(&pdev->dev,
17668 				"Cannot map APE registers, aborting\n");
17669 			err = -ENOMEM;
17670 			goto err_out_iounmap;
17671 		}
17672 	}
17673 
17674 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17675 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17676 
17677 	dev->ethtool_ops = &tg3_ethtool_ops;
17678 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17679 	dev->netdev_ops = &tg3_netdev_ops;
17680 	dev->irq = pdev->irq;
17681 
17682 	err = tg3_get_invariants(tp, ent);
17683 	if (err) {
17684 		dev_err(&pdev->dev,
17685 			"Problem fetching invariants of chip, aborting\n");
17686 		goto err_out_apeunmap;
17687 	}
17688 
17689 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17690 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17691 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17692 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17693 	 * do DMA address check in tg3_start_xmit().
17694 	 */
17695 	if (tg3_flag(tp, IS_5788))
17696 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17697 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17698 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17699 #ifdef CONFIG_HIGHMEM
17700 		dma_mask = DMA_BIT_MASK(64);
17701 #endif
17702 	} else
17703 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17704 
17705 	/* Configure DMA attributes. */
17706 	if (dma_mask > DMA_BIT_MASK(32)) {
17707 		err = pci_set_dma_mask(pdev, dma_mask);
17708 		if (!err) {
17709 			features |= NETIF_F_HIGHDMA;
17710 			err = pci_set_consistent_dma_mask(pdev,
17711 							  persist_dma_mask);
17712 			if (err < 0) {
17713 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17714 					"DMA for consistent allocations\n");
17715 				goto err_out_apeunmap;
17716 			}
17717 		}
17718 	}
17719 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17720 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17721 		if (err) {
17722 			dev_err(&pdev->dev,
17723 				"No usable DMA configuration, aborting\n");
17724 			goto err_out_apeunmap;
17725 		}
17726 	}
17727 
17728 	tg3_init_bufmgr_config(tp);
17729 
17730 	/* 5700 B0 chips do not support checksumming correctly due
17731 	 * to hardware bugs.
17732 	 */
17733 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17734 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17735 
17736 		if (tg3_flag(tp, 5755_PLUS))
17737 			features |= NETIF_F_IPV6_CSUM;
17738 	}
17739 
17740 	/* TSO is on by default on chips that support hardware TSO.
17741 	 * Firmware TSO on older chips gives lower performance, so it
17742 	 * is off by default, but can be enabled using ethtool.
17743 	 */
17744 	if ((tg3_flag(tp, HW_TSO_1) ||
17745 	     tg3_flag(tp, HW_TSO_2) ||
17746 	     tg3_flag(tp, HW_TSO_3)) &&
17747 	    (features & NETIF_F_IP_CSUM))
17748 		features |= NETIF_F_TSO;
17749 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17750 		if (features & NETIF_F_IPV6_CSUM)
17751 			features |= NETIF_F_TSO6;
17752 		if (tg3_flag(tp, HW_TSO_3) ||
17753 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17754 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17755 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17756 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17757 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17758 			features |= NETIF_F_TSO_ECN;
17759 	}
17760 
17761 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17762 			 NETIF_F_HW_VLAN_CTAG_RX;
17763 	dev->vlan_features |= features;
17764 
17765 	/*
17766 	 * Add loopback capability only for a subset of devices that support
17767 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17768 	 * loopback for the remaining devices.
17769 	 */
17770 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17771 	    !tg3_flag(tp, CPMU_PRESENT))
17772 		/* Add the loopback capability */
17773 		features |= NETIF_F_LOOPBACK;
17774 
17775 	dev->hw_features |= features;
17776 	dev->priv_flags |= IFF_UNICAST_FLT;
17777 
17778 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17779 	    !tg3_flag(tp, TSO_CAPABLE) &&
17780 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17781 		tg3_flag_set(tp, MAX_RXPEND_64);
17782 		tp->rx_pending = 63;
17783 	}
17784 
17785 	err = tg3_get_device_address(tp);
17786 	if (err) {
17787 		dev_err(&pdev->dev,
17788 			"Could not obtain valid ethernet address, aborting\n");
17789 		goto err_out_apeunmap;
17790 	}
17791 
17792 	/*
17793 	 * Reset chip in case UNDI or EFI driver did not shut down.
17794 	 * DMA self test will enable WDMAC and we'll see (spurious)
17795 	 * pending DMA on the PCI bus at that point.
17796 	 */
17797 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17798 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17799 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17800 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17801 	}
17802 
17803 	err = tg3_test_dma(tp);
17804 	if (err) {
17805 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17806 		goto err_out_apeunmap;
17807 	}
17808 
17809 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17810 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17811 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17812 	for (i = 0; i < tp->irq_max; i++) {
17813 		struct tg3_napi *tnapi = &tp->napi[i];
17814 
17815 		tnapi->tp = tp;
17816 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17817 
17818 		tnapi->int_mbox = intmbx;
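		/* Step 8 bytes to the next interrupt mailbox for the first
		 * five vectors, 4 bytes thereafter.
		 */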
17819 		if (i <= 4)
17820 			intmbx += 0x8;
17821 		else
17822 			intmbx += 0x4;
17823 
17824 		tnapi->consmbox = rcvmbx;
17825 		tnapi->prodmbox = sndmbx;
17826 
17827 		if (i)
17828 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17829 		else
17830 			tnapi->coal_now = HOSTCC_MODE_NOW;
17831 
17832 		if (!tg3_flag(tp, SUPPORT_MSIX))
17833 			break;
17834 
17835 		/*
17836 		 * If we support MSIX, we'll be using RSS.  If we're using
17837 		 * RSS, the first vector only handles link interrupts and the
17838 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17839 		 * mailbox values for the next iteration.  The values we set up
17840 		 * above are still useful for single-vector mode.
17841 		 */
17842 		if (!i)
17843 			continue;
17844 
17845 		rcvmbx += 0x8;
17846 
17847 		if (sndmbx & 0x4)
17848 			sndmbx -= 0x4;
17849 		else
17850 			sndmbx += 0xc;
17851 	}
17852 
17853 	tg3_init_coal(tp);
17854 
17855 	pci_set_drvdata(pdev, dev);
17856 
17857 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17858 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17859 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17860 		tg3_flag_set(tp, PTP_CAPABLE);
17861 
17862 	tg3_timer_init(tp);
17863 
17864 	tg3_carrier_off(tp);
17865 
17866 	err = register_netdev(dev);
17867 	if (err) {
17868 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17869 		goto err_out_apeunmap;
17870 	}
17871 
17872 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17873 		    tp->board_part_number,
17874 		    tg3_chip_rev_id(tp),
17875 		    tg3_bus_string(tp, str),
17876 		    dev->dev_addr);
17877 
17878 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17879 		struct phy_device *phydev;
17880 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17881 		netdev_info(dev,
17882 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17883 			    phydev->drv->name, dev_name(&phydev->dev));
17884 	} else {
17885 		char *ethtype;
17886 
17887 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17888 			ethtype = "10/100Base-TX";
17889 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17890 			ethtype = "1000Base-SX";
17891 		else
17892 			ethtype = "10/100/1000Base-T";
17893 
17894 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17895 			    "(WireSpeed[%d], EEE[%d])\n",
17896 			    tg3_phy_string(tp), ethtype,
17897 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17898 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17899 	}
17900 
17901 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17902 		    (dev->features & NETIF_F_RXCSUM) != 0,
17903 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17904 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17905 		    tg3_flag(tp, ENABLE_ASF) != 0,
17906 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17907 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17908 		    tp->dma_rwctrl,
17909 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17910 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17911 
17912 	pci_save_state(pdev);
17913 
17914 	return 0;
17915 
17916 err_out_apeunmap:
17917 	if (tp->aperegs) {
17918 		iounmap(tp->aperegs);
17919 		tp->aperegs = NULL;
17920 	}
17921 
17922 err_out_iounmap:
17923 	if (tp->regs) {
17924 		iounmap(tp->regs);
17925 		tp->regs = NULL;
17926 	}
17927 
17928 err_out_free_dev:
17929 	free_netdev(dev);
17930 
17931 err_out_free_res:
17932 	pci_release_regions(pdev);
17933 
17934 err_out_disable_pdev:
17935 	if (pci_is_enabled(pdev))
17936 		pci_disable_device(pdev);
17937 	return err;
17938 }
17939 
17940 static void tg3_remove_one(struct pci_dev *pdev)
17941 {
17942 	struct net_device *dev = pci_get_drvdata(pdev);
17943 
17944 	if (dev) {
17945 		struct tg3 *tp = netdev_priv(dev);
17946 
17947 		release_firmware(tp->fw);
17948 
17949 		tg3_reset_task_cancel(tp);
17950 
17951 		if (tg3_flag(tp, USE_PHYLIB)) {
17952 			tg3_phy_fini(tp);
17953 			tg3_mdio_fini(tp);
17954 		}
17955 
17956 		unregister_netdev(dev);
17957 		if (tp->aperegs) {
17958 			iounmap(tp->aperegs);
17959 			tp->aperegs = NULL;
17960 		}
17961 		if (tp->regs) {
17962 			iounmap(tp->regs);
17963 			tp->regs = NULL;
17964 		}
17965 		free_netdev(dev);
17966 		pci_release_regions(pdev);
17967 		pci_disable_device(pdev);
17968 	}
17969 }
17970 
17971 #ifdef CONFIG_PM_SLEEP
17972 static int tg3_suspend(struct device *device)
17973 {
17974 	struct pci_dev *pdev = to_pci_dev(device);
17975 	struct net_device *dev = pci_get_drvdata(pdev);
17976 	struct tg3 *tp = netdev_priv(dev);
17977 	int err = 0;
17978 
17979 	rtnl_lock();
17980 
17981 	if (!netif_running(dev))
17982 		goto unlock;
17983 
17984 	tg3_reset_task_cancel(tp);
17985 	tg3_phy_stop(tp);
17986 	tg3_netif_stop(tp);
17987 
17988 	tg3_timer_stop(tp);
17989 
17990 	tg3_full_lock(tp, 1);
17991 	tg3_disable_ints(tp);
17992 	tg3_full_unlock(tp);
17993 
17994 	netif_device_detach(dev);
17995 
17996 	tg3_full_lock(tp, 0);
17997 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17998 	tg3_flag_clear(tp, INIT_COMPLETE);
17999 	tg3_full_unlock(tp);
18000 
18001 	err = tg3_power_down_prepare(tp);
18002 	if (err) {
18003 		int err2;
18004 
18005 		tg3_full_lock(tp, 0);
18006 
18007 		tg3_flag_set(tp, INIT_COMPLETE);
18008 		err2 = tg3_restart_hw(tp, true);
18009 		if (err2)
18010 			goto out;
18011 
18012 		tg3_timer_start(tp);
18013 
18014 		netif_device_attach(dev);
18015 		tg3_netif_start(tp);
18016 
18017 out:
18018 		tg3_full_unlock(tp);
18019 
18020 		if (!err2)
18021 			tg3_phy_start(tp);
18022 	}
18023 
18024 unlock:
18025 	rtnl_unlock();
18026 	return err;
18027 }
18028 
18029 static int tg3_resume(struct device *device)
18030 {
18031 	struct pci_dev *pdev = to_pci_dev(device);
18032 	struct net_device *dev = pci_get_drvdata(pdev);
18033 	struct tg3 *tp = netdev_priv(dev);
18034 	int err = 0;
18035 
18036 	rtnl_lock();
18037 
18038 	if (!netif_running(dev))
18039 		goto unlock;
18040 
18041 	netif_device_attach(dev);
18042 
18043 	tg3_full_lock(tp, 0);
18044 
18045 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18046 
18047 	tg3_flag_set(tp, INIT_COMPLETE);
18048 	err = tg3_restart_hw(tp,
18049 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18050 	if (err)
18051 		goto out;
18052 
18053 	tg3_timer_start(tp);
18054 
18055 	tg3_netif_start(tp);
18056 
18057 out:
18058 	tg3_full_unlock(tp);
18059 
18060 	if (!err)
18061 		tg3_phy_start(tp);
18062 
18063 unlock:
18064 	rtnl_unlock();
18065 	return err;
18066 }
18067 #endif /* CONFIG_PM_SLEEP */
18068 
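/* Use tg3_suspend/tg3_resume for all system sleep transitions. */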
18069 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18070 
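/**
 * tg3_shutdown - shutdown callback
 * @pdev: Pointer to PCI device
 *
 * Detaches the network device and closes the interface if it is
 * running.  When the system is powering off, the chip is also placed
 * in its low-power state.
 */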
18071 static void tg3_shutdown(struct pci_dev *pdev)
18072 {
18073 	struct net_device *dev = pci_get_drvdata(pdev);
18074 	struct tg3 *tp = netdev_priv(dev);
18075 
18076 	rtnl_lock();
18077 	netif_device_detach(dev);
18078 
18079 	if (netif_running(dev))
18080 		dev_close(dev);
18081 
18082 	if (system_state == SYSTEM_POWER_OFF)
18083 		tg3_power_down(tp);
18084 
18085 	rtnl_unlock();
18086 }
18087 
18088 /**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
18092  *
18093  * This function is called after a PCI bus error affecting
18094  * this device has been detected.
18095  */
18096 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18097 					      pci_channel_state_t state)
18098 {
18099 	struct net_device *netdev = pci_get_drvdata(pdev);
18100 	struct tg3 *tp = netdev_priv(netdev);
18101 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18102 
18103 	netdev_info(netdev, "PCI I/O error detected\n");
18104 
18105 	rtnl_lock();
18106 
18107 	tp->pcierr_recovery = true;
18108 
18109 	/* We probably don't have netdev yet */
18110 	if (!netdev || !netif_running(netdev))
18111 		goto done;
18112 
18113 	tg3_phy_stop(tp);
18114 
18115 	tg3_netif_stop(tp);
18116 
18117 	tg3_timer_stop(tp);
18118 
18119 	/* Want to make sure that the reset task doesn't run */
18120 	tg3_reset_task_cancel(tp);
18121 
18122 	netif_device_detach(netdev);
18123 
18124 	/* Clean up software state, even if MMIO is blocked */
18125 	tg3_full_lock(tp, 0);
18126 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18127 	tg3_full_unlock(tp);
18128 
18129 done:
18130 	if (state == pci_channel_io_perm_failure) {
18131 		if (netdev) {
18132 			tg3_napi_enable(tp);
18133 			dev_close(netdev);
18134 		}
18135 		err = PCI_ERS_RESULT_DISCONNECT;
18136 	} else {
18137 		pci_disable_device(pdev);
18138 	}
18139 
18140 	rtnl_unlock();
18141 
18142 	return err;
18143 }
18144 
18145 /**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
18147  * @pdev: Pointer to PCI device
18148  *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
18152  * set up identically to what it was at cold boot.
18153  */
18154 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18155 {
18156 	struct net_device *netdev = pci_get_drvdata(pdev);
18157 	struct tg3 *tp = netdev_priv(netdev);
18158 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18159 	int err;
18160 
18161 	rtnl_lock();
18162 
18163 	if (pci_enable_device(pdev)) {
18164 		dev_err(&pdev->dev,
18165 			"Cannot re-enable PCI device after reset.\n");
18166 		goto done;
18167 	}
18168 
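	/* Re-enable bus mastering, restore the configuration space
	 * saved at probe time, and refresh the saved copy for any
	 * subsequent reset.
	 */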
18169 	pci_set_master(pdev);
18170 	pci_restore_state(pdev);
18171 	pci_save_state(pdev);
18172 
18173 	if (!netdev || !netif_running(netdev)) {
18174 		rc = PCI_ERS_RESULT_RECOVERED;
18175 		goto done;
18176 	}
18177 
18178 	err = tg3_power_up(tp);
18179 	if (err)
18180 		goto done;
18181 
18182 	rc = PCI_ERS_RESULT_RECOVERED;
18183 
18184 done:
18185 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18186 		tg3_napi_enable(tp);
18187 		dev_close(netdev);
18188 	}
18189 	rtnl_unlock();
18190 
18191 	return rc;
18192 }
18193 
18194 /**
18195  * tg3_io_resume - called when traffic can start flowing again.
18196  * @pdev: Pointer to PCI device
18197  *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
18200  */
18201 static void tg3_io_resume(struct pci_dev *pdev)
18202 {
18203 	struct net_device *netdev = pci_get_drvdata(pdev);
18204 	struct tg3 *tp = netdev_priv(netdev);
18205 	int err;
18206 
18207 	rtnl_lock();
18208 
18209 	if (!netif_running(netdev))
18210 		goto done;
18211 
18212 	tg3_full_lock(tp, 0);
18213 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18214 	tg3_flag_set(tp, INIT_COMPLETE);
18215 	err = tg3_restart_hw(tp, true);
18216 	if (err) {
18217 		tg3_full_unlock(tp);
18218 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18219 		goto done;
18220 	}
18221 
18222 	netif_device_attach(netdev);
18223 
18224 	tg3_timer_start(tp);
18225 
18226 	tg3_netif_start(tp);
18227 
18228 	tg3_full_unlock(tp);
18229 
18230 	tg3_phy_start(tp);
18231 
18232 done:
18233 	tp->pcierr_recovery = false;
18234 	rtnl_unlock();
18235 }
18236 
18237 static const struct pci_error_handlers tg3_err_handler = {
18238 	.error_detected	= tg3_io_error_detected,
18239 	.slot_reset	= tg3_io_slot_reset,
18240 	.resume		= tg3_io_resume
18241 };
18242 
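/* PCI driver glue: device table, probe/remove, AER error recovery,
 * power management and shutdown handlers.
 */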
18243 static struct pci_driver tg3_driver = {
18244 	.name		= DRV_MODULE_NAME,
18245 	.id_table	= tg3_pci_tbl,
18246 	.probe		= tg3_init_one,
18247 	.remove		= tg3_remove_one,
18248 	.err_handler	= &tg3_err_handler,
18249 	.driver.pm	= &tg3_pm_ops,
18250 	.shutdown	= tg3_shutdown,
18251 };
18252 
18253 module_pci_driver(tg3_driver);
18254