1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2014 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/types.h>
25 #include <linux/compiler.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/in.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <linux/ip.h>
43 #include <linux/tcp.h>
44 #include <linux/workqueue.h>
45 #include <linux/prefetch.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/firmware.h>
48 #include <linux/ssb/ssb_driver_gige.h>
49 #include <linux/hwmon.h>
50 #include <linux/hwmon-sysfs.h>
51 
52 #include <net/checksum.h>
53 #include <net/ip.h>
54 
55 #include <linux/io.h>
56 #include <asm/byteorder.h>
57 #include <linux/uaccess.h>
58 
59 #include <uapi/linux/net_tstamp.h>
60 #include <linux/ptp_clock_kernel.h>
61 
62 #ifdef CONFIG_SPARC
63 #include <asm/idprom.h>
64 #include <asm/prom.h>
65 #endif
66 
67 #define BAR_0	0
68 #define BAR_2	2
69 
70 #include "tg3.h"
71 
72 /* Functions & macros to verify TG3_FLAGS types */
73 
74 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 {
76 	return test_bit(flag, bits);
77 }
78 
79 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
80 {
81 	set_bit(flag, bits);
82 }
83 
84 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 {
86 	clear_bit(flag, bits);
87 }
88 
89 #define tg3_flag(tp, flag)				\
90 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_set(tp, flag)				\
92 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_clear(tp, flag)			\
94 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
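
/* Example: tg3_flag(tp, ENABLE_APE) tests TG3_FLAG_ENABLE_APE in
 * tp->tg3_flags; the _set/_clear variants use the atomic bitops above.
 */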
95 
96 #define DRV_MODULE_NAME		"tg3"
97 #define TG3_MAJ_NUM			3
98 #define TG3_MIN_NUM			137
99 #define DRV_MODULE_VERSION	\
100 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
101 #define DRV_MODULE_RELDATE	"May 11, 2014"
102 
103 #define RESET_KIND_SHUTDOWN	0
104 #define RESET_KIND_INIT		1
105 #define RESET_KIND_SUSPEND	2
106 
107 #define TG3_DEF_RX_MODE		0
108 #define TG3_DEF_TX_MODE		0
109 #define TG3_DEF_MSG_ENABLE	  \
110 	(NETIF_MSG_DRV		| \
111 	 NETIF_MSG_PROBE	| \
112 	 NETIF_MSG_LINK		| \
113 	 NETIF_MSG_TIMER	| \
114 	 NETIF_MSG_IFDOWN	| \
115 	 NETIF_MSG_IFUP		| \
116 	 NETIF_MSG_RX_ERR	| \
117 	 NETIF_MSG_TX_ERR)
118 
119 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
120 
/* Length of time before we decide the hardware is borked and
 * dev->tx_timeout() should be called to fix the problem.
 */
124 
125 #define TG3_TX_TIMEOUT			(5 * HZ)
126 
127 /* hardware minimum and maximum for a single frame's data payload */
128 #define TG3_MIN_MTU			ETH_ZLEN
129 #define TG3_MAX_MTU(tp)	\
130 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 
132 /* These numbers seem to be hard coded in the NIC firmware somehow.
133  * You can't change the ring sizes, but you can change where you place
134  * them in the NIC onboard memory.
135  */
136 #define TG3_RX_STD_RING_SIZE(tp) \
137 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
138 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
139 #define TG3_DEF_RX_RING_PENDING		200
140 #define TG3_RX_JMB_RING_SIZE(tp) \
141 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
142 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
143 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
144 
/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
151 
152 #define TG3_TX_RING_SIZE		512
153 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
154 
155 #define TG3_RX_STD_RING_BYTES(tp) \
156 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
157 #define TG3_RX_JMB_RING_BYTES(tp) \
158 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
159 #define TG3_RX_RCB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
161 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
162 				 TG3_TX_RING_SIZE)
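/* TG3_TX_RING_SIZE is a power of two, so the ring index wraps with a
 * simple mask instead of a modulo, e.g. NEXT_TX(TG3_TX_RING_SIZE - 1) == 0.
 */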
163 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 
165 #define TG3_DMA_BYTE_ENAB		64
166 
167 #define TG3_RX_STD_DMA_SZ		1536
168 #define TG3_RX_JMB_DMA_SZ		9046
169 
170 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
171 
172 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
173 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 
175 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
176 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 
178 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
179 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 
181 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
182  * that are at least dword aligned when used in PCIX mode.  The driver
183  * works around this bug by double copying the packet.  This workaround
184  * is built into the normal double copy length check for efficiency.
185  *
186  * However, the double copy is only necessary on those architectures
187  * where unaligned memory accesses are inefficient.  For those architectures
188  * where unaligned memory accesses incur little penalty, we can reintegrate
189  * the 5701 in the normal rx path.  Doing so saves a device structure
190  * dereference by hardcoding the double copy threshold in place.
191  */
192 #define TG3_RX_COPY_THRESHOLD		256
193 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
194 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
195 #else
196 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
197 #endif
198 
199 #if (NET_IP_ALIGN != 0)
200 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
201 #else
202 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
203 #endif
204 
205 /* minimum number of free TX descriptors required to wake up TX process */
206 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
207 #define TG3_TX_BD_DMA_MAX_2K		2048
208 #define TG3_TX_BD_DMA_MAX_4K		4096
209 
210 #define TG3_RAW_IP_ALIGN 2
211 
212 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
213 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
214 
215 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
216 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
217 
218 #define FIRMWARE_TG3		"tigon/tg3.bin"
219 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
220 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
221 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
222 
223 static char version[] =
224 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
225 
226 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
227 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
228 MODULE_LICENSE("GPL");
229 MODULE_VERSION(DRV_MODULE_VERSION);
230 MODULE_FIRMWARE(FIRMWARE_TG3);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
232 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
233 
234 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
235 module_param(tg3_debug, int, 0);
236 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
237 
238 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
239 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
240 
241 static const struct pci_device_id tg3_pci_tbl[] = {
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
261 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
262 			TG3_DRV_DATA_FLAG_5705_10_100},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
268 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
269 			TG3_DRV_DATA_FLAG_5705_10_100},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
276 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
282 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
290 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
291 			PCI_VENDOR_ID_LENOVO,
292 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
293 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
296 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
314 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
315 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
316 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
317 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
319 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
324 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
334 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
336 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
355 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
356 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
357 	{}
358 };
359 
360 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
361 
362 static const struct {
363 	const char string[ETH_GSTRING_LEN];
364 } ethtool_stats_keys[] = {
365 	{ "rx_octets" },
366 	{ "rx_fragments" },
367 	{ "rx_ucast_packets" },
368 	{ "rx_mcast_packets" },
369 	{ "rx_bcast_packets" },
370 	{ "rx_fcs_errors" },
371 	{ "rx_align_errors" },
372 	{ "rx_xon_pause_rcvd" },
373 	{ "rx_xoff_pause_rcvd" },
374 	{ "rx_mac_ctrl_rcvd" },
375 	{ "rx_xoff_entered" },
376 	{ "rx_frame_too_long_errors" },
377 	{ "rx_jabbers" },
378 	{ "rx_undersize_packets" },
379 	{ "rx_in_length_errors" },
380 	{ "rx_out_length_errors" },
381 	{ "rx_64_or_less_octet_packets" },
382 	{ "rx_65_to_127_octet_packets" },
383 	{ "rx_128_to_255_octet_packets" },
384 	{ "rx_256_to_511_octet_packets" },
385 	{ "rx_512_to_1023_octet_packets" },
386 	{ "rx_1024_to_1522_octet_packets" },
387 	{ "rx_1523_to_2047_octet_packets" },
388 	{ "rx_2048_to_4095_octet_packets" },
389 	{ "rx_4096_to_8191_octet_packets" },
390 	{ "rx_8192_to_9022_octet_packets" },
391 
392 	{ "tx_octets" },
393 	{ "tx_collisions" },
394 
395 	{ "tx_xon_sent" },
396 	{ "tx_xoff_sent" },
397 	{ "tx_flow_control" },
398 	{ "tx_mac_errors" },
399 	{ "tx_single_collisions" },
400 	{ "tx_mult_collisions" },
401 	{ "tx_deferred" },
402 	{ "tx_excessive_collisions" },
403 	{ "tx_late_collisions" },
404 	{ "tx_collide_2times" },
405 	{ "tx_collide_3times" },
406 	{ "tx_collide_4times" },
407 	{ "tx_collide_5times" },
408 	{ "tx_collide_6times" },
409 	{ "tx_collide_7times" },
410 	{ "tx_collide_8times" },
411 	{ "tx_collide_9times" },
412 	{ "tx_collide_10times" },
413 	{ "tx_collide_11times" },
414 	{ "tx_collide_12times" },
415 	{ "tx_collide_13times" },
416 	{ "tx_collide_14times" },
417 	{ "tx_collide_15times" },
418 	{ "tx_ucast_packets" },
419 	{ "tx_mcast_packets" },
420 	{ "tx_bcast_packets" },
421 	{ "tx_carrier_sense_errors" },
422 	{ "tx_discards" },
423 	{ "tx_errors" },
424 
425 	{ "dma_writeq_full" },
426 	{ "dma_write_prioq_full" },
427 	{ "rxbds_empty" },
428 	{ "rx_discards" },
429 	{ "rx_errors" },
430 	{ "rx_threshold_hit" },
431 
432 	{ "dma_readq_full" },
433 	{ "dma_read_prioq_full" },
434 	{ "tx_comp_queue_full" },
435 
436 	{ "ring_set_send_prod_index" },
437 	{ "ring_status_update" },
438 	{ "nic_irqs" },
439 	{ "nic_avoided_irqs" },
440 	{ "nic_tx_threshold_hit" },
441 
442 	{ "mbuf_lwm_thresh_hit" },
443 };
444 
445 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
446 #define TG3_NVRAM_TEST		0
447 #define TG3_LINK_TEST		1
448 #define TG3_REGISTER_TEST	2
449 #define TG3_MEMORY_TEST		3
450 #define TG3_MAC_LOOPB_TEST	4
451 #define TG3_PHY_LOOPB_TEST	5
452 #define TG3_EXT_LOOPB_TEST	6
453 #define TG3_INTERRUPT_TEST	7
454 
455 
456 static const struct {
457 	const char string[ETH_GSTRING_LEN];
458 } ethtool_test_keys[] = {
459 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
460 	[TG3_LINK_TEST]		= { "link test         (online) " },
461 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
462 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
463 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
464 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
465 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
466 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
467 };
468 
469 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
470 
471 
472 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
473 {
474 	writel(val, tp->regs + off);
475 }
476 
477 static u32 tg3_read32(struct tg3 *tp, u32 off)
478 {
479 	return readl(tp->regs + off);
480 }
481 
482 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
483 {
484 	writel(val, tp->aperegs + off);
485 }
486 
487 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
488 {
489 	return readl(tp->aperegs + off);
490 }
491 
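/* Indirect register access: select the target offset through
 * TG3PCI_REG_BASE_ADDR in PCI config space, then transfer the value
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two accesses.
 */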
492 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
493 {
494 	unsigned long flags;
495 
496 	spin_lock_irqsave(&tp->indirect_lock, flags);
497 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
498 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
499 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
500 }
501 
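/* Write a register, then read it back to flush the posted PCI write. */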
502 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
503 {
504 	writel(val, tp->regs + off);
505 	readl(tp->regs + off);
506 }
507 
508 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
509 {
510 	unsigned long flags;
511 	u32 val;
512 
513 	spin_lock_irqsave(&tp->indirect_lock, flags);
514 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
515 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
516 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 	return val;
518 }
519 
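/* Mailbox write in indirect mode.  The RX return-ring consumer and RX
 * standard producer mailboxes have dedicated config-space shadow
 * registers; all other mailboxes are reached through the indirect
 * register window at offset (off + 0x5600).
 */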
520 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
521 {
522 	unsigned long flags;
523 
524 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
525 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
526 				       TG3_64BIT_REG_LOW, val);
527 		return;
528 	}
529 	if (off == TG3_RX_STD_PROD_IDX_REG) {
530 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
531 				       TG3_64BIT_REG_LOW, val);
532 		return;
533 	}
534 
535 	spin_lock_irqsave(&tp->indirect_lock, flags);
536 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
537 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
538 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
539 
540 	/* In indirect mode when disabling interrupts, we also need
541 	 * to clear the interrupt bit in the GRC local ctrl register.
542 	 */
543 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
544 	    (val == 0x1)) {
545 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
546 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
547 	}
548 }
549 
550 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
551 {
552 	unsigned long flags;
553 	u32 val;
554 
555 	spin_lock_irqsave(&tp->indirect_lock, flags);
556 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
557 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
558 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
559 	return val;
560 }
561 
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
567 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
568 {
569 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
570 		/* Non-posted methods */
571 		tp->write32(tp, off, val);
572 	else {
573 		/* Posted method */
574 		tg3_write32(tp, off, val);
575 		if (usec_wait)
576 			udelay(usec_wait);
577 		tp->read32(tp, off);
578 	}
579 	/* Wait again after the read for the posted method to guarantee that
580 	 * the wait time is met.
581 	 */
582 	if (usec_wait)
583 		udelay(usec_wait);
584 }
585 
586 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
587 {
588 	tp->write32_mbox(tp, off, val);
589 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
590 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
591 	     !tg3_flag(tp, ICH_WORKAROUND)))
592 		tp->read32_mbox(tp, off);
593 }
594 
595 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
596 {
597 	void __iomem *mbox = tp->regs + off;
598 	writel(val, mbox);
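	/* Writing the mailbox twice works around a TX mailbox hardware
	 * bug on the affected chips (see the TXD_MBOX_HWBUG flag).
	 */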
599 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
600 		writel(val, mbox);
601 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
602 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
603 		readl(mbox);
604 }
605 
606 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
607 {
608 	return readl(tp->regs + off + GRCMBOX_BASE);
609 }
610 
611 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
612 {
613 	writel(val, tp->regs + off + GRCMBOX_BASE);
614 }
615 
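/* Register access shorthands.  The _f variants flush the posted write;
 * tw32_wait_f additionally waits the given number of usecs (see
 * _tw32_flush() above).
 */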
616 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
617 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
618 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
619 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
620 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
621 
622 #define tw32(reg, val)			tp->write32(tp, reg, val)
623 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
624 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
625 #define tr32(reg)			tp->read32(tp, reg)
626 
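/* Read/write NIC on-chip SRAM through the memory window selected by
 * TG3PCI_MEM_WIN_BASE_ADDR, using either PCI config space or the
 * memory-mapped window registers depending on SRAM_USE_CONFIG.
 */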
627 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
628 {
629 	unsigned long flags;
630 
631 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
632 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
633 		return;
634 
635 	spin_lock_irqsave(&tp->indirect_lock, flags);
636 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
638 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
639 
640 		/* Always leave this as zero. */
641 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
642 	} else {
643 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
644 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
645 
646 		/* Always leave this as zero. */
647 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
648 	}
649 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
650 }
651 
652 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
653 {
654 	unsigned long flags;
655 
656 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
657 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
658 		*val = 0;
659 		return;
660 	}
661 
662 	spin_lock_irqsave(&tp->indirect_lock, flags);
663 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
664 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
665 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
666 
667 		/* Always leave this as zero. */
668 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
669 	} else {
670 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
671 		*val = tr32(TG3PCI_MEM_WIN_DATA);
672 
673 		/* Always leave this as zero. */
674 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
675 	}
676 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
677 }
678 
679 static void tg3_ape_lock_init(struct tg3 *tp)
680 {
681 	int i;
682 	u32 regbase, bit;
683 
684 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
685 		regbase = TG3_APE_LOCK_GRANT;
686 	else
687 		regbase = TG3_APE_PER_LOCK_GRANT;
688 
	/* Make sure the driver doesn't hold any stale locks. */
690 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
691 		switch (i) {
692 		case TG3_APE_LOCK_PHY0:
693 		case TG3_APE_LOCK_PHY1:
694 		case TG3_APE_LOCK_PHY2:
695 		case TG3_APE_LOCK_PHY3:
696 			bit = APE_LOCK_GRANT_DRIVER;
697 			break;
698 		default:
699 			if (!tp->pci_fn)
700 				bit = APE_LOCK_GRANT_DRIVER;
701 			else
702 				bit = 1 << tp->pci_fn;
703 		}
704 		tg3_ape_write32(tp, regbase + 4 * i, bit);
705 	}
706 
707 }
708 
709 static int tg3_ape_lock(struct tg3 *tp, int locknum)
710 {
711 	int i, off;
712 	int ret = 0;
713 	u32 status, req, gnt, bit;
714 
715 	if (!tg3_flag(tp, ENABLE_APE))
716 		return 0;
717 
718 	switch (locknum) {
719 	case TG3_APE_LOCK_GPIO:
720 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
721 			return 0;
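		/* fall through */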
722 	case TG3_APE_LOCK_GRC:
723 	case TG3_APE_LOCK_MEM:
724 		if (!tp->pci_fn)
725 			bit = APE_LOCK_REQ_DRIVER;
726 		else
727 			bit = 1 << tp->pci_fn;
728 		break;
729 	case TG3_APE_LOCK_PHY0:
730 	case TG3_APE_LOCK_PHY1:
731 	case TG3_APE_LOCK_PHY2:
732 	case TG3_APE_LOCK_PHY3:
733 		bit = APE_LOCK_REQ_DRIVER;
734 		break;
735 	default:
736 		return -EINVAL;
737 	}
738 
739 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
740 		req = TG3_APE_LOCK_REQ;
741 		gnt = TG3_APE_LOCK_GRANT;
742 	} else {
743 		req = TG3_APE_PER_LOCK_REQ;
744 		gnt = TG3_APE_PER_LOCK_GRANT;
745 	}
746 
747 	off = 4 * locknum;
748 
749 	tg3_ape_write32(tp, req + off, bit);
750 
751 	/* Wait for up to 1 millisecond to acquire lock. */
752 	for (i = 0; i < 100; i++) {
753 		status = tg3_ape_read32(tp, gnt + off);
754 		if (status == bit)
755 			break;
756 		if (pci_channel_offline(tp->pdev))
757 			break;
758 
759 		udelay(10);
760 	}
761 
762 	if (status != bit) {
763 		/* Revoke the lock request. */
764 		tg3_ape_write32(tp, gnt + off, bit);
765 		ret = -EBUSY;
766 	}
767 
768 	return ret;
769 }
770 
771 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
772 {
773 	u32 gnt, bit;
774 
775 	if (!tg3_flag(tp, ENABLE_APE))
776 		return;
777 
778 	switch (locknum) {
779 	case TG3_APE_LOCK_GPIO:
780 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
781 			return;
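		/* fall through */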
782 	case TG3_APE_LOCK_GRC:
783 	case TG3_APE_LOCK_MEM:
784 		if (!tp->pci_fn)
785 			bit = APE_LOCK_GRANT_DRIVER;
786 		else
787 			bit = 1 << tp->pci_fn;
788 		break;
789 	case TG3_APE_LOCK_PHY0:
790 	case TG3_APE_LOCK_PHY1:
791 	case TG3_APE_LOCK_PHY2:
792 	case TG3_APE_LOCK_PHY3:
793 		bit = APE_LOCK_GRANT_DRIVER;
794 		break;
795 	default:
796 		return;
797 	}
798 
799 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 		gnt = TG3_APE_LOCK_GRANT;
801 	else
802 		gnt = TG3_APE_PER_LOCK_GRANT;
803 
804 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
806 
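/* Acquire the APE memory lock and wait, for up to timeout_us usecs,
 * for any previously posted event to be serviced.  On success the lock
 * is left held by the caller; on timeout the lock is released and
 * -EBUSY is returned.
 */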
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 	u32 apedata;
810 
811 	while (timeout_us) {
812 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 			return -EBUSY;
814 
815 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 			break;
818 
819 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820 
821 		udelay(10);
822 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 	}
824 
825 	return timeout_us ? 0 : -EBUSY;
826 }
827 
828 #ifdef CONFIG_TIGON3_HWMON
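/* Poll until the APE has serviced the pending event.  Returns nonzero
 * if the event is still pending after timeout_us usecs.
 */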
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 	u32 i, apedata;
832 
833 	for (i = 0; i < timeout_us / 10; i++) {
834 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835 
836 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 			break;
838 
839 		udelay(10);
840 	}
841 
842 	return i == timeout_us / 10;
843 }
844 
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 				   u32 len)
847 {
848 	int err;
849 	u32 i, bufoff, msgoff, maxlen, apedata;
850 
851 	if (!tg3_flag(tp, APE_HAS_NCSI))
852 		return 0;
853 
854 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 	if (apedata != APE_SEG_SIG_MAGIC)
856 		return -ENODEV;
857 
858 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 	if (!(apedata & APE_FW_STATUS_READY))
860 		return -EAGAIN;
861 
862 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 		 TG3_APE_SHMEM_BASE;
864 	msgoff = bufoff + 2 * sizeof(u32);
865 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866 
867 	while (len) {
868 		u32 length;
869 
870 		/* Cap xfer sizes to scratchpad limits. */
871 		length = (len > maxlen) ? maxlen : len;
872 		len -= length;
873 
874 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 		if (!(apedata & APE_FW_STATUS_READY))
876 			return -EAGAIN;
877 
878 		/* Wait for up to 1 msec for APE to service previous event. */
879 		err = tg3_ape_event_lock(tp, 1000);
880 		if (err)
881 			return err;
882 
883 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 			  APE_EVENT_STATUS_SCRTCHPD_READ |
885 			  APE_EVENT_STATUS_EVENT_PENDING;
886 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887 
888 		tg3_ape_write32(tp, bufoff, base_off);
889 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890 
891 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893 
894 		base_off += length;
895 
896 		if (tg3_ape_wait_for_event(tp, 30000))
897 			return -EAGAIN;
898 
899 		for (i = 0; length; i += 4, length -= 4) {
900 			u32 val = tg3_ape_read32(tp, msgoff + i);
901 			memcpy(data, &val, sizeof(u32));
902 			data++;
903 		}
904 	}
905 
906 	return 0;
907 }
908 #endif
909 
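/* Post a driver event to the APE firmware: wait for the previous event
 * to be serviced, write the new event status, then ring the APE
 * doorbell.
 */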
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 	int err;
913 	u32 apedata;
914 
915 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 	if (apedata != APE_SEG_SIG_MAGIC)
917 		return -EAGAIN;
918 
919 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 	if (!(apedata & APE_FW_STATUS_READY))
921 		return -EAGAIN;
922 
923 	/* Wait for up to 1 millisecond for APE to service previous event. */
924 	err = tg3_ape_event_lock(tp, 1000);
925 	if (err)
926 		return err;
927 
928 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 			event | APE_EVENT_STATUS_EVENT_PENDING);
930 
931 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933 
934 	return 0;
935 }
936 
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 	u32 event;
940 	u32 apedata;
941 
942 	if (!tg3_flag(tp, ENABLE_APE))
943 		return;
944 
945 	switch (kind) {
946 	case RESET_KIND_INIT:
947 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
948 				APE_HOST_SEG_SIG_MAGIC);
949 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
950 				APE_HOST_SEG_LEN_MAGIC);
951 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
952 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
953 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
954 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
955 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
956 				APE_HOST_BEHAV_NO_PHYLOCK);
957 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
958 				    TG3_APE_HOST_DRVR_STATE_START);
959 
960 		event = APE_EVENT_STATUS_STATE_START;
961 		break;
962 	case RESET_KIND_SHUTDOWN:
963 		/* With the interface we are currently using,
964 		 * APE does not track driver state.  Wiping
965 		 * out the HOST SEGMENT SIGNATURE forces
966 		 * the APE to assume OS absent status.
967 		 */
968 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
969 
970 		if (device_may_wakeup(&tp->pdev->dev) &&
971 		    tg3_flag(tp, WOL_ENABLE)) {
972 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
973 					    TG3_APE_HOST_WOL_SPEED_AUTO);
974 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
975 		} else
976 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
977 
978 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
979 
980 		event = APE_EVENT_STATUS_STATE_UNLOAD;
981 		break;
982 	default:
983 		return;
984 	}
985 
986 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
987 
988 	tg3_ape_send_event(tp, event);
989 }
990 
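/* Mask chip interrupts: set the PCI interrupt mask bit and write a
 * nonzero value to each interrupt mailbox to hold further interrupts
 * off.
 */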
991 static void tg3_disable_ints(struct tg3 *tp)
992 {
993 	int i;
994 
995 	tw32(TG3PCI_MISC_HOST_CTRL,
996 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
997 	for (i = 0; i < tp->irq_max; i++)
998 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
999 }
1000 
1001 static void tg3_enable_ints(struct tg3 *tp)
1002 {
1003 	int i;
1004 
1005 	tp->irq_sync = 0;
1006 	wmb();
1007 
1008 	tw32(TG3PCI_MISC_HOST_CTRL,
1009 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1010 
1011 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1012 	for (i = 0; i < tp->irq_cnt; i++) {
1013 		struct tg3_napi *tnapi = &tp->napi[i];
1014 
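		/* Re-arm the vector; writing the last status tag back to
		 * the mailbox tells the chip which status block updates
		 * have already been processed.
		 */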
1015 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1016 		if (tg3_flag(tp, 1SHOT_MSI))
1017 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1018 
1019 		tp->coal_now |= tnapi->coal_now;
1020 	}
1021 
1022 	/* Force an initial interrupt */
1023 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1024 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1025 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1026 	else
1027 		tw32(HOSTCC_MODE, tp->coal_now);
1028 
1029 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1030 }
1031 
1032 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1033 {
1034 	struct tg3 *tp = tnapi->tp;
1035 	struct tg3_hw_status *sblk = tnapi->hw_status;
1036 	unsigned int work_exists = 0;
1037 
1038 	/* check for phy events */
1039 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1040 		if (sblk->status & SD_STATUS_LINK_CHG)
1041 			work_exists = 1;
1042 	}
1043 
1044 	/* check for TX work to do */
1045 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1046 		work_exists = 1;
1047 
1048 	/* check for RX work to do */
1049 	if (tnapi->rx_rcb_prod_idx &&
1050 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1051 		work_exists = 1;
1052 
1053 	return work_exists;
1054 }
1055 
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which re-enables interrupts.
 */
1061 static void tg3_int_reenable(struct tg3_napi *tnapi)
1062 {
1063 	struct tg3 *tp = tnapi->tp;
1064 
1065 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1066 	mmiowb();
1067 
1068 	/* When doing tagged status, this work check is unnecessary.
1069 	 * The last_tag we write above tells the chip which piece of
1070 	 * work we've completed.
1071 	 */
1072 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1073 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1074 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1075 }
1076 
1077 static void tg3_switch_clocks(struct tg3 *tp)
1078 {
1079 	u32 clock_ctrl;
1080 	u32 orig_clock_ctrl;
1081 
1082 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1083 		return;
1084 
1085 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1086 
1087 	orig_clock_ctrl = clock_ctrl;
1088 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1089 		       CLOCK_CTRL_CLKRUN_OENABLE |
1090 		       0x1f);
1091 	tp->pci_clock_ctrl = clock_ctrl;
1092 
1093 	if (tg3_flag(tp, 5705_PLUS)) {
1094 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1095 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1096 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1097 		}
1098 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1099 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1100 			    clock_ctrl |
1101 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1102 			    40);
1103 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1104 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1105 			    40);
1106 	}
1107 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1108 }
1109 
1110 #define PHY_BUSY_LOOPS	5000
1111 
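/* Clause 22 MDIO read through the MAC's MI interface.  Auto-polling is
 * paused while MI_COM is driven by hand, and the PHY APE lock guards
 * against concurrent management-firmware access.
 */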
1112 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1113 			 u32 *val)
1114 {
1115 	u32 frame_val;
1116 	unsigned int loops;
1117 	int ret;
1118 
1119 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1120 		tw32_f(MAC_MI_MODE,
1121 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1122 		udelay(80);
1123 	}
1124 
1125 	tg3_ape_lock(tp, tp->phy_ape_lock);
1126 
1127 	*val = 0x0;
1128 
1129 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1130 		      MI_COM_PHY_ADDR_MASK);
1131 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1132 		      MI_COM_REG_ADDR_MASK);
1133 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1134 
1135 	tw32_f(MAC_MI_COM, frame_val);
1136 
1137 	loops = PHY_BUSY_LOOPS;
1138 	while (loops != 0) {
1139 		udelay(10);
1140 		frame_val = tr32(MAC_MI_COM);
1141 
1142 		if ((frame_val & MI_COM_BUSY) == 0) {
1143 			udelay(5);
1144 			frame_val = tr32(MAC_MI_COM);
1145 			break;
1146 		}
1147 		loops -= 1;
1148 	}
1149 
1150 	ret = -EBUSY;
1151 	if (loops != 0) {
1152 		*val = frame_val & MI_COM_DATA_MASK;
1153 		ret = 0;
1154 	}
1155 
1156 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1157 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1158 		udelay(80);
1159 	}
1160 
1161 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1162 
1163 	return ret;
1164 }
1165 
1166 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1167 {
1168 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1169 }
1170 
1171 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1172 			  u32 val)
1173 {
1174 	u32 frame_val;
1175 	unsigned int loops;
1176 	int ret;
1177 
1178 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1179 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1180 		return 0;
1181 
1182 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1183 		tw32_f(MAC_MI_MODE,
1184 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1185 		udelay(80);
1186 	}
1187 
1188 	tg3_ape_lock(tp, tp->phy_ape_lock);
1189 
1190 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1191 		      MI_COM_PHY_ADDR_MASK);
1192 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1193 		      MI_COM_REG_ADDR_MASK);
1194 	frame_val |= (val & MI_COM_DATA_MASK);
1195 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1196 
1197 	tw32_f(MAC_MI_COM, frame_val);
1198 
1199 	loops = PHY_BUSY_LOOPS;
1200 	while (loops != 0) {
1201 		udelay(10);
1202 		frame_val = tr32(MAC_MI_COM);
1203 		if ((frame_val & MI_COM_BUSY) == 0) {
1204 			udelay(5);
1205 			frame_val = tr32(MAC_MI_COM);
1206 			break;
1207 		}
1208 		loops -= 1;
1209 	}
1210 
1211 	ret = -EBUSY;
1212 	if (loops != 0)
1213 		ret = 0;
1214 
1215 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1216 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1217 		udelay(80);
1218 	}
1219 
1220 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1221 
1222 	return ret;
1223 }
1224 
1225 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1226 {
1227 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1228 }
1229 
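/* Indirect clause 45 register access through the clause 22 MMD access
 * control and address/data registers (MII_TG3_MMD_CTRL and
 * MII_TG3_MMD_ADDRESS).
 */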
1230 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1231 {
1232 	int err;
1233 
1234 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1235 	if (err)
1236 		goto done;
1237 
1238 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1239 	if (err)
1240 		goto done;
1241 
1242 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1243 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1244 	if (err)
1245 		goto done;
1246 
1247 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1248 
1249 done:
1250 	return err;
1251 }
1252 
1253 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1254 {
1255 	int err;
1256 
1257 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1258 	if (err)
1259 		goto done;
1260 
1261 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1262 	if (err)
1263 		goto done;
1264 
1265 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1266 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1267 	if (err)
1268 		goto done;
1269 
1270 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1271 
1272 done:
1273 	return err;
1274 }
1275 
1276 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1277 {
1278 	int err;
1279 
1280 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 	if (!err)
1282 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1283 
1284 	return err;
1285 }
1286 
1287 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1288 {
1289 	int err;
1290 
1291 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1292 	if (!err)
1293 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1294 
1295 	return err;
1296 }
1297 
1298 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1299 {
1300 	int err;
1301 
1302 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1303 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1304 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1305 	if (!err)
1306 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1307 
1308 	return err;
1309 }
1310 
1311 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1312 {
1313 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1314 		set |= MII_TG3_AUXCTL_MISC_WREN;
1315 
1316 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1317 }
1318 
1319 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1320 {
1321 	u32 val;
1322 	int err;
1323 
1324 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1325 
1326 	if (err)
1327 		return err;
1328 
1329 	if (enable)
1330 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1331 	else
1332 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1333 
1334 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1335 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1336 
1337 	return err;
1338 }
1339 
1340 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1341 {
1342 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1343 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1344 }
1345 
1346 static int tg3_bmcr_reset(struct tg3 *tp)
1347 {
1348 	u32 phy_control;
1349 	int limit, err;
1350 
1351 	/* OK, reset it, and poll the BMCR_RESET bit until it
1352 	 * clears or we time out.
1353 	 */
1354 	phy_control = BMCR_RESET;
1355 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1356 	if (err != 0)
1357 		return -EBUSY;
1358 
1359 	limit = 5000;
1360 	while (limit--) {
1361 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1362 		if (err != 0)
1363 			return -EBUSY;
1364 
1365 		if ((phy_control & BMCR_RESET) == 0) {
1366 			udelay(40);
1367 			break;
1368 		}
1369 		udelay(10);
1370 	}
1371 	if (limit < 0)
1372 		return -EBUSY;
1373 
1374 	return 0;
1375 }
1376 
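/* mii_bus read/write callbacks used by phylib.  tp->lock serializes
 * access to the MI registers with the rest of the driver.
 */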
1377 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1378 {
1379 	struct tg3 *tp = bp->priv;
1380 	u32 val;
1381 
1382 	spin_lock_bh(&tp->lock);
1383 
1384 	if (__tg3_readphy(tp, mii_id, reg, &val))
1385 		val = -EIO;
1386 
1387 	spin_unlock_bh(&tp->lock);
1388 
1389 	return val;
1390 }
1391 
1392 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1393 {
1394 	struct tg3 *tp = bp->priv;
1395 	u32 ret = 0;
1396 
1397 	spin_lock_bh(&tp->lock);
1398 
1399 	if (__tg3_writephy(tp, mii_id, reg, val))
1400 		ret = -EIO;
1401 
1402 	spin_unlock_bh(&tp->lock);
1403 
1404 	return ret;
1405 }
1406 
1407 static void tg3_mdio_config_5785(struct tg3 *tp)
1408 {
1409 	u32 val;
1410 	struct phy_device *phydev;
1411 
1412 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1413 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1414 	case PHY_ID_BCM50610:
1415 	case PHY_ID_BCM50610M:
1416 		val = MAC_PHYCFG2_50610_LED_MODES;
1417 		break;
1418 	case PHY_ID_BCMAC131:
1419 		val = MAC_PHYCFG2_AC131_LED_MODES;
1420 		break;
1421 	case PHY_ID_RTL8211C:
1422 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1423 		break;
1424 	case PHY_ID_RTL8201E:
1425 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1426 		break;
1427 	default:
1428 		return;
1429 	}
1430 
1431 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1432 		tw32(MAC_PHYCFG2, val);
1433 
1434 		val = tr32(MAC_PHYCFG1);
1435 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1436 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1437 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1438 		tw32(MAC_PHYCFG1, val);
1439 
1440 		return;
1441 	}
1442 
1443 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1444 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1445 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1446 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1447 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1448 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1449 		       MAC_PHYCFG2_INBAND_ENABLE;
1450 
1451 	tw32(MAC_PHYCFG2, val);
1452 
1453 	val = tr32(MAC_PHYCFG1);
1454 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1455 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1456 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1457 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1458 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1459 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1460 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1461 	}
1462 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1463 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1464 	tw32(MAC_PHYCFG1, val);
1465 
1466 	val = tr32(MAC_EXT_RGMII_MODE);
1467 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1468 		 MAC_RGMII_MODE_RX_QUALITY |
1469 		 MAC_RGMII_MODE_RX_ACTIVITY |
1470 		 MAC_RGMII_MODE_RX_ENG_DET |
1471 		 MAC_RGMII_MODE_TX_ENABLE |
1472 		 MAC_RGMII_MODE_TX_LOWPWR |
1473 		 MAC_RGMII_MODE_TX_RESET);
1474 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1475 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1476 			val |= MAC_RGMII_MODE_RX_INT_B |
1477 			       MAC_RGMII_MODE_RX_QUALITY |
1478 			       MAC_RGMII_MODE_RX_ACTIVITY |
1479 			       MAC_RGMII_MODE_RX_ENG_DET;
1480 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1481 			val |= MAC_RGMII_MODE_TX_ENABLE |
1482 			       MAC_RGMII_MODE_TX_LOWPWR |
1483 			       MAC_RGMII_MODE_TX_RESET;
1484 	}
1485 	tw32(MAC_EXT_RGMII_MODE, val);
1486 }
1487 
1488 static void tg3_mdio_start(struct tg3 *tp)
1489 {
1490 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1491 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1492 	udelay(80);
1493 
1494 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1495 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1496 		tg3_mdio_config_5785(tp);
1497 }
1498 
1499 static int tg3_mdio_init(struct tg3 *tp)
1500 {
1501 	int i;
1502 	u32 reg;
1503 	struct phy_device *phydev;
1504 
1505 	if (tg3_flag(tp, 5717_PLUS)) {
1506 		u32 is_serdes;
1507 
1508 		tp->phy_addr = tp->pci_fn + 1;
1509 
1510 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1511 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1512 		else
1513 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1514 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1515 		if (is_serdes)
1516 			tp->phy_addr += 7;
1517 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1518 		int addr;
1519 
1520 		addr = ssb_gige_get_phyaddr(tp->pdev);
1521 		if (addr < 0)
1522 			return addr;
1523 		tp->phy_addr = addr;
1524 	} else
1525 		tp->phy_addr = TG3_PHY_MII_ADDR;
1526 
1527 	tg3_mdio_start(tp);
1528 
1529 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1530 		return 0;
1531 
1532 	tp->mdio_bus = mdiobus_alloc();
1533 	if (tp->mdio_bus == NULL)
1534 		return -ENOMEM;
1535 
1536 	tp->mdio_bus->name     = "tg3 mdio bus";
1537 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1538 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1539 	tp->mdio_bus->priv     = tp;
1540 	tp->mdio_bus->parent   = &tp->pdev->dev;
1541 	tp->mdio_bus->read     = &tg3_mdio_read;
1542 	tp->mdio_bus->write    = &tg3_mdio_write;
1543 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1544 
1545 	/* The bus registration will look for all the PHYs on the mdio bus.
1546 	 * Unfortunately, it does not ensure the PHY is powered up before
1547 	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
1549 	 */
1550 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1551 		tg3_bmcr_reset(tp);
1552 
1553 	i = mdiobus_register(tp->mdio_bus);
1554 	if (i) {
1555 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1556 		mdiobus_free(tp->mdio_bus);
1557 		return i;
1558 	}
1559 
1560 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1561 
1562 	if (!phydev || !phydev->drv) {
1563 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1564 		mdiobus_unregister(tp->mdio_bus);
1565 		mdiobus_free(tp->mdio_bus);
1566 		return -ENODEV;
1567 	}
1568 
1569 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1570 	case PHY_ID_BCM57780:
1571 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1572 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1573 		break;
1574 	case PHY_ID_BCM50610:
1575 	case PHY_ID_BCM50610M:
1576 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1577 				     PHY_BRCM_RX_REFCLK_UNUSED |
1578 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1579 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1581 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1582 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1583 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1584 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1585 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
1587 	case PHY_ID_RTL8211C:
1588 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1589 		break;
1590 	case PHY_ID_RTL8201E:
1591 	case PHY_ID_BCMAC131:
1592 		phydev->interface = PHY_INTERFACE_MODE_MII;
1593 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1594 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1595 		break;
1596 	}
1597 
1598 	tg3_flag_set(tp, MDIOBUS_INITED);
1599 
1600 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1601 		tg3_mdio_config_5785(tp);
1602 
1603 	return 0;
1604 }
1605 
1606 static void tg3_mdio_fini(struct tg3 *tp)
1607 {
1608 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1609 		tg3_flag_clear(tp, MDIOBUS_INITED);
1610 		mdiobus_unregister(tp->mdio_bus);
1611 		mdiobus_free(tp->mdio_bus);
1612 	}
1613 }
1614 
1615 /* tp->lock is held. */
1616 static inline void tg3_generate_fw_event(struct tg3 *tp)
1617 {
1618 	u32 val;
1619 
1620 	val = tr32(GRC_RX_CPU_EVENT);
1621 	val |= GRC_RX_CPU_DRIVER_EVENT;
1622 	tw32_f(GRC_RX_CPU_EVENT, val);
1623 
1624 	tp->last_event_jiffies = jiffies;
1625 }
1626 
1627 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1628 
1629 /* tp->lock is held. */
1630 static void tg3_wait_for_event_ack(struct tg3 *tp)
1631 {
1632 	int i;
1633 	unsigned int delay_cnt;
1634 	long time_remain;
1635 
1636 	/* If enough time has passed, no wait is necessary. */
1637 	time_remain = (long)(tp->last_event_jiffies + 1 +
1638 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1639 		      (long)jiffies;
1640 	if (time_remain < 0)
1641 		return;
1642 
1643 	/* Check if we can shorten the wait time. */
1644 	delay_cnt = jiffies_to_usecs(time_remain);
1645 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1646 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1647 	delay_cnt = (delay_cnt >> 3) + 1;
1648 
1649 	for (i = 0; i < delay_cnt; i++) {
1650 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1651 			break;
1652 		if (pci_channel_offline(tp->pdev))
1653 			break;
1654 
1655 		udelay(8);
1656 	}
1657 }
1658 
1659 /* tp->lock is held. */
1660 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1661 {
1662 	u32 reg, val;
1663 
1664 	val = 0;
1665 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1666 		val = reg << 16;
1667 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1668 		val |= (reg & 0xffff);
1669 	*data++ = val;
1670 
1671 	val = 0;
1672 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1673 		val = reg << 16;
1674 	if (!tg3_readphy(tp, MII_LPA, &reg))
1675 		val |= (reg & 0xffff);
1676 	*data++ = val;
1677 
1678 	val = 0;
1679 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1680 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1681 			val = reg << 16;
1682 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1683 			val |= (reg & 0xffff);
1684 	}
1685 	*data++ = val;
1686 
1687 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1688 		val = reg << 16;
1689 	else
1690 		val = 0;
1691 	*data++ = val;
1692 }
1693 
1694 /* tp->lock is held. */
1695 static void tg3_ump_link_report(struct tg3 *tp)
1696 {
1697 	u32 data[4];
1698 
1699 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1700 		return;
1701 
1702 	tg3_phy_gather_ump_data(tp, data);
1703 
1704 	tg3_wait_for_event_ack(tp);
1705 
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1711 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1712 
1713 	tg3_generate_fw_event(tp);
1714 }
1715 
1716 /* tp->lock is held. */
1717 static void tg3_stop_fw(struct tg3 *tp)
1718 {
1719 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1720 		/* Wait for RX cpu to ACK the previous event. */
1721 		tg3_wait_for_event_ack(tp);
1722 
1723 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1724 
1725 		tg3_generate_fw_event(tp);
1726 
1727 		/* Wait for RX cpu to ACK this event. */
1728 		tg3_wait_for_event_ack(tp);
1729 	}
1730 }
1731 
1732 /* tp->lock is held. */
1733 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1734 {
1735 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1736 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1737 
1738 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1739 		switch (kind) {
1740 		case RESET_KIND_INIT:
1741 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742 				      DRV_STATE_START);
1743 			break;
1744 
1745 		case RESET_KIND_SHUTDOWN:
1746 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747 				      DRV_STATE_UNLOAD);
1748 			break;
1749 
1750 		case RESET_KIND_SUSPEND:
1751 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752 				      DRV_STATE_SUSPEND);
1753 			break;
1754 
1755 		default:
1756 			break;
1757 		}
1758 	}
1759 }
1760 
1761 /* tp->lock is held. */
1762 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1763 {
1764 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1765 		switch (kind) {
1766 		case RESET_KIND_INIT:
1767 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1768 				      DRV_STATE_START_DONE);
1769 			break;
1770 
1771 		case RESET_KIND_SHUTDOWN:
1772 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1773 				      DRV_STATE_UNLOAD_DONE);
1774 			break;
1775 
1776 		default:
1777 			break;
1778 		}
1779 	}
1780 }
1781 
1782 /* tp->lock is held. */
1783 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1784 {
1785 	if (tg3_flag(tp, ENABLE_ASF)) {
1786 		switch (kind) {
1787 		case RESET_KIND_INIT:
1788 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 				      DRV_STATE_START);
1790 			break;
1791 
1792 		case RESET_KIND_SHUTDOWN:
1793 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1794 				      DRV_STATE_UNLOAD);
1795 			break;
1796 
1797 		case RESET_KIND_SUSPEND:
1798 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799 				      DRV_STATE_SUSPEND);
1800 			break;
1801 
1802 		default:
1803 			break;
1804 		}
1805 	}
1806 }
1807 
1808 static int tg3_poll_fw(struct tg3 *tp)
1809 {
1810 	int i;
1811 	u32 val;
1812 
1813 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1814 		return 0;
1815 
1816 	if (tg3_flag(tp, IS_SSB_CORE)) {
1817 		/* We don't use firmware. */
1818 		return 0;
1819 	}
1820 
1821 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1822 		/* Wait up to 20ms for init done. */
1823 		for (i = 0; i < 200; i++) {
1824 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1825 				return 0;
1826 			if (pci_channel_offline(tp->pdev))
1827 				return -ENODEV;
1828 
1829 			udelay(100);
1830 		}
1831 		return -ENODEV;
1832 	}
1833 
1834 	/* Wait for firmware initialization to complete. */
1835 	for (i = 0; i < 100000; i++) {
1836 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1837 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1838 			break;
1839 		if (pci_channel_offline(tp->pdev)) {
1840 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1841 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1842 				netdev_info(tp->dev, "No firmware running\n");
1843 			}
1844 
1845 			break;
1846 		}
1847 
1848 		udelay(10);
1849 	}
1850 
1851 	/* Chip might not be fitted with firmware.  Some Sun onboard
1852 	 * parts are configured like that.  So don't signal the timeout
1853 	 * of the above loop as an error, but do report the lack of
1854 	 * running firmware once.
1855 	 */
1856 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1857 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1858 
1859 		netdev_info(tp->dev, "No firmware running\n");
1860 	}
1861 
1862 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1863 		/* The 57765 A0 needs a little more
1864 		 * time to do some important work.
1865 		 */
1866 		mdelay(10);
1867 	}
1868 
1869 	return 0;
1870 }
1871 
1872 static void tg3_link_report(struct tg3 *tp)
1873 {
1874 	if (!netif_carrier_ok(tp->dev)) {
1875 		netif_info(tp, link, tp->dev, "Link is down\n");
1876 		tg3_ump_link_report(tp);
1877 	} else if (netif_msg_link(tp)) {
1878 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1879 			    (tp->link_config.active_speed == SPEED_1000 ?
1880 			     1000 :
1881 			     (tp->link_config.active_speed == SPEED_100 ?
1882 			      100 : 10)),
1883 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1884 			     "full" : "half"));
1885 
1886 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1887 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1888 			    "on" : "off",
1889 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1890 			    "on" : "off");
1891 
1892 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1893 			netdev_info(tp->dev, "EEE is %s\n",
1894 				    tp->setlpicnt ? "enabled" : "disabled");
1895 
1896 		tg3_ump_link_report(tp);
1897 	}
1898 
1899 	tp->link_up = netif_carrier_ok(tp->dev);
1900 }
1901 
1902 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1903 {
1904 	u32 flowctrl = 0;
1905 
1906 	if (adv & ADVERTISE_PAUSE_CAP) {
1907 		flowctrl |= FLOW_CTRL_RX;
1908 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1909 			flowctrl |= FLOW_CTRL_TX;
1910 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1911 		flowctrl |= FLOW_CTRL_TX;
1912 
1913 	return flowctrl;
1914 }
1915 
1916 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1917 {
1918 	u16 miireg;
1919 
1920 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1921 		miireg = ADVERTISE_1000XPAUSE;
1922 	else if (flow_ctrl & FLOW_CTRL_TX)
1923 		miireg = ADVERTISE_1000XPSE_ASYM;
1924 	else if (flow_ctrl & FLOW_CTRL_RX)
1925 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1926 	else
1927 		miireg = 0;
1928 
1929 	return miireg;
1930 }
1931 
1932 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1933 {
1934 	u32 flowctrl = 0;
1935 
1936 	if (adv & ADVERTISE_1000XPAUSE) {
1937 		flowctrl |= FLOW_CTRL_RX;
1938 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1939 			flowctrl |= FLOW_CTRL_TX;
1940 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1941 		flowctrl |= FLOW_CTRL_TX;
1942 
1943 	return flowctrl;
1944 }
1945 
1946 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1947 {
1948 	u8 cap = 0;
1949 
1950 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1951 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1952 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1953 		if (lcladv & ADVERTISE_1000XPAUSE)
1954 			cap = FLOW_CTRL_RX;
1955 		if (rmtadv & ADVERTISE_1000XPAUSE)
1956 			cap = FLOW_CTRL_TX;
1957 	}
1958 
1959 	return cap;
1960 }
1961 
1962 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1963 {
1964 	u8 autoneg;
1965 	u8 flowctrl = 0;
1966 	u32 old_rx_mode = tp->rx_mode;
1967 	u32 old_tx_mode = tp->tx_mode;
1968 
1969 	if (tg3_flag(tp, USE_PHYLIB))
1970 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1971 	else
1972 		autoneg = tp->link_config.autoneg;
1973 
1974 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1975 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1976 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1977 		else
1978 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1979 	} else
1980 		flowctrl = tp->link_config.flowctrl;
1981 
1982 	tp->link_config.active_flowctrl = flowctrl;
1983 
1984 	if (flowctrl & FLOW_CTRL_RX)
1985 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1986 	else
1987 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1988 
1989 	if (old_rx_mode != tp->rx_mode)
1990 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1991 
1992 	if (flowctrl & FLOW_CTRL_TX)
1993 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1994 	else
1995 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1996 
1997 	if (old_tx_mode != tp->tx_mode)
1998 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1999 }
2000 
2001 static void tg3_adjust_link(struct net_device *dev)
2002 {
2003 	u8 oldflowctrl, linkmesg = 0;
2004 	u32 mac_mode, lcl_adv, rmt_adv;
2005 	struct tg3 *tp = netdev_priv(dev);
2006 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2007 
2008 	spin_lock_bh(&tp->lock);
2009 
2010 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2011 				    MAC_MODE_HALF_DUPLEX);
2012 
2013 	oldflowctrl = tp->link_config.active_flowctrl;
2014 
2015 	if (phydev->link) {
2016 		lcl_adv = 0;
2017 		rmt_adv = 0;
2018 
2019 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2020 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2021 		else if (phydev->speed == SPEED_1000 ||
2022 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2023 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2024 		else
2025 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2026 
2027 		if (phydev->duplex == DUPLEX_HALF)
2028 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2029 		else {
2030 			lcl_adv = mii_advertise_flowctrl(
2031 				  tp->link_config.flowctrl);
2032 
2033 			if (phydev->pause)
2034 				rmt_adv = LPA_PAUSE_CAP;
2035 			if (phydev->asym_pause)
2036 				rmt_adv |= LPA_PAUSE_ASYM;
2037 		}
2038 
2039 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2040 	} else
2041 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2042 
2043 	if (mac_mode != tp->mac_mode) {
2044 		tp->mac_mode = mac_mode;
2045 		tw32_f(MAC_MODE, tp->mac_mode);
2046 		udelay(40);
2047 	}
2048 
2049 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2050 		if (phydev->speed == SPEED_10)
2051 			tw32(MAC_MI_STAT,
2052 			     MAC_MI_STAT_10MBPS_MODE |
2053 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 		else
2055 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2056 	}
2057 
2058 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2059 		tw32(MAC_TX_LENGTHS,
2060 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2061 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2062 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2063 	else
2064 		tw32(MAC_TX_LENGTHS,
2065 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2066 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2067 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2068 
2069 	if (phydev->link != tp->old_link ||
2070 	    phydev->speed != tp->link_config.active_speed ||
2071 	    phydev->duplex != tp->link_config.active_duplex ||
2072 	    oldflowctrl != tp->link_config.active_flowctrl)
2073 		linkmesg = 1;
2074 
2075 	tp->old_link = phydev->link;
2076 	tp->link_config.active_speed = phydev->speed;
2077 	tp->link_config.active_duplex = phydev->duplex;
2078 
2079 	spin_unlock_bh(&tp->lock);
2080 
2081 	if (linkmesg)
2082 		tg3_link_report(tp);
2083 }
2084 
2085 static int tg3_phy_init(struct tg3 *tp)
2086 {
2087 	struct phy_device *phydev;
2088 
2089 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2090 		return 0;
2091 
2092 	/* Bring the PHY back to a known state. */
2093 	tg3_bmcr_reset(tp);
2094 
2095 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2096 
2097 	/* Attach the MAC to the PHY. */
2098 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2099 			     tg3_adjust_link, phydev->interface);
2100 	if (IS_ERR(phydev)) {
2101 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2102 		return PTR_ERR(phydev);
2103 	}
2104 
2105 	/* Mask with MAC supported features. */
2106 	switch (phydev->interface) {
2107 	case PHY_INTERFACE_MODE_GMII:
2108 	case PHY_INTERFACE_MODE_RGMII:
2109 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2110 			phydev->supported &= (PHY_GBIT_FEATURES |
2111 					      SUPPORTED_Pause |
2112 					      SUPPORTED_Asym_Pause);
2113 			break;
2114 		}
2115 		/* fallthru */
2116 	case PHY_INTERFACE_MODE_MII:
2117 		phydev->supported &= (PHY_BASIC_FEATURES |
2118 				      SUPPORTED_Pause |
2119 				      SUPPORTED_Asym_Pause);
2120 		break;
2121 	default:
2122 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2123 		return -EINVAL;
2124 	}
2125 
2126 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2127 
2128 	phydev->advertising = phydev->supported;
2129 
2130 	phy_attached_info(phydev);
2131 
2132 	return 0;
2133 }
2134 
2135 static void tg3_phy_start(struct tg3 *tp)
2136 {
2137 	struct phy_device *phydev;
2138 
2139 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2140 		return;
2141 
2142 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2143 
2144 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2145 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2146 		phydev->speed = tp->link_config.speed;
2147 		phydev->duplex = tp->link_config.duplex;
2148 		phydev->autoneg = tp->link_config.autoneg;
2149 		phydev->advertising = tp->link_config.advertising;
2150 	}
2151 
2152 	phy_start(phydev);
2153 
2154 	phy_start_aneg(phydev);
2155 }
2156 
2157 static void tg3_phy_stop(struct tg3 *tp)
2158 {
2159 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2160 		return;
2161 
2162 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 }
2164 
2165 static void tg3_phy_fini(struct tg3 *tp)
2166 {
2167 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2168 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2170 	}
2171 }
2172 
2173 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2174 {
2175 	int err;
2176 	u32 val;
2177 
2178 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2179 		return 0;
2180 
2181 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2182 		/* Cannot do read-modify-write on 5401 */
2183 		err = tg3_phy_auxctl_write(tp,
2184 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2185 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2186 					   0x4c20);
2187 		goto done;
2188 	}
2189 
2190 	err = tg3_phy_auxctl_read(tp,
2191 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2192 	if (err)
2193 		return err;
2194 
2195 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2196 	err = tg3_phy_auxctl_write(tp,
2197 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2198 
2199 done:
2200 	return err;
2201 }
2202 
2203 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2204 {
2205 	u32 phytest;
2206 
2207 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2208 		u32 phy;
2209 
2210 		tg3_writephy(tp, MII_TG3_FET_TEST,
2211 			     phytest | MII_TG3_FET_SHADOW_EN);
2212 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2213 			if (enable)
2214 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215 			else
2216 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2218 		}
2219 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2220 	}
2221 }
2222 
2223 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2224 {
2225 	u32 reg;
2226 
2227 	if (!tg3_flag(tp, 5705_PLUS) ||
2228 	    (tg3_flag(tp, 5717_PLUS) &&
2229 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2230 		return;
2231 
2232 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2233 		tg3_phy_fet_toggle_apd(tp, enable);
2234 		return;
2235 	}
2236 
2237 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2238 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2239 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2240 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2241 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2242 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2243 
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

2247 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2248 	if (enable)
2249 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2250 
2251 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2252 }
2253 
2254 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2255 {
2256 	u32 phy;
2257 
2258 	if (!tg3_flag(tp, 5705_PLUS) ||
2259 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260 		return;
2261 
2262 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263 		u32 ephy;
2264 
2265 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2266 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2267 
2268 			tg3_writephy(tp, MII_TG3_FET_TEST,
2269 				     ephy | MII_TG3_FET_SHADOW_EN);
2270 			if (!tg3_readphy(tp, reg, &phy)) {
2271 				if (enable)
2272 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 				else
2274 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 				tg3_writephy(tp, reg, phy);
2276 			}
2277 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2278 		}
2279 	} else {
2280 		int ret;
2281 
2282 		ret = tg3_phy_auxctl_read(tp,
2283 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284 		if (!ret) {
2285 			if (enable)
2286 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 			else
2288 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 			tg3_phy_auxctl_write(tp,
2290 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2291 		}
2292 	}
2293 }
2294 
2295 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2296 {
2297 	int ret;
2298 	u32 val;
2299 
2300 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301 		return;
2302 
2303 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2304 	if (!ret)
2305 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2306 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 }
2308 
2309 static void tg3_phy_apply_otp(struct tg3 *tp)
2310 {
2311 	u32 otp, phy;
2312 
2313 	if (!tp->phy_otp)
2314 		return;
2315 
2316 	otp = tp->phy_otp;
2317 
2318 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319 		return;
2320 
2321 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2322 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2323 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2324 
2325 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2326 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2327 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2328 
2329 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2330 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2331 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2332 
2333 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2334 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2335 
2336 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2338 
2339 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2340 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2341 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2342 
2343 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 }
2345 
2346 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2347 {
2348 	u32 val;
2349 	struct ethtool_eee *dest = &tp->eee;
2350 
2351 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352 		return;
2353 
2354 	if (eee)
2355 		dest = eee;
2356 
2357 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2358 		return;
2359 
2360 	/* Pull eee_active */
2361 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2362 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2363 		dest->eee_active = 1;
2364 	} else
2365 		dest->eee_active = 0;
2366 
2367 	/* Pull lp advertised settings */
2368 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2369 		return;
2370 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371 
2372 	/* Pull advertised and eee_enabled settings */
2373 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2374 		return;
2375 	dest->eee_enabled = !!val;
2376 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377 
2378 	/* Pull tx_lpi_enabled */
2379 	val = tr32(TG3_CPMU_EEE_MODE);
2380 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2381 
2382 	/* Pull lpi timer value */
2383 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2384 }
2385 
2386 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2387 {
2388 	u32 val;
2389 
2390 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2391 		return;
2392 
2393 	tp->setlpicnt = 0;
2394 
2395 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2396 	    current_link_up &&
2397 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2398 	    (tp->link_config.active_speed == SPEED_100 ||
2399 	     tp->link_config.active_speed == SPEED_1000)) {
2400 		u32 eeectl;
2401 
2402 		if (tp->link_config.active_speed == SPEED_1000)
2403 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2404 		else
2405 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2406 
2407 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2408 
2409 		tg3_eee_pull_config(tp, NULL);
2410 		if (tp->eee.eee_active)
2411 			tp->setlpicnt = 2;
2412 	}
2413 
2414 	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2418 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2419 		}
2420 
2421 		val = tr32(TG3_CPMU_EEE_MODE);
2422 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423 	}
2424 }
2425 
2426 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 {
2428 	u32 val;
2429 
2430 	if (tp->link_config.active_speed == SPEED_1000 &&
2431 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2432 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2433 	     tg3_flag(tp, 57765_CLASS)) &&
2434 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2435 		val = MII_TG3_DSP_TAP26_ALNOKO |
2436 		      MII_TG3_DSP_TAP26_RMRXSTO;
2437 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2438 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2439 	}
2440 
2441 	val = tr32(TG3_CPMU_EEE_MODE);
2442 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2443 }
2444 
2445 static int tg3_wait_macro_done(struct tg3 *tp)
2446 {
2447 	int limit = 100;
2448 
2449 	while (limit--) {
2450 		u32 tmp32;
2451 
2452 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2453 			if ((tmp32 & 0x1000) == 0)
2454 				break;
2455 		}
2456 	}
2457 	if (limit < 0)
2458 		return -EBUSY;
2459 
2460 	return 0;
2461 }
2462 
2463 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2464 {
2465 	static const u32 test_pat[4][6] = {
2466 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2467 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2468 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2469 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470 	};
2471 	int chan;
2472 
2473 	for (chan = 0; chan < 4; chan++) {
2474 		int i;
2475 
2476 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 			     (chan * 0x2000) | 0x0200);
2478 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 
2480 		for (i = 0; i < 6; i++)
2481 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2482 				     test_pat[chan][i]);
2483 
2484 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2485 		if (tg3_wait_macro_done(tp)) {
2486 			*resetp = 1;
2487 			return -EBUSY;
2488 		}
2489 
2490 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2491 			     (chan * 0x2000) | 0x0200);
2492 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2493 		if (tg3_wait_macro_done(tp)) {
2494 			*resetp = 1;
2495 			return -EBUSY;
2496 		}
2497 
2498 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2499 		if (tg3_wait_macro_done(tp)) {
2500 			*resetp = 1;
2501 			return -EBUSY;
2502 		}
2503 
2504 		for (i = 0; i < 6; i += 2) {
2505 			u32 low, high;
2506 
2507 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2508 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2509 			    tg3_wait_macro_done(tp)) {
2510 				*resetp = 1;
2511 				return -EBUSY;
2512 			}
2513 			low &= 0x7fff;
2514 			high &= 0x000f;
2515 			if (low != test_pat[chan][i] ||
2516 			    high != test_pat[chan][i+1]) {
2517 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2518 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2519 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520 
2521 				return -EBUSY;
2522 			}
2523 		}
2524 	}
2525 
2526 	return 0;
2527 }
2528 
2529 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 {
2531 	int chan;
2532 
2533 	for (chan = 0; chan < 4; chan++) {
2534 		int i;
2535 
2536 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2537 			     (chan * 0x2000) | 0x0200);
2538 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2539 		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2541 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2542 		if (tg3_wait_macro_done(tp))
2543 			return -EBUSY;
2544 	}
2545 
2546 	return 0;
2547 }
2548 
2549 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2550 {
2551 	u32 reg32, phy9_orig;
2552 	int retries, do_phy_reset, err;
2553 
2554 	retries = 10;
2555 	do_phy_reset = 1;
2556 	do {
2557 		if (do_phy_reset) {
2558 			err = tg3_bmcr_reset(tp);
2559 			if (err)
2560 				return err;
2561 			do_phy_reset = 0;
2562 		}
2563 
2564 		/* Disable transmitter and interrupt.  */
2565 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566 			continue;
2567 
2568 		reg32 |= 0x3000;
2569 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2570 
2571 		/* Set full-duplex, 1000 mbps.  */
2572 		tg3_writephy(tp, MII_BMCR,
2573 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2574 
2575 		/* Set to master mode.  */
2576 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2577 			continue;
2578 
2579 		tg3_writephy(tp, MII_CTRL1000,
2580 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2581 
2582 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583 		if (err)
2584 			return err;
2585 
2586 		/* Block the PHY control access.  */
2587 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2588 
2589 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2590 		if (!err)
2591 			break;
2592 	} while (--retries);
2593 
2594 	err = tg3_phy_reset_chanpat(tp);
2595 	if (err)
2596 		return err;
2597 
2598 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2599 
2600 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2601 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2602 
2603 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2604 
2605 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2606 
2607 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608 	if (err)
2609 		return err;
2610 
2611 	reg32 &= ~0x3000;
2612 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613 
2614 	return 0;
2615 }
2616 
2617 static void tg3_carrier_off(struct tg3 *tp)
2618 {
2619 	netif_carrier_off(tp->dev);
2620 	tp->link_up = false;
2621 }
2622 
2623 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2624 {
2625 	if (tg3_flag(tp, ENABLE_ASF))
2626 		netdev_warn(tp->dev,
2627 			    "Management side-band traffic will be interrupted during phy settings change\n");
2628 }
2629 
/* Reset the tigon3 PHY unconditionally and reapply the chip- and
 * PHY-specific workarounds that must follow a reset.
 */
2633 static int tg3_phy_reset(struct tg3 *tp)
2634 {
2635 	u32 val, cpmuctrl;
2636 	int err;
2637 
2638 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2639 		val = tr32(GRC_MISC_CFG);
2640 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2641 		udelay(40);
2642 	}
2643 	err  = tg3_readphy(tp, MII_BMSR, &val);
2644 	err |= tg3_readphy(tp, MII_BMSR, &val);
2645 	if (err != 0)
2646 		return -EBUSY;
2647 
2648 	if (netif_running(tp->dev) && tp->link_up) {
2649 		netif_carrier_off(tp->dev);
2650 		tg3_link_report(tp);
2651 	}
2652 
2653 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2654 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2655 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2656 		err = tg3_phy_reset_5703_4_5(tp);
2657 		if (err)
2658 			return err;
2659 		goto out;
2660 	}
2661 
2662 	cpmuctrl = 0;
2663 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2664 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2665 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2666 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2667 			tw32(TG3_CPMU_CTRL,
2668 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2669 	}
2670 
2671 	err = tg3_bmcr_reset(tp);
2672 	if (err)
2673 		return err;
2674 
2675 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2676 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2677 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2678 
2679 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2680 	}
2681 
2682 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2683 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2684 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2685 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2686 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2687 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2688 			udelay(40);
2689 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690 		}
2691 	}
2692 
2693 	if (tg3_flag(tp, 5717_PLUS) &&
2694 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2695 		return 0;
2696 
2697 	tg3_phy_apply_otp(tp);
2698 
2699 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2700 		tg3_phy_toggle_apd(tp, true);
2701 	else
2702 		tg3_phy_toggle_apd(tp, false);
2703 
2704 out:
2705 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2706 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2708 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2709 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2710 	}
2711 
2712 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2713 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 	}
2716 
2717 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2718 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2720 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2721 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2722 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2723 		}
2724 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2725 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2727 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2728 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2729 				tg3_writephy(tp, MII_TG3_TEST1,
2730 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2731 			} else
2732 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2733 
2734 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2735 		}
2736 	}
2737 
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2740 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2741 		/* Cannot do read-modify-write on 5401 */
2742 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2743 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744 		/* Set bit 14 with read-modify-write to preserve other bits */
2745 		err = tg3_phy_auxctl_read(tp,
2746 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2747 		if (!err)
2748 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2749 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2750 	}
2751 
	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * transmission of jumbo frames.
	 */
2755 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2757 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2758 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2759 	}
2760 
2761 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2762 		/* adjust output voltage */
2763 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2764 	}
2765 
2766 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2767 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2768 
2769 	tg3_phy_toggle_automdix(tp, true);
2770 	tg3_phy_set_wirespeed(tp);
2771 	return 0;
2772 }
2773 
2774 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2775 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2776 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2777 					  TG3_GPIO_MSG_NEED_VAUX)
2778 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2779 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2780 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2782 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2783 
2784 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2785 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2786 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2788 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
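
/* For reference: with DRVR_PRES == 0x1 and NEED_VAUX == 0x2, each PCI
 * function owns one 4-bit nibble of the status word, so the ALL_* masks
 * above expand to 0x1111 (a driver is present on some function) and
 * 0x2222 (some function needs Vaux).
 */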
2789 
2790 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 {
2792 	u32 status, shift;
2793 
2794 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2796 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2797 	else
2798 		status = tr32(TG3_CPMU_DRV_STATUS);
2799 
2800 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2801 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2802 	status |= (newstat << shift);
2803 
2804 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2806 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2807 	else
2808 		tw32(TG3_CPMU_DRV_STATUS, status);
2809 
2810 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2811 }
2812 
2813 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2814 {
2815 	if (!tg3_flag(tp, IS_NIC))
2816 		return 0;
2817 
2818 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2819 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2820 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2821 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2822 			return -EIO;
2823 
2824 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2825 
2826 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 
2829 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2830 	} else {
2831 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2833 	}
2834 
2835 	return 0;
2836 }
2837 
2838 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 {
2840 	u32 grc_local_ctrl;
2841 
2842 	if (!tg3_flag(tp, IS_NIC) ||
2843 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2845 		return;
2846 
2847 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2848 
2849 	tw32_wait_f(GRC_LOCAL_CTRL,
2850 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2851 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 
2853 	tw32_wait_f(GRC_LOCAL_CTRL,
2854 		    grc_local_ctrl,
2855 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 
2857 	tw32_wait_f(GRC_LOCAL_CTRL,
2858 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2860 }
2861 
2862 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2863 {
2864 	if (!tg3_flag(tp, IS_NIC))
2865 		return;
2866 
2867 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2868 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2869 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870 			    (GRC_LCLCTRL_GPIO_OE0 |
2871 			     GRC_LCLCTRL_GPIO_OE1 |
2872 			     GRC_LCLCTRL_GPIO_OE2 |
2873 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2874 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2875 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2876 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2877 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2878 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2879 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2880 				     GRC_LCLCTRL_GPIO_OE1 |
2881 				     GRC_LCLCTRL_GPIO_OE2 |
2882 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2883 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2884 				     tp->grc_local_ctrl;
2885 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 
2888 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2889 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 
2892 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2893 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2895 	} else {
2896 		u32 no_gpio2;
2897 		u32 grc_local_ctrl = 0;
2898 
		/* Workaround to prevent drawing too much current. */
2900 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2901 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2902 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2903 				    grc_local_ctrl,
2904 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2905 		}
2906 
2907 		/* On 5753 and variants, GPIO2 cannot be used. */
2908 		no_gpio2 = tp->nic_sram_data_cfg &
2909 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2910 
2911 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2912 				  GRC_LCLCTRL_GPIO_OE1 |
2913 				  GRC_LCLCTRL_GPIO_OE2 |
2914 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2915 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2916 		if (no_gpio2) {
2917 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2918 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2919 		}
2920 		tw32_wait_f(GRC_LOCAL_CTRL,
2921 			    tp->grc_local_ctrl | grc_local_ctrl,
2922 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2923 
2924 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2925 
2926 		tw32_wait_f(GRC_LOCAL_CTRL,
2927 			    tp->grc_local_ctrl | grc_local_ctrl,
2928 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2929 
2930 		if (!no_gpio2) {
2931 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2932 			tw32_wait_f(GRC_LOCAL_CTRL,
2933 				    tp->grc_local_ctrl | grc_local_ctrl,
2934 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 		}
2936 	}
2937 }
2938 
2939 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 {
2941 	u32 msg = 0;
2942 
2943 	/* Serialize power state transitions */
2944 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2945 		return;
2946 
2947 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2948 		msg = TG3_GPIO_MSG_NEED_VAUX;
2949 
2950 	msg = tg3_set_function_status(tp, msg);
2951 
2952 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2953 		goto done;
2954 
2955 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2956 		tg3_pwrsrc_switch_to_vaux(tp);
2957 	else
2958 		tg3_pwrsrc_die_with_vmain(tp);
2959 
2960 done:
2961 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2962 }
2963 
2964 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2965 {
2966 	bool need_vaux = false;
2967 
2968 	/* The GPIOs do something completely different on 57765. */
2969 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2970 		return;
2971 
2972 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2973 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2974 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2975 		tg3_frob_aux_power_5717(tp, include_wol ?
2976 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2977 		return;
2978 	}
2979 
2980 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2981 		struct net_device *dev_peer;
2982 
2983 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2984 
2985 		/* remove_one() may have been run on the peer. */
2986 		if (dev_peer) {
2987 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2988 
2989 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2990 				return;
2991 
2992 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2993 			    tg3_flag(tp_peer, ENABLE_ASF))
2994 				need_vaux = true;
2995 		}
2996 	}
2997 
2998 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2999 	    tg3_flag(tp, ENABLE_ASF))
3000 		need_vaux = true;
3001 
3002 	if (need_vaux)
3003 		tg3_pwrsrc_switch_to_vaux(tp);
3004 	else
3005 		tg3_pwrsrc_die_with_vmain(tp);
3006 }
3007 
3008 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3009 {
3010 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3011 		return 1;
3012 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3013 		if (speed != SPEED_10)
3014 			return 1;
3015 	} else if (speed == SPEED_10)
3016 		return 1;
3017 
3018 	return 0;
3019 }
3020 
3021 static bool tg3_phy_power_bug(struct tg3 *tp)
3022 {
3023 	switch (tg3_asic_rev(tp)) {
3024 	case ASIC_REV_5700:
3025 	case ASIC_REV_5704:
3026 		return true;
3027 	case ASIC_REV_5780:
3028 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3029 			return true;
3030 		return false;
3031 	case ASIC_REV_5717:
3032 		if (!tp->pci_fn)
3033 			return true;
3034 		return false;
3035 	case ASIC_REV_5719:
3036 	case ASIC_REV_5720:
3037 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3038 		    !tp->pci_fn)
3039 			return true;
3040 		return false;
3041 	}
3042 
3043 	return false;
3044 }
3045 
3046 static bool tg3_phy_led_bug(struct tg3 *tp)
3047 {
3048 	switch (tg3_asic_rev(tp)) {
3049 	case ASIC_REV_5719:
3050 	case ASIC_REV_5720:
3051 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3052 		    !tp->pci_fn)
3053 			return true;
3054 		return false;
3055 	}
3056 
3057 	return false;
3058 }
3059 
3060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 {
3062 	u32 val;
3063 
3064 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3065 		return;
3066 
3067 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3068 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3069 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3070 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3071 
3072 			sg_dig_ctrl |=
3073 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3074 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3075 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076 		}
3077 		return;
3078 	}
3079 
3080 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3081 		tg3_bmcr_reset(tp);
3082 		val = tr32(GRC_MISC_CFG);
3083 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3084 		udelay(40);
3085 		return;
3086 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3087 		u32 phytest;
3088 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3089 			u32 phy;
3090 
3091 			tg3_writephy(tp, MII_ADVERTISE, 0);
3092 			tg3_writephy(tp, MII_BMCR,
3093 				     BMCR_ANENABLE | BMCR_ANRESTART);
3094 
3095 			tg3_writephy(tp, MII_TG3_FET_TEST,
3096 				     phytest | MII_TG3_FET_SHADOW_EN);
3097 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3098 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3099 				tg3_writephy(tp,
3100 					     MII_TG3_FET_SHDW_AUXMODE4,
3101 					     phy);
3102 			}
3103 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3104 		}
3105 		return;
3106 	} else if (do_low_power) {
3107 		if (!tg3_phy_led_bug(tp))
3108 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3109 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3110 
3111 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3112 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3113 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3114 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3115 	}
3116 
3117 	/* The PHY should not be powered down on some chips because
3118 	 * of bugs.
3119 	 */
3120 	if (tg3_phy_power_bug(tp))
3121 		return;
3122 
3123 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3124 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3125 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3126 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3127 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3128 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3129 	}
3130 
3131 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3132 }
3133 
3134 /* tp->lock is held. */
3135 static int tg3_nvram_lock(struct tg3 *tp)
3136 {
3137 	if (tg3_flag(tp, NVRAM)) {
3138 		int i;
3139 
3140 		if (tp->nvram_lock_cnt == 0) {
3141 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3142 			for (i = 0; i < 8000; i++) {
3143 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144 					break;
3145 				udelay(20);
3146 			}
3147 			if (i == 8000) {
3148 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149 				return -ENODEV;
3150 			}
3151 		}
3152 		tp->nvram_lock_cnt++;
3153 	}
3154 	return 0;
3155 }
3156 
3157 /* tp->lock is held. */
3158 static void tg3_nvram_unlock(struct tg3 *tp)
3159 {
3160 	if (tg3_flag(tp, NVRAM)) {
3161 		if (tp->nvram_lock_cnt > 0)
3162 			tp->nvram_lock_cnt--;
3163 		if (tp->nvram_lock_cnt == 0)
3164 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3165 	}
3166 }
3167 
3168 /* tp->lock is held. */
3169 static void tg3_enable_nvram_access(struct tg3 *tp)
3170 {
3171 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172 		u32 nvaccess = tr32(NVRAM_ACCESS);
3173 
3174 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175 	}
3176 }
3177 
3178 /* tp->lock is held. */
3179 static void tg3_disable_nvram_access(struct tg3 *tp)
3180 {
3181 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182 		u32 nvaccess = tr32(NVRAM_ACCESS);
3183 
3184 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185 	}
3186 }
3187 
3188 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3189 					u32 offset, u32 *val)
3190 {
3191 	u32 tmp;
3192 	int i;
3193 
3194 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3195 		return -EINVAL;
3196 
3197 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3198 					EEPROM_ADDR_DEVID_MASK |
3199 					EEPROM_ADDR_READ);
3200 	tw32(GRC_EEPROM_ADDR,
3201 	     tmp |
3202 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3203 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3204 	      EEPROM_ADDR_ADDR_MASK) |
3205 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3206 
3207 	for (i = 0; i < 1000; i++) {
3208 		tmp = tr32(GRC_EEPROM_ADDR);
3209 
3210 		if (tmp & EEPROM_ADDR_COMPLETE)
3211 			break;
3212 		msleep(1);
3213 	}
3214 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3215 		return -EBUSY;
3216 
3217 	tmp = tr32(GRC_EEPROM_DATA);
3218 
3219 	/*
3220 	 * The data will always be opposite the native endian
3221 	 * format.  Perform a blind byteswap to compensate.
3222 	 */
3223 	*val = swab32(tmp);
3224 
3225 	return 0;
3226 }
3227 
3228 #define NVRAM_CMD_TIMEOUT 5000
3229 
3230 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 {
3232 	int i;
3233 
3234 	tw32(NVRAM_CMD, nvram_cmd);
3235 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3236 		usleep_range(10, 40);
3237 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238 			udelay(10);
3239 			break;
3240 		}
3241 	}
3242 
3243 	if (i == NVRAM_CMD_TIMEOUT)
3244 		return -EBUSY;
3245 
3246 	return 0;
3247 }
3248 
3249 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3250 {
3251 	if (tg3_flag(tp, NVRAM) &&
3252 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3253 	    tg3_flag(tp, FLASH) &&
3254 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3255 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3256 
3257 		addr = ((addr / tp->nvram_pagesize) <<
3258 			ATMEL_AT45DB0X1B_PAGE_POS) +
3259 		       (addr % tp->nvram_pagesize);
3260 
3261 	return addr;
3262 }
3263 
3264 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3265 {
3266 	if (tg3_flag(tp, NVRAM) &&
3267 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3268 	    tg3_flag(tp, FLASH) &&
3269 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3271 
3272 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3273 			tp->nvram_pagesize) +
3274 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3275 
3276 	return addr;
3277 }
3278 
3279 /* NOTE: Data read in from NVRAM is byteswapped according to
3280  * the byteswapping settings for all other register accesses.
3281  * tg3 devices are BE devices, so on a BE machine, the data
3282  * returned will be exactly as it is seen in NVRAM.  On a LE
3283  * machine, the 32-bit value will be byteswapped.
3284  */
3285 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 {
3287 	int ret;
3288 
3289 	if (!tg3_flag(tp, NVRAM))
3290 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3291 
3292 	offset = tg3_nvram_phys_addr(tp, offset);
3293 
3294 	if (offset > NVRAM_ADDR_MSK)
3295 		return -EINVAL;
3296 
3297 	ret = tg3_nvram_lock(tp);
3298 	if (ret)
3299 		return ret;
3300 
3301 	tg3_enable_nvram_access(tp);
3302 
3303 	tw32(NVRAM_ADDR, offset);
3304 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3305 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3306 
3307 	if (ret == 0)
3308 		*val = tr32(NVRAM_RDDATA);
3309 
3310 	tg3_disable_nvram_access(tp);
3311 
3312 	tg3_nvram_unlock(tp);
3313 
3314 	return ret;
3315 }
3316 
3317 /* Ensures NVRAM data is in bytestream format. */
3318 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3319 {
3320 	u32 v;
3321 	int res = tg3_nvram_read(tp, offset, &v);
3322 	if (!res)
3323 		*val = cpu_to_be32(v);
3324 	return res;
3325 }
3326 
3327 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3328 				    u32 offset, u32 len, u8 *buf)
3329 {
3330 	int i, j, rc = 0;
3331 	u32 val;
3332 
3333 	for (i = 0; i < len; i += 4) {
3334 		u32 addr;
3335 		__be32 data;
3336 
3337 		addr = offset + i;
3338 
3339 		memcpy(&data, buf + i, 4);
3340 
3341 		/*
3342 		 * The SEEPROM interface expects the data to always be opposite
3343 		 * the native endian format.  We accomplish this by reversing
3344 		 * all the operations that would have been performed on the
3345 		 * data from a call to tg3_nvram_read_be32().
3346 		 */
3347 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3348 
3349 		val = tr32(GRC_EEPROM_ADDR);
3350 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3351 
3352 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3353 			EEPROM_ADDR_READ);
3354 		tw32(GRC_EEPROM_ADDR, val |
3355 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3356 			(addr & EEPROM_ADDR_ADDR_MASK) |
3357 			EEPROM_ADDR_START |
3358 			EEPROM_ADDR_WRITE);
3359 
3360 		for (j = 0; j < 1000; j++) {
3361 			val = tr32(GRC_EEPROM_ADDR);
3362 
3363 			if (val & EEPROM_ADDR_COMPLETE)
3364 				break;
3365 			msleep(1);
3366 		}
3367 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3368 			rc = -EBUSY;
3369 			break;
3370 		}
3371 	}
3372 
3373 	return rc;
3374 }
3375 
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378 		u8 *buf)
3379 {
3380 	int ret = 0;
3381 	u32 pagesize = tp->nvram_pagesize;
3382 	u32 pagemask = pagesize - 1;
3383 	u32 nvram_cmd;
3384 	u8 *tmp;
3385 
3386 	tmp = kmalloc(pagesize, GFP_KERNEL);
3387 	if (tmp == NULL)
3388 		return -ENOMEM;
3389 
3390 	while (len) {
3391 		int j;
3392 		u32 phy_addr, page_off, size;
3393 
3394 		phy_addr = offset & ~pagemask;
3395 
3396 		for (j = 0; j < pagesize; j += 4) {
3397 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3398 						  (__be32 *) (tmp + j));
3399 			if (ret)
3400 				break;
3401 		}
3402 		if (ret)
3403 			break;
3404 
3405 		page_off = offset & pagemask;
3406 		size = pagesize;
3407 		if (len < size)
3408 			size = len;
3409 
3410 		len -= size;
3411 
		memcpy(tmp + page_off, buf, size);

		/* Advance past the source bytes staged for this page. */
		buf += size;
		offset = offset + (pagesize - page_off);
3415 
3416 		tg3_enable_nvram_access(tp);
3417 
3418 		/*
3419 		 * Before we can erase the flash page, we need
3420 		 * to issue a special "write enable" command.
3421 		 */
3422 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3423 
3424 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425 			break;
3426 
3427 		/* Erase the target page */
3428 		tw32(NVRAM_ADDR, phy_addr);
3429 
3430 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3431 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3432 
3433 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434 			break;
3435 
3436 		/* Issue another write enable to start the write. */
3437 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438 
3439 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440 			break;
3441 
3442 		for (j = 0; j < pagesize; j += 4) {
3443 			__be32 data;
3444 
3445 			data = *((__be32 *) (tmp + j));
3446 
3447 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3448 
3449 			tw32(NVRAM_ADDR, phy_addr + j);
3450 
3451 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452 				NVRAM_CMD_WR;
3453 
3454 			if (j == 0)
3455 				nvram_cmd |= NVRAM_CMD_FIRST;
3456 			else if (j == (pagesize - 4))
3457 				nvram_cmd |= NVRAM_CMD_LAST;
3458 
3459 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3460 			if (ret)
3461 				break;
3462 		}
3463 		if (ret)
3464 			break;
3465 	}
3466 
3467 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3469 
3470 	kfree(tmp);
3471 
3472 	return ret;
3473 }
3474 
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477 		u8 *buf)
3478 {
3479 	int i, ret = 0;
3480 
3481 	for (i = 0; i < len; i += 4, offset += 4) {
3482 		u32 page_off, phy_addr, nvram_cmd;
3483 		__be32 data;
3484 
3485 		memcpy(&data, buf + i, 4);
3486 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3487 
3488 		page_off = offset % tp->nvram_pagesize;
3489 
3490 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3491 
3492 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3493 
3494 		if (page_off == 0 || i == 0)
3495 			nvram_cmd |= NVRAM_CMD_FIRST;
3496 		if (page_off == (tp->nvram_pagesize - 4))
3497 			nvram_cmd |= NVRAM_CMD_LAST;
3498 
3499 		if (i == (len - 4))
3500 			nvram_cmd |= NVRAM_CMD_LAST;
3501 
3502 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3503 		    !tg3_flag(tp, FLASH) ||
3504 		    !tg3_flag(tp, 57765_PLUS))
3505 			tw32(NVRAM_ADDR, phy_addr);
3506 
3507 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3508 		    !tg3_flag(tp, 5755_PLUS) &&
3509 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3510 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3511 			u32 cmd;
3512 
3513 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3514 			ret = tg3_nvram_exec_cmd(tp, cmd);
3515 			if (ret)
3516 				break;
3517 		}
3518 		if (!tg3_flag(tp, FLASH)) {
3519 			/* We always do complete word writes to eeprom. */
3520 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3521 		}
3522 
3523 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524 		if (ret)
3525 			break;
3526 	}
3527 	return ret;
3528 }
3529 
3530 /* offset and length are dword aligned */
3531 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 {
3533 	int ret;
3534 
3535 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3536 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3537 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538 		udelay(40);
3539 	}
3540 
3541 	if (!tg3_flag(tp, NVRAM)) {
3542 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543 	} else {
3544 		u32 grc_mode;
3545 
3546 		ret = tg3_nvram_lock(tp);
3547 		if (ret)
3548 			return ret;
3549 
3550 		tg3_enable_nvram_access(tp);
3551 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3552 			tw32(NVRAM_WRITE1, 0x406);
3553 
3554 		grc_mode = tr32(GRC_MODE);
3555 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3556 
3557 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3558 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3559 				buf);
3560 		} else {
3561 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562 				buf);
3563 		}
3564 
3565 		grc_mode = tr32(GRC_MODE);
3566 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3567 
3568 		tg3_disable_nvram_access(tp);
3569 		tg3_nvram_unlock(tp);
3570 	}
3571 
3572 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3573 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574 		udelay(40);
3575 	}
3576 
3577 	return ret;
3578 }
3579 
3580 #define RX_CPU_SCRATCH_BASE	0x30000
3581 #define RX_CPU_SCRATCH_SIZE	0x04000
3582 #define TX_CPU_SCRATCH_BASE	0x34000
3583 #define TX_CPU_SCRATCH_SIZE	0x04000
3584 
3585 /* tp->lock is held. */
3586 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3587 {
3588 	int i;
3589 	const int iters = 10000;
3590 
3591 	for (i = 0; i < iters; i++) {
3592 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3593 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3594 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3595 			break;
3596 		if (pci_channel_offline(tp->pdev))
3597 			return -EBUSY;
3598 	}
3599 
3600 	return (i == iters) ? -EBUSY : 0;
3601 }
3602 
3603 /* tp->lock is held. */
3604 static int tg3_rxcpu_pause(struct tg3 *tp)
3605 {
3606 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3607 
3608 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3609 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3610 	udelay(10);
3611 
3612 	return rc;
3613 }
3614 
3615 /* tp->lock is held. */
3616 static int tg3_txcpu_pause(struct tg3 *tp)
3617 {
3618 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3619 }
3620 
3621 /* tp->lock is held. */
3622 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3625 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626 }
3627 
3628 /* tp->lock is held. */
3629 static void tg3_rxcpu_resume(struct tg3 *tp)
3630 {
3631 	tg3_resume_cpu(tp, RX_CPU_BASE);
3632 }
3633 
3634 /* tp->lock is held. */
3635 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 {
3637 	int rc;
3638 
3639 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3640 
3641 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3642 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3643 
3644 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3645 		return 0;
3646 	}
3647 	if (cpu_base == RX_CPU_BASE) {
3648 		rc = tg3_rxcpu_pause(tp);
3649 	} else {
3650 		/*
3651 		 * There is only an Rx CPU for the 5750 derivative in the
3652 		 * BCM4785.
3653 		 */
3654 		if (tg3_flag(tp, IS_SSB_CORE))
3655 			return 0;
3656 
3657 		rc = tg3_txcpu_pause(tp);
3658 	}
3659 
3660 	if (rc) {
3661 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3662 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663 		return -ENODEV;
3664 	}
3665 
3666 	/* Clear firmware's nvram arbitration. */
3667 	if (tg3_flag(tp, NVRAM))
3668 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669 	return 0;
3670 }
3671 
3672 static int tg3_fw_data_len(struct tg3 *tp,
3673 			   const struct tg3_firmware_hdr *fw_hdr)
3674 {
3675 	int fw_len;
3676 
	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in that
	 * header is not the length of the data to be written but the
	 * complete length of the bss.  The data length is determined from
	 * tp->fw->size minus the headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments.  Each fragment is identical to non-fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data.  In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment, i.e. fragment data plus header length.  The data length
	 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
	 */
3691 	if (tp->fw_len == 0xffffffff)
3692 		fw_len = be32_to_cpu(fw_hdr->len);
3693 	else
3694 		fw_len = tp->fw->size;
3695 
3696 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3697 }
3698 
3699 /* tp->lock is held. */
3700 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3701 				 u32 cpu_scratch_base, int cpu_scratch_size,
3702 				 const struct tg3_firmware_hdr *fw_hdr)
3703 {
3704 	int err, i;
3705 	void (*write_op)(struct tg3 *, u32, u32);
3706 	int total_len = tp->fw->size;
3707 
3708 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
			   __func__);
3712 		return -EINVAL;
3713 	}
3714 
3715 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3716 		write_op = tg3_write_mem;
3717 	else
3718 		write_op = tg3_write_indirect_reg32;
3719 
3720 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3721 		/* It is possible that bootcode is still loading at this point.
3722 		 * Get the nvram lock first before halting the cpu.
3723 		 */
3724 		int lock_err = tg3_nvram_lock(tp);
3725 		err = tg3_halt_cpu(tp, cpu_base);
3726 		if (!lock_err)
3727 			tg3_nvram_unlock(tp);
3728 		if (err)
3729 			goto out;
3730 
3731 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3732 			write_op(tp, cpu_scratch_base + i, 0);
3733 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3734 		tw32(cpu_base + CPU_MODE,
3735 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3736 	} else {
3737 		/* Subtract additional main header for fragmented firmware and
3738 		 * advance to the first fragment
3739 		 */
3740 		total_len -= TG3_FW_HDR_LEN;
3741 		fw_hdr++;
3742 	}
3743 
3744 	do {
3745 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3746 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3747 			write_op(tp, cpu_scratch_base +
3748 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3749 				     (i * sizeof(u32)),
3750 				 be32_to_cpu(fw_data[i]));
3751 
3752 		total_len -= be32_to_cpu(fw_hdr->len);
3753 
3754 		/* Advance to next fragment */
3755 		fw_hdr = (struct tg3_firmware_hdr *)
3756 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3757 	} while (total_len > 0);
3758 
3759 	err = 0;
3760 
3761 out:
3762 	return err;
3763 }
3764 
3765 /* tp->lock is held. */
3766 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3767 {
3768 	int i;
3769 	const int iters = 5;
3770 
3771 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 	tw32_f(cpu_base + CPU_PC, pc);
3773 
3774 	for (i = 0; i < iters; i++) {
3775 		if (tr32(cpu_base + CPU_PC) == pc)
3776 			break;
3777 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3778 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3779 		tw32_f(cpu_base + CPU_PC, pc);
3780 		udelay(1000);
3781 	}
3782 
3783 	return (i == iters) ? -EBUSY : 0;
3784 }
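
/* The retry loop above gives the CPU up to five chances to latch the
 * new PC, with a 1 ms pause between rewrites, i.e. roughly 5 ms
 * (udelay() only guarantees a minimum) before reporting -EBUSY.
 */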
3785 
3786 /* tp->lock is held. */
3787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3788 {
3789 	const struct tg3_firmware_hdr *fw_hdr;
3790 	int err;
3791 
3792 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3793 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
3799 
3800 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3801 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802 				    fw_hdr);
3803 	if (err)
3804 		return err;
3805 
3806 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3807 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808 				    fw_hdr);
3809 	if (err)
3810 		return err;
3811 
3812 	/* Now startup only the RX cpu. */
3813 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3814 				       be32_to_cpu(fw_hdr->base_addr));
3815 	if (err) {
3816 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3817 			   "should be %08x\n", __func__,
3818 			   tr32(RX_CPU_BASE + CPU_PC),
3819 				be32_to_cpu(fw_hdr->base_addr));
3820 		return -ENODEV;
3821 	}
3822 
3823 	tg3_rxcpu_resume(tp);
3824 
3825 	return 0;
3826 }
3827 
3828 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3829 {
3830 	const int iters = 1000;
3831 	int i;
3832 	u32 val;
3833 
	/* Wait for boot code to complete initialization and enter the
	 * service loop. It is then safe to download service patches.
	 */
3837 	for (i = 0; i < iters; i++) {
3838 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839 			break;
3840 
3841 		udelay(10);
3842 	}
3843 
3844 	if (i == iters) {
3845 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846 		return -EBUSY;
3847 	}
3848 
3849 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3850 	if (val & 0xff) {
3851 		netdev_warn(tp->dev,
3852 			    "Other patches exist. Not downloading EEE patch\n");
3853 		return -EEXIST;
3854 	}
3855 
3856 	return 0;
3857 }
3858 
3859 /* tp->lock is held. */
3860 static void tg3_load_57766_firmware(struct tg3 *tp)
3861 {
3862 	struct tg3_firmware_hdr *fw_hdr;
3863 
3864 	if (!tg3_flag(tp, NO_NVRAM))
3865 		return;
3866 
3867 	if (tg3_validate_rxcpu_state(tp))
3868 		return;
3869 
3870 	if (!tp->fw)
3871 		return;
3872 
	/* This firmware blob has a different format from older firmware
	 * releases, as described below. The main difference is that we have
	 * fragmented data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware, consisting of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware, i.e. they have a
	 * firmware header followed by the data for that fragment. The
	 * version field of the individual fragment header is unused.
	 */
3886 
3887 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3888 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3889 		return;
3890 
3891 	if (tg3_rxcpu_pause(tp))
3892 		return;
3893 
3894 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3895 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3896 
3897 	tg3_rxcpu_resume(tp);
3898 }
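
/* Illustrative layout of a fragmented 57766 blob (a sketch; the 12-byte
 * header size and the fragment length 0x48 are hypothetical):
 *
 *	+0x00	main hdr:  version, base_addr = TG3_57766_FW_BASE_ADDR,
 *			   len = 0xffffffff (unused)
 *	+0x0c	frag hdr:  version (unused), base_addr, len = 0x48
 *	+0x18	frag data: 0x3c bytes of big-endian words
 *	+0x54	next fragment header, and so on until tp->fw->size
 */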
3899 
3900 /* tp->lock is held. */
3901 static int tg3_load_tso_firmware(struct tg3 *tp)
3902 {
3903 	const struct tg3_firmware_hdr *fw_hdr;
3904 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3905 	int err;
3906 
3907 	if (!tg3_flag(tp, FW_TSO))
3908 		return 0;
3909 
3910 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3911 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
3917 
3918 	cpu_scratch_size = tp->fw_len;
3919 
3920 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3921 		cpu_base = RX_CPU_BASE;
3922 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3923 	} else {
3924 		cpu_base = TX_CPU_BASE;
3925 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3926 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3927 	}
3928 
3929 	err = tg3_load_firmware_cpu(tp, cpu_base,
3930 				    cpu_scratch_base, cpu_scratch_size,
3931 				    fw_hdr);
3932 	if (err)
3933 		return err;
3934 
3935 	/* Now startup the cpu. */
3936 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3937 				       be32_to_cpu(fw_hdr->base_addr));
3938 	if (err) {
3939 		netdev_err(tp->dev,
3940 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3941 			   __func__, tr32(cpu_base + CPU_PC),
3942 			   be32_to_cpu(fw_hdr->base_addr));
3943 		return -ENODEV;
3944 	}
3945 
3946 	tg3_resume_cpu(tp, cpu_base);
3947 	return 0;
3948 }
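
/* Note: on the 5705 the TSO image runs on the RX CPU and is staged in
 * the mbuf pool SRAM (with cpu_scratch_size taken from tp->fw_len);
 * all other chips stage it in the TX CPU scratch area.
 */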
3949 
3950 /* tp->lock is held. */
3951 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3952 {
3953 	u32 addr_high, addr_low;
3954 
3955 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3956 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3957 		    (mac_addr[4] <<  8) | mac_addr[5]);
3958 
3959 	if (index < 4) {
3960 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3961 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3962 	} else {
3963 		index -= 4;
3964 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3965 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3966 	}
3967 }
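
/* Packing example for a hypothetical address 00:10:18:aa:bb:cc:
 *
 *	addr_high = 0x00000010	(bytes 0-1)
 *	addr_low  = 0x18aabbcc	(bytes 2-5)
 *
 * Indices 0-3 land in the MAC_ADDR_{0..3}_{HIGH,LOW} pairs; index 4 and
 * up use the extended address registers.
 */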
3968 
3969 /* tp->lock is held. */
3970 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 {
3972 	u32 addr_high;
3973 	int i;
3974 
3975 	for (i = 0; i < 4; i++) {
3976 		if (i == 1 && skip_mac_1)
3977 			continue;
3978 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979 	}
3980 
3981 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3982 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3983 		for (i = 4; i < 16; i++)
3984 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985 	}
3986 
3987 	addr_high = (tp->dev->dev_addr[0] +
3988 		     tp->dev->dev_addr[1] +
3989 		     tp->dev->dev_addr[2] +
3990 		     tp->dev->dev_addr[3] +
3991 		     tp->dev->dev_addr[4] +
3992 		     tp->dev->dev_addr[5]) &
3993 		TX_BACKOFF_SEED_MASK;
3994 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3995 }
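
/* The backoff seed is just the byte sum of the station address masked by
 * TX_BACKOFF_SEED_MASK. For the hypothetical address above
 * (00:10:18:aa:bb:cc) the sum is 0x259, so assuming a 10-bit mask the
 * seed written would be 0x259.
 */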
3996 
3997 static void tg3_enable_register_access(struct tg3 *tp)
3998 {
3999 	/*
4000 	 * Make sure register accesses (indirect or otherwise) will function
4001 	 * correctly.
4002 	 */
4003 	pci_write_config_dword(tp->pdev,
4004 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4005 }
4006 
4007 static int tg3_power_up(struct tg3 *tp)
4008 {
4009 	int err;
4010 
4011 	tg3_enable_register_access(tp);
4012 
4013 	err = pci_set_power_state(tp->pdev, PCI_D0);
4014 	if (!err) {
4015 		/* Switch out of Vaux if it is a NIC */
4016 		tg3_pwrsrc_switch_to_vmain(tp);
4017 	} else {
4018 		netdev_err(tp->dev, "Transition to D0 failed\n");
4019 	}
4020 
4021 	return err;
4022 }
4023 
4024 static int tg3_setup_phy(struct tg3 *, bool);
4025 
4026 static int tg3_power_down_prepare(struct tg3 *tp)
4027 {
4028 	u32 misc_host_ctrl;
4029 	bool device_should_wake, do_low_power;
4030 
4031 	tg3_enable_register_access(tp);
4032 
4033 	/* Restore the CLKREQ setting. */
4034 	if (tg3_flag(tp, CLKREQ_BUG))
4035 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4036 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4037 
4038 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4039 	tw32(TG3PCI_MISC_HOST_CTRL,
4040 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4041 
4042 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4043 			     tg3_flag(tp, WOL_ENABLE);
4044 
4045 	if (tg3_flag(tp, USE_PHYLIB)) {
4046 		do_low_power = false;
4047 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4048 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4049 			struct phy_device *phydev;
4050 			u32 phyid, advertising;
4051 
4052 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4053 
4054 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4055 
4056 			tp->link_config.speed = phydev->speed;
4057 			tp->link_config.duplex = phydev->duplex;
4058 			tp->link_config.autoneg = phydev->autoneg;
4059 			tp->link_config.advertising = phydev->advertising;
4060 
4061 			advertising = ADVERTISED_TP |
4062 				      ADVERTISED_Pause |
4063 				      ADVERTISED_Autoneg |
4064 				      ADVERTISED_10baseT_Half;
4065 
4066 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067 				if (tg3_flag(tp, WOL_SPEED_100MB))
4068 					advertising |=
4069 						ADVERTISED_100baseT_Half |
4070 						ADVERTISED_100baseT_Full |
4071 						ADVERTISED_10baseT_Full;
4072 				else
4073 					advertising |= ADVERTISED_10baseT_Full;
4074 			}
4075 
4076 			phydev->advertising = advertising;
4077 
4078 			phy_start_aneg(phydev);
4079 
4080 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4081 			if (phyid != PHY_ID_BCMAC131) {
4082 				phyid &= PHY_BCM_OUI_MASK;
4083 				if (phyid == PHY_BCM_OUI_1 ||
4084 				    phyid == PHY_BCM_OUI_2 ||
4085 				    phyid == PHY_BCM_OUI_3)
4086 					do_low_power = true;
4087 			}
4088 		}
4089 	} else {
4090 		do_low_power = true;
4091 
4092 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4093 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4094 
4095 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4096 			tg3_setup_phy(tp, false);
4097 	}
4098 
4099 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4100 		u32 val;
4101 
4102 		val = tr32(GRC_VCPU_EXT_CTRL);
4103 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4104 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4105 		int i;
4106 		u32 val;
4107 
4108 		for (i = 0; i < 200; i++) {
4109 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4110 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4111 				break;
4112 			msleep(1);
4113 		}
4114 	}
4115 	if (tg3_flag(tp, WOL_CAP))
4116 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4117 						     WOL_DRV_STATE_SHUTDOWN |
4118 						     WOL_DRV_WOL |
4119 						     WOL_SET_MAGIC_PKT);
4120 
4121 	if (device_should_wake) {
4122 		u32 mac_mode;
4123 
4124 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4125 			if (do_low_power &&
4126 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4127 				tg3_phy_auxctl_write(tp,
4128 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4129 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4130 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4131 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4132 				udelay(40);
4133 			}
4134 
4135 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4136 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4137 			else if (tp->phy_flags &
4138 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4139 				if (tp->link_config.active_speed == SPEED_1000)
4140 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4141 				else
4142 					mac_mode = MAC_MODE_PORT_MODE_MII;
4143 			} else
4144 				mac_mode = MAC_MODE_PORT_MODE_MII;
4145 
4146 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4147 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4148 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4149 					     SPEED_100 : SPEED_10;
4150 				if (tg3_5700_link_polarity(tp, speed))
4151 					mac_mode |= MAC_MODE_LINK_POLARITY;
4152 				else
4153 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4154 			}
4155 		} else {
4156 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4157 		}
4158 
4159 		if (!tg3_flag(tp, 5750_PLUS))
4160 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4161 
4162 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4163 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4164 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4165 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4166 
4167 		if (tg3_flag(tp, ENABLE_APE))
4168 			mac_mode |= MAC_MODE_APE_TX_EN |
4169 				    MAC_MODE_APE_RX_EN |
4170 				    MAC_MODE_TDE_ENABLE;
4171 
4172 		tw32_f(MAC_MODE, mac_mode);
4173 		udelay(100);
4174 
4175 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4176 		udelay(10);
4177 	}
4178 
4179 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4180 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4181 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4182 		u32 base_val;
4183 
4184 		base_val = tp->pci_clock_ctrl;
4185 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4186 			     CLOCK_CTRL_TXCLK_DISABLE);
4187 
4188 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4189 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4190 	} else if (tg3_flag(tp, 5780_CLASS) ||
4191 		   tg3_flag(tp, CPMU_PRESENT) ||
4192 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4193 		/* do nothing */
4194 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4195 		u32 newbits1, newbits2;
4196 
4197 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4198 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4199 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4200 				    CLOCK_CTRL_TXCLK_DISABLE |
4201 				    CLOCK_CTRL_ALTCLK);
4202 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4203 		} else if (tg3_flag(tp, 5705_PLUS)) {
4204 			newbits1 = CLOCK_CTRL_625_CORE;
4205 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4206 		} else {
4207 			newbits1 = CLOCK_CTRL_ALTCLK;
4208 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 		}
4210 
4211 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4212 			    40);
4213 
4214 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4215 			    40);
4216 
4217 		if (!tg3_flag(tp, 5705_PLUS)) {
4218 			u32 newbits3;
4219 
4220 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4221 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4222 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4223 					    CLOCK_CTRL_TXCLK_DISABLE |
4224 					    CLOCK_CTRL_44MHZ_CORE);
4225 			} else {
4226 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4227 			}
4228 
4229 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4230 				    tp->pci_clock_ctrl | newbits3, 40);
4231 		}
4232 	}
4233 
4234 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4235 		tg3_power_down_phy(tp, do_low_power);
4236 
4237 	tg3_frob_aux_power(tp, true);
4238 
4239 	/* Workaround for unstable PLL clock */
4240 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4241 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4242 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4243 		u32 val = tr32(0x7d00);
4244 
4245 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4246 		tw32(0x7d00, val);
4247 		if (!tg3_flag(tp, ENABLE_ASF)) {
4248 			int err;
4249 
4250 			err = tg3_nvram_lock(tp);
4251 			tg3_halt_cpu(tp, RX_CPU_BASE);
4252 			if (!err)
4253 				tg3_nvram_unlock(tp);
4254 		}
4255 	}
4256 
4257 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4258 
4259 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4260 
4261 	return 0;
4262 }
4263 
4264 static void tg3_power_down(struct tg3 *tp)
4265 {
4266 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4267 	pci_set_power_state(tp->pdev, PCI_D3hot);
4268 }
4269 
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed,
					 u8 *duplex)
4271 {
4272 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4273 	case MII_TG3_AUX_STAT_10HALF:
4274 		*speed = SPEED_10;
4275 		*duplex = DUPLEX_HALF;
4276 		break;
4277 
4278 	case MII_TG3_AUX_STAT_10FULL:
4279 		*speed = SPEED_10;
4280 		*duplex = DUPLEX_FULL;
4281 		break;
4282 
4283 	case MII_TG3_AUX_STAT_100HALF:
4284 		*speed = SPEED_100;
4285 		*duplex = DUPLEX_HALF;
4286 		break;
4287 
4288 	case MII_TG3_AUX_STAT_100FULL:
4289 		*speed = SPEED_100;
4290 		*duplex = DUPLEX_FULL;
4291 		break;
4292 
4293 	case MII_TG3_AUX_STAT_1000HALF:
4294 		*speed = SPEED_1000;
4295 		*duplex = DUPLEX_HALF;
4296 		break;
4297 
4298 	case MII_TG3_AUX_STAT_1000FULL:
4299 		*speed = SPEED_1000;
4300 		*duplex = DUPLEX_FULL;
4301 		break;
4302 
4303 	default:
4304 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4305 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4306 				 SPEED_10;
4307 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4308 				  DUPLEX_HALF;
4309 			break;
4310 		}
4311 		*speed = SPEED_UNKNOWN;
4312 		*duplex = DUPLEX_UNKNOWN;
4313 		break;
4314 	}
4315 }
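
/* Decoding example: an aux status of MII_TG3_AUX_STAT_1000FULL yields
 * *speed = SPEED_1000 and *duplex = DUPLEX_FULL; any unrecognized value
 * on a non-FET PHY falls back to SPEED_UNKNOWN/DUPLEX_UNKNOWN.
 */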
4316 
4317 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4318 {
4319 	int err = 0;
4320 	u32 val, new_adv;
4321 
4322 	new_adv = ADVERTISE_CSMA;
4323 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4324 	new_adv |= mii_advertise_flowctrl(flowctrl);
4325 
4326 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4327 	if (err)
4328 		goto done;
4329 
4330 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4331 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4332 
4333 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4334 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4335 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4336 
4337 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4338 		if (err)
4339 			goto done;
4340 	}
4341 
4342 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4343 		goto done;
4344 
4345 	tw32(TG3_CPMU_EEE_MODE,
4346 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4347 
4348 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4349 	if (!err) {
		int err2;
4351 
4352 		val = 0;
4353 		/* Advertise 100-BaseTX EEE ability */
4354 		if (advertise & ADVERTISED_100baseT_Full)
4355 			val |= MDIO_AN_EEE_ADV_100TX;
4356 		/* Advertise 1000-BaseT EEE ability */
4357 		if (advertise & ADVERTISED_1000baseT_Full)
4358 			val |= MDIO_AN_EEE_ADV_1000T;
4359 
4360 		if (!tp->eee.eee_enabled) {
4361 			val = 0;
4362 			tp->eee.advertised = 0;
4363 		} else {
4364 			tp->eee.advertised = advertise &
4365 					     (ADVERTISED_100baseT_Full |
4366 					      ADVERTISED_1000baseT_Full);
4367 		}
4368 
4369 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4370 		if (err)
4371 			val = 0;
4372 
4373 		switch (tg3_asic_rev(tp)) {
4374 		case ASIC_REV_5717:
4375 		case ASIC_REV_57765:
4376 		case ASIC_REV_57766:
4377 		case ASIC_REV_5719:
			/* If we advertised any EEE abilities above... */
4379 			if (val)
4380 				val = MII_TG3_DSP_TAP26_ALNOKO |
4381 				      MII_TG3_DSP_TAP26_RMRXSTO |
4382 				      MII_TG3_DSP_TAP26_OPCSINPT;
4383 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4384 			/* Fall through */
4385 		case ASIC_REV_5720:
4386 		case ASIC_REV_5762:
4387 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4388 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4389 						 MII_TG3_DSP_CH34TP2_HIBW01);
4390 		}
4391 
4392 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4393 		if (!err)
4394 			err = err2;
4395 	}
4396 
4397 done:
4398 	return err;
4399 }
4400 
4401 static void tg3_phy_copper_begin(struct tg3 *tp)
4402 {
4403 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4404 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4405 		u32 adv, fc;
4406 
4407 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4408 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4409 			adv = ADVERTISED_10baseT_Half |
4410 			      ADVERTISED_10baseT_Full;
4411 			if (tg3_flag(tp, WOL_SPEED_100MB))
4412 				adv |= ADVERTISED_100baseT_Half |
4413 				       ADVERTISED_100baseT_Full;
4414 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4415 				if (!(tp->phy_flags &
4416 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4417 					adv |= ADVERTISED_1000baseT_Half;
4418 				adv |= ADVERTISED_1000baseT_Full;
4419 			}
4420 
4421 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4422 		} else {
4423 			adv = tp->link_config.advertising;
4424 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4425 				adv &= ~(ADVERTISED_1000baseT_Half |
4426 					 ADVERTISED_1000baseT_Full);
4427 
4428 			fc = tp->link_config.flowctrl;
4429 		}
4430 
4431 		tg3_phy_autoneg_cfg(tp, adv, fc);
4432 
4433 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4434 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4435 			/* Normally during power down we want to autonegotiate
4436 			 * the lowest possible speed for WOL. However, to avoid
4437 			 * link flap, we leave it untouched.
4438 			 */
4439 			return;
4440 		}
4441 
4442 		tg3_writephy(tp, MII_BMCR,
4443 			     BMCR_ANENABLE | BMCR_ANRESTART);
4444 	} else {
4445 		int i;
4446 		u32 bmcr, orig_bmcr;
4447 
4448 		tp->link_config.active_speed = tp->link_config.speed;
4449 		tp->link_config.active_duplex = tp->link_config.duplex;
4450 
4451 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, the 5714 family (including
			 * the 5715) only links up when the advertisement
			 * register has the configured speed enabled.
			 */
4456 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4457 		}
4458 
4459 		bmcr = 0;
4460 		switch (tp->link_config.speed) {
4461 		default:
4462 		case SPEED_10:
4463 			break;
4464 
4465 		case SPEED_100:
4466 			bmcr |= BMCR_SPEED100;
4467 			break;
4468 
4469 		case SPEED_1000:
4470 			bmcr |= BMCR_SPEED1000;
4471 			break;
4472 		}
4473 
4474 		if (tp->link_config.duplex == DUPLEX_FULL)
4475 			bmcr |= BMCR_FULLDPLX;
4476 
4477 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4478 		    (bmcr != orig_bmcr)) {
4479 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4480 			for (i = 0; i < 1500; i++) {
4481 				u32 tmp;
4482 
4483 				udelay(10);
4484 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4485 				    tg3_readphy(tp, MII_BMSR, &tmp))
4486 					continue;
4487 				if (!(tmp & BMSR_LSTATUS)) {
4488 					udelay(40);
4489 					break;
4490 				}
4491 			}
4492 			tg3_writephy(tp, MII_BMCR, bmcr);
4493 			udelay(40);
4494 		}
4495 	}
4496 }
4497 
4498 static int tg3_phy_pull_config(struct tg3 *tp)
4499 {
4500 	int err;
4501 	u32 val;
4502 
4503 	err = tg3_readphy(tp, MII_BMCR, &val);
4504 	if (err)
4505 		goto done;
4506 
4507 	if (!(val & BMCR_ANENABLE)) {
4508 		tp->link_config.autoneg = AUTONEG_DISABLE;
4509 		tp->link_config.advertising = 0;
4510 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4511 
4512 		err = -EIO;
4513 
4514 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4515 		case 0:
4516 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 				goto done;
4518 
4519 			tp->link_config.speed = SPEED_10;
4520 			break;
4521 		case BMCR_SPEED100:
4522 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523 				goto done;
4524 
4525 			tp->link_config.speed = SPEED_100;
4526 			break;
4527 		case BMCR_SPEED1000:
4528 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4529 				tp->link_config.speed = SPEED_1000;
4530 				break;
4531 			}
4532 			/* Fall through */
4533 		default:
4534 			goto done;
4535 		}
4536 
4537 		if (val & BMCR_FULLDPLX)
4538 			tp->link_config.duplex = DUPLEX_FULL;
4539 		else
4540 			tp->link_config.duplex = DUPLEX_HALF;
4541 
4542 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4543 
4544 		err = 0;
4545 		goto done;
4546 	}
4547 
4548 	tp->link_config.autoneg = AUTONEG_ENABLE;
4549 	tp->link_config.advertising = ADVERTISED_Autoneg;
4550 	tg3_flag_set(tp, PAUSE_AUTONEG);
4551 
4552 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4553 		u32 adv;
4554 
4555 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4556 		if (err)
4557 			goto done;
4558 
4559 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4560 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4561 
4562 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4563 	} else {
4564 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4565 	}
4566 
4567 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4568 		u32 adv;
4569 
4570 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4571 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4572 			if (err)
4573 				goto done;
4574 
4575 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4576 		} else {
4577 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4578 			if (err)
4579 				goto done;
4580 
4581 			adv = tg3_decode_flowctrl_1000X(val);
4582 			tp->link_config.flowctrl = adv;
4583 
4584 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4585 			adv = mii_adv_to_ethtool_adv_x(val);
4586 		}
4587 
4588 		tp->link_config.advertising |= adv;
4589 	}
4590 
4591 done:
4592 	return err;
4593 }
4594 
4595 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4596 {
4597 	int err;
4598 
4599 	/* Turn off tap power management. */
4600 	/* Set Extended packet length bit */
4601 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4602 
4603 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4604 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4605 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4606 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4607 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4608 
4609 	udelay(40);
4610 
4611 	return err;
4612 }
4613 
4614 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4615 {
4616 	struct ethtool_eee eee;
4617 
4618 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4619 		return true;
4620 
4621 	tg3_eee_pull_config(tp, &eee);
4622 
4623 	if (tp->eee.eee_enabled) {
4624 		if (tp->eee.advertised != eee.advertised ||
4625 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4626 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4627 			return false;
4628 	} else {
4629 		/* EEE is disabled but we're advertising */
4630 		if (eee.advertised)
4631 			return false;
4632 	}
4633 
4634 	return true;
4635 }
4636 
4637 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4638 {
4639 	u32 advmsk, tgtadv, advertising;
4640 
4641 	advertising = tp->link_config.advertising;
4642 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4643 
4644 	advmsk = ADVERTISE_ALL;
4645 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4646 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4647 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4648 	}
4649 
4650 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4651 		return false;
4652 
4653 	if ((*lcladv & advmsk) != tgtadv)
4654 		return false;
4655 
4656 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4657 		u32 tg3_ctrl;
4658 
4659 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4660 
4661 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4662 			return false;
4663 
4664 		if (tgtadv &&
4665 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4666 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4667 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4668 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4669 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4670 		} else {
4671 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4672 		}
4673 
4674 		if (tg3_ctrl != tgtadv)
4675 			return false;
4676 	}
4677 
4678 	return true;
4679 }
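
/* In short: the autoneg result is trusted only if what the PHY is
 * currently advertising (MII_ADVERTISE, plus MII_CTRL1000 on
 * gigabit-capable parts) still matches what link_config requested;
 * the pause bits are compared only when running full duplex.
 */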
4680 
4681 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4682 {
4683 	u32 lpeth = 0;
4684 
4685 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4686 		u32 val;
4687 
4688 		if (tg3_readphy(tp, MII_STAT1000, &val))
4689 			return false;
4690 
4691 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4692 	}
4693 
4694 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4695 		return false;
4696 
4697 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4698 	tp->link_config.rmt_adv = lpeth;
4699 
4700 	return true;
4701 }
4702 
4703 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4704 {
4705 	if (curr_link_up != tp->link_up) {
4706 		if (curr_link_up) {
4707 			netif_carrier_on(tp->dev);
4708 		} else {
4709 			netif_carrier_off(tp->dev);
4710 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4711 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4712 		}
4713 
4714 		tg3_link_report(tp);
4715 		return true;
4716 	}
4717 
4718 	return false;
4719 }
4720 
4721 static void tg3_clear_mac_status(struct tg3 *tp)
4722 {
4723 	tw32(MAC_EVENT, 0);
4724 
4725 	tw32_f(MAC_STATUS,
4726 	       MAC_STATUS_SYNC_CHANGED |
4727 	       MAC_STATUS_CFG_CHANGED |
4728 	       MAC_STATUS_MI_COMPLETION |
4729 	       MAC_STATUS_LNKSTATE_CHANGED);
4730 	udelay(40);
4731 }
4732 
4733 static void tg3_setup_eee(struct tg3 *tp)
4734 {
4735 	u32 val;
4736 
4737 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4738 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4739 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4740 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4741 
4742 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4743 
4744 	tw32_f(TG3_CPMU_EEE_CTRL,
4745 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4746 
4747 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4748 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4749 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4750 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4751 
4752 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4753 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4754 
4755 	if (tg3_flag(tp, ENABLE_APE))
4756 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4757 
4758 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4759 
4760 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4761 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4762 	       (tp->eee.tx_lpi_timer & 0xffff));
4763 
4764 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4765 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4766 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4767 }
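
/* A rough map of the EEE knobs programmed above: LNKIDL_CTRL selects
 * which idle sources may trigger LPI, EEE_MODE gates LPI separately in
 * the TX and RX directions, and the DBTMR1/DBTMR2 debounce timers bound
 * how quickly the link enters and exits low power. The low 16 bits of
 * DBTMR1 come from tp->eee.tx_lpi_timer.
 */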
4768 
4769 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4770 {
4771 	bool current_link_up;
4772 	u32 bmsr, val;
4773 	u32 lcl_adv, rmt_adv;
4774 	u16 current_speed;
4775 	u8 current_duplex;
4776 	int i, err;
4777 
4778 	tg3_clear_mac_status(tp);
4779 
4780 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4781 		tw32_f(MAC_MI_MODE,
4782 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4783 		udelay(80);
4784 	}
4785 
4786 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4787 
4788 	/* Some third-party PHYs need to be reset on link going
4789 	 * down.
4790 	 */
4791 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4792 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4793 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4794 	    tp->link_up) {
4795 		tg3_readphy(tp, MII_BMSR, &bmsr);
4796 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4797 		    !(bmsr & BMSR_LSTATUS))
4798 			force_reset = true;
4799 	}
4800 	if (force_reset)
4801 		tg3_phy_reset(tp);
4802 
4803 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4804 		tg3_readphy(tp, MII_BMSR, &bmsr);
4805 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4806 		    !tg3_flag(tp, INIT_COMPLETE))
4807 			bmsr = 0;
4808 
4809 		if (!(bmsr & BMSR_LSTATUS)) {
4810 			err = tg3_init_5401phy_dsp(tp);
4811 			if (err)
4812 				return err;
4813 
4814 			tg3_readphy(tp, MII_BMSR, &bmsr);
4815 			for (i = 0; i < 1000; i++) {
4816 				udelay(10);
4817 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4818 				    (bmsr & BMSR_LSTATUS)) {
4819 					udelay(40);
4820 					break;
4821 				}
4822 			}
4823 
4824 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4825 			    TG3_PHY_REV_BCM5401_B0 &&
4826 			    !(bmsr & BMSR_LSTATUS) &&
4827 			    tp->link_config.active_speed == SPEED_1000) {
4828 				err = tg3_phy_reset(tp);
4829 				if (!err)
4830 					err = tg3_init_5401phy_dsp(tp);
4831 				if (err)
4832 					return err;
4833 			}
4834 		}
4835 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4836 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4837 		/* 5701 {A0,B0} CRC bug workaround */
4838 		tg3_writephy(tp, 0x15, 0x0a75);
4839 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4840 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4841 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4842 	}
4843 
4844 	/* Clear pending interrupts... */
4845 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4847 
4848 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4849 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4850 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4851 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4852 
4853 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4854 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4855 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4856 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4857 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4858 		else
4859 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4860 	}
4861 
4862 	current_link_up = false;
4863 	current_speed = SPEED_UNKNOWN;
4864 	current_duplex = DUPLEX_UNKNOWN;
4865 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4866 	tp->link_config.rmt_adv = 0;
4867 
4868 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4869 		err = tg3_phy_auxctl_read(tp,
4870 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4871 					  &val);
4872 		if (!err && !(val & (1 << 10))) {
4873 			tg3_phy_auxctl_write(tp,
4874 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875 					     val | (1 << 10));
4876 			goto relink;
4877 		}
4878 	}
4879 
4880 	bmsr = 0;
4881 	for (i = 0; i < 100; i++) {
4882 		tg3_readphy(tp, MII_BMSR, &bmsr);
4883 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4884 		    (bmsr & BMSR_LSTATUS))
4885 			break;
4886 		udelay(40);
4887 	}
4888 
4889 	if (bmsr & BMSR_LSTATUS) {
4890 		u32 aux_stat, bmcr;
4891 
4892 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4893 		for (i = 0; i < 2000; i++) {
4894 			udelay(10);
4895 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4896 			    aux_stat)
4897 				break;
4898 		}
4899 
4900 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4901 					     &current_speed,
4902 					     &current_duplex);
4903 
4904 		bmcr = 0;
4905 		for (i = 0; i < 200; i++) {
4906 			tg3_readphy(tp, MII_BMCR, &bmcr);
4907 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4908 				continue;
4909 			if (bmcr && bmcr != 0x7fff)
4910 				break;
4911 			udelay(10);
4912 		}
4913 
4914 		lcl_adv = 0;
4915 		rmt_adv = 0;
4916 
4917 		tp->link_config.active_speed = current_speed;
4918 		tp->link_config.active_duplex = current_duplex;
4919 
4920 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4921 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4922 
4923 			if ((bmcr & BMCR_ANENABLE) &&
4924 			    eee_config_ok &&
4925 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4926 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4927 				current_link_up = true;
4928 
			/* Changes to the EEE settings take effect only after
			 * a PHY reset.  If we have skipped a reset due to
			 * Link Flap Avoidance being enabled, do it now.
			 */
4933 			if (!eee_config_ok &&
4934 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4935 			    !force_reset) {
4936 				tg3_setup_eee(tp);
4937 				tg3_phy_reset(tp);
4938 			}
4939 		} else {
4940 			if (!(bmcr & BMCR_ANENABLE) &&
4941 			    tp->link_config.speed == current_speed &&
4942 			    tp->link_config.duplex == current_duplex) {
4943 				current_link_up = true;
4944 			}
4945 		}
4946 
4947 		if (current_link_up &&
4948 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4949 			u32 reg, bit;
4950 
4951 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4952 				reg = MII_TG3_FET_GEN_STAT;
4953 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4954 			} else {
4955 				reg = MII_TG3_EXT_STAT;
4956 				bit = MII_TG3_EXT_STAT_MDIX;
4957 			}
4958 
4959 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4960 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4961 
4962 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4963 		}
4964 	}
4965 
4966 relink:
4967 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4968 		tg3_phy_copper_begin(tp);
4969 
4970 		if (tg3_flag(tp, ROBOSWITCH)) {
4971 			current_link_up = true;
4972 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4973 			current_speed = SPEED_1000;
4974 			current_duplex = DUPLEX_FULL;
4975 			tp->link_config.active_speed = current_speed;
4976 			tp->link_config.active_duplex = current_duplex;
4977 		}
4978 
4979 		tg3_readphy(tp, MII_BMSR, &bmsr);
4980 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4981 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4982 			current_link_up = true;
4983 	}
4984 
4985 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4986 	if (current_link_up) {
4987 		if (tp->link_config.active_speed == SPEED_100 ||
4988 		    tp->link_config.active_speed == SPEED_10)
4989 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4990 		else
4991 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4992 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4993 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994 	else
4995 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996 
	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
5000 	if (tg3_flag(tp, RGMII_MODE)) {
5001 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5002 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5003 
5004 		if (tp->link_config.active_speed == SPEED_10)
5005 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5006 		else if (tp->link_config.active_speed == SPEED_100)
5007 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008 				     LED_CTRL_100MBPS_ON);
5009 		else if (tp->link_config.active_speed == SPEED_1000)
5010 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011 				     LED_CTRL_1000MBPS_ON);
5012 
5013 		tw32(MAC_LED_CTRL, led_ctrl);
5014 		udelay(40);
5015 	}
5016 
5017 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5018 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5019 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5020 
5021 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5022 		if (current_link_up &&
5023 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5024 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5025 		else
5026 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5027 	}
5028 
5029 	/* ??? Without this setting Netgear GA302T PHY does not
5030 	 * ??? send/receive packets...
5031 	 */
5032 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5033 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5034 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5035 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5036 		udelay(80);
5037 	}
5038 
5039 	tw32_f(MAC_MODE, tp->mac_mode);
5040 	udelay(40);
5041 
5042 	tg3_phy_eee_adjust(tp, current_link_up);
5043 
5044 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5045 		/* Polled via timer. */
5046 		tw32_f(MAC_EVENT, 0);
5047 	} else {
5048 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5049 	}
5050 	udelay(40);
5051 
5052 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5053 	    current_link_up &&
5054 	    tp->link_config.active_speed == SPEED_1000 &&
5055 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5056 		udelay(120);
5057 		tw32_f(MAC_STATUS,
5058 		     (MAC_STATUS_SYNC_CHANGED |
5059 		      MAC_STATUS_CFG_CHANGED));
5060 		udelay(40);
5061 		tg3_write_mem(tp,
5062 			      NIC_SRAM_FIRMWARE_MBOX,
5063 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5064 	}
5065 
5066 	/* Prevent send BD corruption. */
5067 	if (tg3_flag(tp, CLKREQ_BUG)) {
5068 		if (tp->link_config.active_speed == SPEED_100 ||
5069 		    tp->link_config.active_speed == SPEED_10)
5070 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5071 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5072 		else
5073 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5074 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5075 	}
5076 
5077 	tg3_test_and_report_link_chg(tp, current_link_up);
5078 
5079 	return 0;
5080 }
5081 
5082 struct tg3_fiber_aneginfo {
5083 	int state;
5084 #define ANEG_STATE_UNKNOWN		0
5085 #define ANEG_STATE_AN_ENABLE		1
5086 #define ANEG_STATE_RESTART_INIT		2
5087 #define ANEG_STATE_RESTART		3
5088 #define ANEG_STATE_DISABLE_LINK_OK	4
5089 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5090 #define ANEG_STATE_ABILITY_DETECT	6
5091 #define ANEG_STATE_ACK_DETECT_INIT	7
5092 #define ANEG_STATE_ACK_DETECT		8
5093 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5094 #define ANEG_STATE_COMPLETE_ACK		10
5095 #define ANEG_STATE_IDLE_DETECT_INIT	11
5096 #define ANEG_STATE_IDLE_DETECT		12
5097 #define ANEG_STATE_LINK_OK		13
5098 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5099 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5100 
5101 	u32 flags;
5102 #define MR_AN_ENABLE		0x00000001
5103 #define MR_RESTART_AN		0x00000002
5104 #define MR_AN_COMPLETE		0x00000004
5105 #define MR_PAGE_RX		0x00000008
5106 #define MR_NP_LOADED		0x00000010
5107 #define MR_TOGGLE_TX		0x00000020
5108 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5109 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5110 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5111 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5112 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5113 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5114 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5115 #define MR_TOGGLE_RX		0x00002000
5116 #define MR_NP_RX		0x00004000
5117 
5118 #define MR_LINK_OK		0x80000000
5119 
5120 	unsigned long link_time, cur_time;
5121 
5122 	u32 ability_match_cfg;
5123 	int ability_match_count;
5124 
5125 	char ability_match, idle_match, ack_match;
5126 
5127 	u32 txconfig, rxconfig;
5128 #define ANEG_CFG_NP		0x00000080
5129 #define ANEG_CFG_ACK		0x00000040
5130 #define ANEG_CFG_RF2		0x00000020
5131 #define ANEG_CFG_RF1		0x00000010
5132 #define ANEG_CFG_PS2		0x00000001
5133 #define ANEG_CFG_PS1		0x00008000
5134 #define ANEG_CFG_HD		0x00004000
5135 #define ANEG_CFG_FD		0x00002000
5136 #define ANEG_CFG_INVAL		0x00001f06
5137 
5138 };
5139 #define ANEG_OK		0
5140 #define ANEG_DONE	1
5141 #define ANEG_TIMER_ENAB	2
5142 #define ANEG_FAILED	-1
5143 
5144 #define ANEG_STATE_SETTLE_TIME	10000
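
/* Typical happy-path traversal of the state machine below (a sketch):
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	 -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	 -> ACK_DETECT_INIT -> ACK_DETECT
 *	 -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	 -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * The *_INIT states program the MAC registers; the polling states watch
 * rxconfig (and the settle timer) to decide when to advance.
 */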
5145 
5146 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5147 				   struct tg3_fiber_aneginfo *ap)
5148 {
5149 	u16 flowctrl;
5150 	unsigned long delta;
5151 	u32 rx_cfg_reg;
5152 	int ret;
5153 
5154 	if (ap->state == ANEG_STATE_UNKNOWN) {
5155 		ap->rxconfig = 0;
5156 		ap->link_time = 0;
5157 		ap->cur_time = 0;
5158 		ap->ability_match_cfg = 0;
5159 		ap->ability_match_count = 0;
5160 		ap->ability_match = 0;
5161 		ap->idle_match = 0;
5162 		ap->ack_match = 0;
5163 	}
5164 	ap->cur_time++;
5165 
5166 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5167 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5168 
5169 		if (rx_cfg_reg != ap->ability_match_cfg) {
5170 			ap->ability_match_cfg = rx_cfg_reg;
5171 			ap->ability_match = 0;
5172 			ap->ability_match_count = 0;
5173 		} else {
5174 			if (++ap->ability_match_count > 1) {
5175 				ap->ability_match = 1;
5176 				ap->ability_match_cfg = rx_cfg_reg;
5177 			}
5178 		}
5179 		if (rx_cfg_reg & ANEG_CFG_ACK)
5180 			ap->ack_match = 1;
5181 		else
5182 			ap->ack_match = 0;
5183 
5184 		ap->idle_match = 0;
5185 	} else {
5186 		ap->idle_match = 1;
5187 		ap->ability_match_cfg = 0;
5188 		ap->ability_match_count = 0;
5189 		ap->ability_match = 0;
5190 		ap->ack_match = 0;
5191 
5192 		rx_cfg_reg = 0;
5193 	}
5194 
5195 	ap->rxconfig = rx_cfg_reg;
5196 	ret = ANEG_OK;
5197 
5198 	switch (ap->state) {
5199 	case ANEG_STATE_UNKNOWN:
5200 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5201 			ap->state = ANEG_STATE_AN_ENABLE;
5202 
5203 		/* fallthru */
5204 	case ANEG_STATE_AN_ENABLE:
5205 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5206 		if (ap->flags & MR_AN_ENABLE) {
5207 			ap->link_time = 0;
5208 			ap->cur_time = 0;
5209 			ap->ability_match_cfg = 0;
5210 			ap->ability_match_count = 0;
5211 			ap->ability_match = 0;
5212 			ap->idle_match = 0;
5213 			ap->ack_match = 0;
5214 
5215 			ap->state = ANEG_STATE_RESTART_INIT;
5216 		} else {
5217 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5218 		}
5219 		break;
5220 
5221 	case ANEG_STATE_RESTART_INIT:
5222 		ap->link_time = ap->cur_time;
5223 		ap->flags &= ~(MR_NP_LOADED);
5224 		ap->txconfig = 0;
5225 		tw32(MAC_TX_AUTO_NEG, 0);
5226 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5227 		tw32_f(MAC_MODE, tp->mac_mode);
5228 		udelay(40);
5229 
5230 		ret = ANEG_TIMER_ENAB;
5231 		ap->state = ANEG_STATE_RESTART;
5232 
5233 		/* fallthru */
5234 	case ANEG_STATE_RESTART:
5235 		delta = ap->cur_time - ap->link_time;
5236 		if (delta > ANEG_STATE_SETTLE_TIME)
5237 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5238 		else
5239 			ret = ANEG_TIMER_ENAB;
5240 		break;
5241 
5242 	case ANEG_STATE_DISABLE_LINK_OK:
5243 		ret = ANEG_DONE;
5244 		break;
5245 
5246 	case ANEG_STATE_ABILITY_DETECT_INIT:
5247 		ap->flags &= ~(MR_TOGGLE_TX);
5248 		ap->txconfig = ANEG_CFG_FD;
5249 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5250 		if (flowctrl & ADVERTISE_1000XPAUSE)
5251 			ap->txconfig |= ANEG_CFG_PS1;
5252 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5253 			ap->txconfig |= ANEG_CFG_PS2;
5254 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5255 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5256 		tw32_f(MAC_MODE, tp->mac_mode);
5257 		udelay(40);
5258 
5259 		ap->state = ANEG_STATE_ABILITY_DETECT;
5260 		break;
5261 
5262 	case ANEG_STATE_ABILITY_DETECT:
5263 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5264 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5265 		break;
5266 
5267 	case ANEG_STATE_ACK_DETECT_INIT:
5268 		ap->txconfig |= ANEG_CFG_ACK;
5269 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271 		tw32_f(MAC_MODE, tp->mac_mode);
5272 		udelay(40);
5273 
5274 		ap->state = ANEG_STATE_ACK_DETECT;
5275 
5276 		/* fallthru */
5277 	case ANEG_STATE_ACK_DETECT:
5278 		if (ap->ack_match != 0) {
5279 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5280 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5281 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5282 			} else {
5283 				ap->state = ANEG_STATE_AN_ENABLE;
5284 			}
5285 		} else if (ap->ability_match != 0 &&
5286 			   ap->rxconfig == 0) {
5287 			ap->state = ANEG_STATE_AN_ENABLE;
5288 		}
5289 		break;
5290 
5291 	case ANEG_STATE_COMPLETE_ACK_INIT:
5292 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5293 			ret = ANEG_FAILED;
5294 			break;
5295 		}
5296 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5297 			       MR_LP_ADV_HALF_DUPLEX |
5298 			       MR_LP_ADV_SYM_PAUSE |
5299 			       MR_LP_ADV_ASYM_PAUSE |
5300 			       MR_LP_ADV_REMOTE_FAULT1 |
5301 			       MR_LP_ADV_REMOTE_FAULT2 |
5302 			       MR_LP_ADV_NEXT_PAGE |
5303 			       MR_TOGGLE_RX |
5304 			       MR_NP_RX);
5305 		if (ap->rxconfig & ANEG_CFG_FD)
5306 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5307 		if (ap->rxconfig & ANEG_CFG_HD)
5308 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5309 		if (ap->rxconfig & ANEG_CFG_PS1)
5310 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5311 		if (ap->rxconfig & ANEG_CFG_PS2)
5312 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5313 		if (ap->rxconfig & ANEG_CFG_RF1)
5314 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5315 		if (ap->rxconfig & ANEG_CFG_RF2)
5316 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5317 		if (ap->rxconfig & ANEG_CFG_NP)
5318 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5319 
5320 		ap->link_time = ap->cur_time;
5321 
5322 		ap->flags ^= (MR_TOGGLE_TX);
5323 		if (ap->rxconfig & 0x0008)
5324 			ap->flags |= MR_TOGGLE_RX;
5325 		if (ap->rxconfig & ANEG_CFG_NP)
5326 			ap->flags |= MR_NP_RX;
5327 		ap->flags |= MR_PAGE_RX;
5328 
5329 		ap->state = ANEG_STATE_COMPLETE_ACK;
5330 		ret = ANEG_TIMER_ENAB;
5331 		break;
5332 
5333 	case ANEG_STATE_COMPLETE_ACK:
5334 		if (ap->ability_match != 0 &&
5335 		    ap->rxconfig == 0) {
5336 			ap->state = ANEG_STATE_AN_ENABLE;
5337 			break;
5338 		}
5339 		delta = ap->cur_time - ap->link_time;
5340 		if (delta > ANEG_STATE_SETTLE_TIME) {
5341 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5342 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5343 			} else {
5344 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5345 				    !(ap->flags & MR_NP_RX)) {
5346 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347 				} else {
5348 					ret = ANEG_FAILED;
5349 				}
5350 			}
5351 		}
5352 		break;
5353 
5354 	case ANEG_STATE_IDLE_DETECT_INIT:
5355 		ap->link_time = ap->cur_time;
5356 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5357 		tw32_f(MAC_MODE, tp->mac_mode);
5358 		udelay(40);
5359 
5360 		ap->state = ANEG_STATE_IDLE_DETECT;
5361 		ret = ANEG_TIMER_ENAB;
5362 		break;
5363 
5364 	case ANEG_STATE_IDLE_DETECT:
5365 		if (ap->ability_match != 0 &&
5366 		    ap->rxconfig == 0) {
5367 			ap->state = ANEG_STATE_AN_ENABLE;
5368 			break;
5369 		}
5370 		delta = ap->cur_time - ap->link_time;
5371 		if (delta > ANEG_STATE_SETTLE_TIME) {
5372 			/* XXX another gem from the Broadcom driver :( */
5373 			ap->state = ANEG_STATE_LINK_OK;
5374 		}
5375 		break;
5376 
5377 	case ANEG_STATE_LINK_OK:
5378 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5379 		ret = ANEG_DONE;
5380 		break;
5381 
5382 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5383 		/* ??? unimplemented */
5384 		break;
5385 
5386 	case ANEG_STATE_NEXT_PAGE_WAIT:
5387 		/* ??? unimplemented */
5388 		break;
5389 
5390 	default:
5391 		ret = ANEG_FAILED;
5392 		break;
5393 	}
5394 
5395 	return ret;
5396 }
5397 
5398 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5399 {
5400 	int res = 0;
5401 	struct tg3_fiber_aneginfo aninfo;
5402 	int status = ANEG_FAILED;
5403 	unsigned int tick;
5404 	u32 tmp;
5405 
5406 	tw32_f(MAC_TX_AUTO_NEG, 0);
5407 
5408 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5409 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5410 	udelay(40);
5411 
5412 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5413 	udelay(40);
5414 
5415 	memset(&aninfo, 0, sizeof(aninfo));
5416 	aninfo.flags |= MR_AN_ENABLE;
5417 	aninfo.state = ANEG_STATE_UNKNOWN;
5418 	aninfo.cur_time = 0;
5419 	tick = 0;
5420 	while (++tick < 195000) {
5421 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5422 		if (status == ANEG_DONE || status == ANEG_FAILED)
5423 			break;
5424 
5425 		udelay(1);
5426 	}
5427 
5428 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5429 	tw32_f(MAC_MODE, tp->mac_mode);
5430 	udelay(40);
5431 
5432 	*txflags = aninfo.txconfig;
5433 	*rxflags = aninfo.flags;
5434 
5435 	if (status == ANEG_DONE &&
5436 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5437 			     MR_LP_ADV_FULL_DUPLEX)))
5438 		res = 1;
5439 
5440 	return res;
5441 }
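
/* The 195000-tick loop above budgets roughly 195 ms (one udelay(1) per
 * tick, plus state machine overhead) for software autoneg to reach
 * ANEG_DONE or ANEG_FAILED before giving up.
 */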
5442 
5443 static void tg3_init_bcm8002(struct tg3 *tp)
5444 {
5445 	u32 mac_status = tr32(MAC_STATUS);
5446 	int i;
5447 
	/* Reset when initializing for the first time or when we have a link. */
5449 	if (tg3_flag(tp, INIT_COMPLETE) &&
5450 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5451 		return;
5452 
5453 	/* Set PLL lock range. */
5454 	tg3_writephy(tp, 0x16, 0x8007);
5455 
5456 	/* SW reset */
5457 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5458 
5459 	/* Wait for reset to complete. */
5460 	/* XXX schedule_timeout() ... */
5461 	for (i = 0; i < 500; i++)
5462 		udelay(10);
5463 
5464 	/* Config mode; select PMA/Ch 1 regs. */
5465 	tg3_writephy(tp, 0x10, 0x8411);
5466 
5467 	/* Enable auto-lock and comdet, select txclk for tx. */
5468 	tg3_writephy(tp, 0x11, 0x0a10);
5469 
5470 	tg3_writephy(tp, 0x18, 0x00a0);
5471 	tg3_writephy(tp, 0x16, 0x41ff);
5472 
5473 	/* Assert and deassert POR. */
5474 	tg3_writephy(tp, 0x13, 0x0400);
5475 	udelay(40);
5476 	tg3_writephy(tp, 0x13, 0x0000);
5477 
5478 	tg3_writephy(tp, 0x11, 0x0a50);
5479 	udelay(40);
5480 	tg3_writephy(tp, 0x11, 0x0a10);
5481 
5482 	/* Wait for signal to stabilize */
5483 	/* XXX schedule_timeout() ... */
5484 	for (i = 0; i < 15000; i++)
5485 		udelay(10);
5486 
5487 	/* Deselect the channel register so we can read the PHYID
5488 	 * later.
5489 	 */
5490 	tg3_writephy(tp, 0x10, 0x8011);
5491 }
5492 
5493 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5494 {
5495 	u16 flowctrl;
5496 	bool current_link_up;
5497 	u32 sg_dig_ctrl, sg_dig_status;
5498 	u32 serdes_cfg, expected_sg_dig_ctrl;
5499 	int workaround, port_a;
5500 
5501 	serdes_cfg = 0;
5502 	expected_sg_dig_ctrl = 0;
5503 	workaround = 0;
5504 	port_a = 1;
5505 	current_link_up = false;
5506 
5507 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5508 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5509 		workaround = 1;
5510 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5511 			port_a = 0;
5512 
5513 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5514 		/* preserve bits 20-23 for voltage regulator */
5515 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5516 	}
5517 
5518 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5519 
5520 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5521 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5522 			if (workaround) {
5523 				u32 val = serdes_cfg;
5524 
5525 				if (port_a)
5526 					val |= 0xc010000;
5527 				else
5528 					val |= 0x4010000;
5529 				tw32_f(MAC_SERDES_CFG, val);
5530 			}
5531 
5532 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5533 		}
5534 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5535 			tg3_setup_flow_control(tp, 0, 0);
5536 			current_link_up = true;
5537 		}
5538 		goto out;
5539 	}
5540 
5541 	/* Want auto-negotiation.  */
5542 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5543 
5544 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5545 	if (flowctrl & ADVERTISE_1000XPAUSE)
5546 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5547 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5548 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5549 
5550 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5551 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5552 		    tp->serdes_counter &&
5553 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5554 				    MAC_STATUS_RCVD_CFG)) ==
5555 		     MAC_STATUS_PCS_SYNCED)) {
5556 			tp->serdes_counter--;
5557 			current_link_up = true;
5558 			goto out;
5559 		}
5560 restart_autoneg:
5561 		if (workaround)
5562 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5563 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5564 		udelay(5);
5565 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5566 
5567 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5568 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5569 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5570 				 MAC_STATUS_SIGNAL_DET)) {
5571 		sg_dig_status = tr32(SG_DIG_STATUS);
5572 		mac_status = tr32(MAC_STATUS);
5573 
5574 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5575 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5576 			u32 local_adv = 0, remote_adv = 0;
5577 
5578 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5579 				local_adv |= ADVERTISE_1000XPAUSE;
5580 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5581 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5582 
5583 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5584 				remote_adv |= LPA_1000XPAUSE;
5585 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5586 				remote_adv |= LPA_1000XPAUSE_ASYM;
5587 
5588 			tp->link_config.rmt_adv =
5589 					   mii_adv_to_ethtool_adv_x(remote_adv);
5590 
5591 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5592 			current_link_up = true;
5593 			tp->serdes_counter = 0;
5594 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5595 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5596 			if (tp->serdes_counter)
5597 				tp->serdes_counter--;
5598 			else {
5599 				if (workaround) {
5600 					u32 val = serdes_cfg;
5601 
5602 					if (port_a)
5603 						val |= 0xc010000;
5604 					else
5605 						val |= 0x4010000;
5606 
5607 					tw32_f(MAC_SERDES_CFG, val);
5608 				}
5609 
5610 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5611 				udelay(40);
5612 
				/* Link parallel detection - link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
5616 				mac_status = tr32(MAC_STATUS);
5617 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5618 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5619 					tg3_setup_flow_control(tp, 0, 0);
5620 					current_link_up = true;
5621 					tp->phy_flags |=
5622 						TG3_PHYFLG_PARALLEL_DETECT;
5623 					tp->serdes_counter =
5624 						SERDES_PARALLEL_DET_TIMEOUT;
5625 				} else
5626 					goto restart_autoneg;
5627 			}
5628 		}
5629 	} else {
5630 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5631 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5632 	}
5633 
5634 out:
5635 	return current_link_up;
5636 }
5637 
5638 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5639 {
5640 	bool current_link_up = false;
5641 
5642 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5643 		goto out;
5644 
5645 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5646 		u32 txflags, rxflags;
5647 		int i;
5648 
5649 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5650 			u32 local_adv = 0, remote_adv = 0;
5651 
5652 			if (txflags & ANEG_CFG_PS1)
5653 				local_adv |= ADVERTISE_1000XPAUSE;
5654 			if (txflags & ANEG_CFG_PS2)
5655 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5656 
5657 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5658 				remote_adv |= LPA_1000XPAUSE;
5659 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5660 				remote_adv |= LPA_1000XPAUSE_ASYM;
5661 
5662 			tp->link_config.rmt_adv =
5663 					   mii_adv_to_ethtool_adv_x(remote_adv);
5664 
5665 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5666 
5667 			current_link_up = true;
5668 		}
5669 		for (i = 0; i < 30; i++) {
5670 			udelay(20);
5671 			tw32_f(MAC_STATUS,
5672 			       (MAC_STATUS_SYNC_CHANGED |
5673 				MAC_STATUS_CFG_CHANGED));
5674 			udelay(40);
5675 			if ((tr32(MAC_STATUS) &
5676 			     (MAC_STATUS_SYNC_CHANGED |
5677 			      MAC_STATUS_CFG_CHANGED)) == 0)
5678 				break;
5679 		}
5680 
5681 		mac_status = tr32(MAC_STATUS);
5682 		if (!current_link_up &&
5683 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5684 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5685 			current_link_up = true;
5686 	} else {
5687 		tg3_setup_flow_control(tp, 0, 0);
5688 
5689 		/* Forcing 1000FD link up. */
5690 		current_link_up = true;
5691 
5692 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5693 		udelay(40);
5694 
5695 		tw32_f(MAC_MODE, tp->mac_mode);
5696 		udelay(40);
5697 	}
5698 
5699 out:
5700 	return current_link_up;
5701 }
5702 
5703 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5704 {
5705 	u32 orig_pause_cfg;
5706 	u16 orig_active_speed;
5707 	u8 orig_active_duplex;
5708 	u32 mac_status;
5709 	bool current_link_up;
5710 	int i;
5711 
5712 	orig_pause_cfg = tp->link_config.active_flowctrl;
5713 	orig_active_speed = tp->link_config.active_speed;
5714 	orig_active_duplex = tp->link_config.active_duplex;
5715 
5716 	if (!tg3_flag(tp, HW_AUTONEG) &&
5717 	    tp->link_up &&
5718 	    tg3_flag(tp, INIT_COMPLETE)) {
5719 		mac_status = tr32(MAC_STATUS);
5720 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5721 			       MAC_STATUS_SIGNAL_DET |
5722 			       MAC_STATUS_CFG_CHANGED |
5723 			       MAC_STATUS_RCVD_CFG);
5724 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5725 				   MAC_STATUS_SIGNAL_DET)) {
5726 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5727 					    MAC_STATUS_CFG_CHANGED));
5728 			return 0;
5729 		}
5730 	}
5731 
5732 	tw32_f(MAC_TX_AUTO_NEG, 0);
5733 
5734 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5735 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5736 	tw32_f(MAC_MODE, tp->mac_mode);
5737 	udelay(40);
5738 
5739 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5740 		tg3_init_bcm8002(tp);
5741 
	/* Enable link change events even while serdes polling is in use.  */
5743 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5744 	udelay(40);
5745 
5746 	current_link_up = false;
5747 	tp->link_config.rmt_adv = 0;
5748 	mac_status = tr32(MAC_STATUS);
5749 
5750 	if (tg3_flag(tp, HW_AUTONEG))
5751 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5752 	else
5753 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5754 
5755 	tp->napi[0].hw_status->status =
5756 		(SD_STATUS_UPDATED |
5757 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5758 
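	/* Poll (up to 100 * 5 us) for the sync/config change bits to stop
	 * reasserting before sampling the final link state below.
	 */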
5759 	for (i = 0; i < 100; i++) {
5760 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5761 				    MAC_STATUS_CFG_CHANGED));
5762 		udelay(5);
5763 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5764 					 MAC_STATUS_CFG_CHANGED |
5765 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5766 			break;
5767 	}
5768 
5769 	mac_status = tr32(MAC_STATUS);
5770 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5771 		current_link_up = false;
5772 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5773 		    tp->serdes_counter == 0) {
5774 			tw32_f(MAC_MODE, (tp->mac_mode |
5775 					  MAC_MODE_SEND_CONFIGS));
5776 			udelay(1);
5777 			tw32_f(MAC_MODE, tp->mac_mode);
5778 		}
5779 	}
5780 
5781 	if (current_link_up) {
5782 		tp->link_config.active_speed = SPEED_1000;
5783 		tp->link_config.active_duplex = DUPLEX_FULL;
5784 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5785 				    LED_CTRL_LNKLED_OVERRIDE |
5786 				    LED_CTRL_1000MBPS_ON));
5787 	} else {
5788 		tp->link_config.active_speed = SPEED_UNKNOWN;
5789 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5790 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5791 				    LED_CTRL_LNKLED_OVERRIDE |
5792 				    LED_CTRL_TRAFFIC_OVERRIDE));
5793 	}
5794 
5795 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5796 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5797 		if (orig_pause_cfg != now_pause_cfg ||
5798 		    orig_active_speed != tp->link_config.active_speed ||
5799 		    orig_active_duplex != tp->link_config.active_duplex)
5800 			tg3_link_report(tp);
5801 	}
5802 
5803 	return 0;
5804 }
5805 
5806 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5807 {
5808 	int err = 0;
5809 	u32 bmsr, bmcr;
5810 	u16 current_speed = SPEED_UNKNOWN;
5811 	u8 current_duplex = DUPLEX_UNKNOWN;
5812 	bool current_link_up = false;
	u32 local_adv = 0, remote_adv = 0, sgsr;
5814 
5815 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5816 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5817 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5818 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5819 
5820 		if (force_reset)
5821 			tg3_phy_reset(tp);
5822 
5823 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5824 
5825 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5826 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5827 		} else {
5828 			current_link_up = true;
5829 			if (sgsr & SERDES_TG3_SPEED_1000) {
5830 				current_speed = SPEED_1000;
5831 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5833 				current_speed = SPEED_100;
5834 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835 			} else {
5836 				current_speed = SPEED_10;
5837 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 			}
5839 
5840 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5841 				current_duplex = DUPLEX_FULL;
5842 			else
5843 				current_duplex = DUPLEX_HALF;
5844 		}
5845 
5846 		tw32_f(MAC_MODE, tp->mac_mode);
5847 		udelay(40);
5848 
5849 		tg3_clear_mac_status(tp);
5850 
5851 		goto fiber_setup_done;
5852 	}
5853 
5854 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5855 	tw32_f(MAC_MODE, tp->mac_mode);
5856 	udelay(40);
5857 
5858 	tg3_clear_mac_status(tp);
5859 
5860 	if (force_reset)
5861 		tg3_phy_reset(tp);
5862 
5863 	tp->link_config.rmt_adv = 0;
5864 
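	/* BMSR latches link-down events; read it twice so a stale
	 * link-down indication is cleared and the second read reflects
	 * the current link state.
	 */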
5865 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5867 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5868 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5869 			bmsr |= BMSR_LSTATUS;
5870 		else
5871 			bmsr &= ~BMSR_LSTATUS;
5872 	}
5873 
5874 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5875 
5876 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5877 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5878 		/* do nothing, just check for link up at the end */
5879 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5880 		u32 adv, newadv;
5881 
5882 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5883 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5884 				 ADVERTISE_1000XPAUSE |
5885 				 ADVERTISE_1000XPSE_ASYM |
5886 				 ADVERTISE_SLCT);
5887 
5888 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5889 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5890 
5891 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5892 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5893 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5894 			tg3_writephy(tp, MII_BMCR, bmcr);
5895 
5896 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5897 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5898 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5899 
5900 			return err;
5901 		}
5902 	} else {
5903 		u32 new_bmcr;
5904 
5905 		bmcr &= ~BMCR_SPEED1000;
5906 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5907 
5908 		if (tp->link_config.duplex == DUPLEX_FULL)
5909 			new_bmcr |= BMCR_FULLDPLX;
5910 
5911 		if (new_bmcr != bmcr) {
5912 			/* BMCR_SPEED1000 is a reserved bit that needs
5913 			 * to be set on write.
5914 			 */
5915 			new_bmcr |= BMCR_SPEED1000;
5916 
5917 			/* Force a linkdown */
5918 			if (tp->link_up) {
5919 				u32 adv;
5920 
5921 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5922 				adv &= ~(ADVERTISE_1000XFULL |
5923 					 ADVERTISE_1000XHALF |
5924 					 ADVERTISE_SLCT);
5925 				tg3_writephy(tp, MII_ADVERTISE, adv);
5926 				tg3_writephy(tp, MII_BMCR, bmcr |
5927 							   BMCR_ANRESTART |
5928 							   BMCR_ANENABLE);
5929 				udelay(10);
5930 				tg3_carrier_off(tp);
5931 			}
5932 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5933 			bmcr = new_bmcr;
5934 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5936 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5937 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5938 					bmsr |= BMSR_LSTATUS;
5939 				else
5940 					bmsr &= ~BMSR_LSTATUS;
5941 			}
5942 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5943 		}
5944 	}
5945 
5946 	if (bmsr & BMSR_LSTATUS) {
5947 		current_speed = SPEED_1000;
5948 		current_link_up = true;
5949 		if (bmcr & BMCR_FULLDPLX)
5950 			current_duplex = DUPLEX_FULL;
5951 		else
5952 			current_duplex = DUPLEX_HALF;
5953 
5954 		local_adv = 0;
5955 		remote_adv = 0;
5956 
5957 		if (bmcr & BMCR_ANENABLE) {
5958 			u32 common;
5959 
5960 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5961 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5962 			common = local_adv & remote_adv;
5963 			if (common & (ADVERTISE_1000XHALF |
5964 				      ADVERTISE_1000XFULL)) {
5965 				if (common & ADVERTISE_1000XFULL)
5966 					current_duplex = DUPLEX_FULL;
5967 				else
5968 					current_duplex = DUPLEX_HALF;
5969 
5970 				tp->link_config.rmt_adv =
5971 					   mii_adv_to_ethtool_adv_x(remote_adv);
5972 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5973 				/* Link is up via parallel detect */
5974 			} else {
5975 				current_link_up = false;
5976 			}
5977 		}
5978 	}
5979 
5980 fiber_setup_done:
5981 	if (current_link_up && current_duplex == DUPLEX_FULL)
5982 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5983 
5984 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5985 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5986 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5987 
5988 	tw32_f(MAC_MODE, tp->mac_mode);
5989 	udelay(40);
5990 
5991 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5992 
5993 	tp->link_config.active_speed = current_speed;
5994 	tp->link_config.active_duplex = current_duplex;
5995 
5996 	tg3_test_and_report_link_chg(tp, current_link_up);
5997 	return err;
5998 }
5999 
6000 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6001 {
6002 	if (tp->serdes_counter) {
6003 		/* Give autoneg time to complete. */
6004 		tp->serdes_counter--;
6005 		return;
6006 	}
6007 
6008 	if (!tp->link_up &&
6009 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6010 		u32 bmcr;
6011 
6012 		tg3_readphy(tp, MII_BMCR, &bmcr);
6013 		if (bmcr & BMCR_ANENABLE) {
6014 			u32 phy1, phy2;
6015 
6016 			/* Select shadow register 0x1f */
6017 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6018 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6019 
6020 			/* Select expansion interrupt status register */
6021 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6022 					 MII_TG3_DSP_EXP1_INT_STAT);
6023 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6025 
6026 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and are not
				 * receiving config code words; the link is
				 * up by parallel detection.
				 */
6031 
6032 				bmcr &= ~BMCR_ANENABLE;
6033 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6034 				tg3_writephy(tp, MII_BMCR, bmcr);
6035 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6036 			}
6037 		}
6038 	} else if (tp->link_up &&
6039 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6040 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6041 		u32 phy2;
6042 
6043 		/* Select expansion interrupt status register */
6044 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6045 				 MII_TG3_DSP_EXP1_INT_STAT);
6046 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6047 		if (phy2 & 0x20) {
6048 			u32 bmcr;
6049 
6050 			/* Config code words received, turn on autoneg. */
6051 			tg3_readphy(tp, MII_BMCR, &bmcr);
6052 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6053 
6054 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6056 		}
6057 	}
6058 }
6059 
6060 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6061 {
6062 	u32 val;
6063 	int err;
6064 
6065 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6066 		err = tg3_setup_fiber_phy(tp, force_reset);
6067 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6068 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6069 	else
6070 		err = tg3_setup_copper_phy(tp, force_reset);
6071 
6072 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6073 		u32 scale;
6074 
6075 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6076 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6077 			scale = 65;
6078 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6079 			scale = 6;
6080 		else
6081 			scale = 12;
6082 
6083 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6084 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6085 		tw32(GRC_MISC_CFG, val);
6086 	}
6087 
6088 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6089 	      (6 << TX_LENGTHS_IPG_SHIFT);
6090 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6091 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6092 		val |= tr32(MAC_TX_LENGTHS) &
6093 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6094 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6095 
6096 	if (tp->link_config.active_speed == SPEED_1000 &&
6097 	    tp->link_config.active_duplex == DUPLEX_HALF)
6098 		tw32(MAC_TX_LENGTHS, val |
6099 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6100 	else
6101 		tw32(MAC_TX_LENGTHS, val |
6102 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 
6104 	if (!tg3_flag(tp, 5705_PLUS)) {
6105 		if (tp->link_up) {
6106 			tw32(HOSTCC_STAT_COAL_TICKS,
6107 			     tp->coal.stats_block_coalesce_usecs);
6108 		} else {
6109 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6110 		}
6111 	}
6112 
6113 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6114 		val = tr32(PCIE_PWR_MGMT_THRESH);
6115 		if (!tp->link_up)
6116 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6117 			      tp->pwrmgmt_thresh;
6118 		else
6119 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6120 		tw32(PCIE_PWR_MGMT_THRESH, val);
6121 	}
6122 
6123 	return err;
6124 }
6125 
6126 /* tp->lock must be held */
6127 static u64 tg3_refclk_read(struct tg3 *tp)
6128 {
6129 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6130 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6131 }
6132 
6133 /* tp->lock must be held */
6134 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6135 {
6136 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6137 
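	/* Stop the clock around the two 32-bit writes so the counter
	 * cannot advance (and roll over) between them.
	 */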
6138 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6139 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6140 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6141 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6142 }
6143 
6144 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6145 static inline void tg3_full_unlock(struct tg3 *tp);
6146 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6147 {
6148 	struct tg3 *tp = netdev_priv(dev);
6149 
6150 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6151 				SOF_TIMESTAMPING_RX_SOFTWARE |
6152 				SOF_TIMESTAMPING_SOFTWARE;
6153 
6154 	if (tg3_flag(tp, PTP_CAPABLE)) {
6155 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6156 					SOF_TIMESTAMPING_RX_HARDWARE |
6157 					SOF_TIMESTAMPING_RAW_HARDWARE;
6158 	}
6159 
6160 	if (tp->ptp_clock)
6161 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6162 	else
6163 		info->phc_index = -1;
6164 
6165 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6166 
6167 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6168 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6169 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6170 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6171 	return 0;
6172 }
6173 
6174 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6175 {
6176 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6177 	bool neg_adj = false;
6178 	u32 correction = 0;
6179 
6180 	if (ppb < 0) {
6181 		neg_adj = true;
6182 		ppb = -ppb;
6183 	}
6184 
6185 	/* Frequency adjustment is performed using hardware with a 24 bit
6186 	 * accumulator and a programmable correction value. On each clk, the
6187 	 * correction value gets added to the accumulator and when it
6188 	 * overflows, the time counter is incremented/decremented.
6189 	 *
6190 	 * So conversion from ppb to correction value is
6191 	 *		ppb * (1 << 24) / 1000000000
6192 	 */
6193 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6194 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6195 
6196 	tg3_full_lock(tp, 0);
6197 
6198 	if (correction)
6199 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 		     TG3_EAV_REF_CLK_CORRECT_EN |
6201 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6202 	else
6203 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6204 
6205 	tg3_full_unlock(tp);
6206 
6207 	return 0;
6208 }
6209 
6210 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6211 {
6212 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213 
6214 	tg3_full_lock(tp, 0);
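	/* The hardware clock itself is not touched; the delta accumulates
	 * in tp->ptp_adjust and is applied in tg3_ptp_gettime() and in
	 * tg3_hwclock_to_timestamp().
	 */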
6215 	tp->ptp_adjust += delta;
6216 	tg3_full_unlock(tp);
6217 
6218 	return 0;
6219 }
6220 
6221 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6222 {
6223 	u64 ns;
6224 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225 
6226 	tg3_full_lock(tp, 0);
6227 	ns = tg3_refclk_read(tp);
6228 	ns += tp->ptp_adjust;
6229 	tg3_full_unlock(tp);
6230 
6231 	*ts = ns_to_timespec64(ns);
6232 
6233 	return 0;
6234 }
6235 
6236 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6237 			   const struct timespec64 *ts)
6238 {
6239 	u64 ns;
6240 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241 
6242 	ns = timespec64_to_ns(ts);
6243 
6244 	tg3_full_lock(tp, 0);
6245 	tg3_refclk_write(tp, ns);
6246 	tp->ptp_adjust = 0;
6247 	tg3_full_unlock(tp);
6248 
6249 	return 0;
6250 }
6251 
6252 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6253 			  struct ptp_clock_request *rq, int on)
6254 {
6255 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6256 	u32 clock_ctl;
6257 	int rval = 0;
6258 
6259 	switch (rq->type) {
6260 	case PTP_CLK_REQ_PEROUT:
6261 		if (rq->perout.index != 0)
6262 			return -EINVAL;
6263 
6264 		tg3_full_lock(tp, 0);
6265 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6266 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6267 
6268 		if (on) {
6269 			u64 nsec;
6270 
6271 			nsec = rq->perout.start.sec * 1000000000ULL +
6272 			       rq->perout.start.nsec;
6273 
6274 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6275 				netdev_warn(tp->dev,
6276 					    "Device supports only a one-shot timesync output, period must be 0\n");
6277 				rval = -EINVAL;
6278 				goto err_out;
6279 			}
6280 
6281 			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit; start must fit in 63 bits\n");
6284 				rval = -EINVAL;
6285 				goto err_out;
6286 			}
6287 
6288 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6289 			tw32(TG3_EAV_WATCHDOG0_MSB,
6290 			     TG3_EAV_WATCHDOG0_EN |
6291 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6292 
6293 			tw32(TG3_EAV_REF_CLCK_CTL,
6294 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6295 		} else {
6296 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6297 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6298 		}
6299 
6300 err_out:
6301 		tg3_full_unlock(tp);
6302 		return rval;
6303 
6304 	default:
6305 		break;
6306 	}
6307 
6308 	return -EOPNOTSUPP;
6309 }
6310 
6311 static const struct ptp_clock_info tg3_ptp_caps = {
6312 	.owner		= THIS_MODULE,
6313 	.name		= "tg3 clock",
6314 	.max_adj	= 250000000,
6315 	.n_alarm	= 0,
6316 	.n_ext_ts	= 0,
6317 	.n_per_out	= 1,
6318 	.n_pins		= 0,
6319 	.pps		= 0,
6320 	.adjfreq	= tg3_ptp_adjfreq,
6321 	.adjtime	= tg3_ptp_adjtime,
6322 	.gettime64	= tg3_ptp_gettime,
6323 	.settime64	= tg3_ptp_settime,
6324 	.enable		= tg3_ptp_enable,
6325 };
6326 
6327 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6328 				     struct skb_shared_hwtstamps *timestamp)
6329 {
6330 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6331 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6332 					   tp->ptp_adjust);
6333 }
6334 
6335 /* tp->lock must be held */
6336 static void tg3_ptp_init(struct tg3 *tp)
6337 {
6338 	if (!tg3_flag(tp, PTP_CAPABLE))
6339 		return;
6340 
6341 	/* Initialize the hardware clock to the system time. */
6342 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6343 	tp->ptp_adjust = 0;
6344 	tp->ptp_info = tg3_ptp_caps;
6345 }
6346 
6347 /* tp->lock must be held */
6348 static void tg3_ptp_resume(struct tg3 *tp)
6349 {
6350 	if (!tg3_flag(tp, PTP_CAPABLE))
6351 		return;
6352 
6353 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6354 	tp->ptp_adjust = 0;
6355 }
6356 
6357 static void tg3_ptp_fini(struct tg3 *tp)
6358 {
6359 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6360 		return;
6361 
6362 	ptp_clock_unregister(tp->ptp_clock);
6363 	tp->ptp_clock = NULL;
6364 	tp->ptp_adjust = 0;
6365 }
6366 
6367 static inline int tg3_irq_sync(struct tg3 *tp)
6368 {
6369 	return tp->irq_sync;
6370 }
6371 
6372 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6373 {
6374 	int i;
6375 
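	/* Offset the destination pointer so the dump buffer mirrors the
	 * register map: the value read from register (off + i) lands at
	 * byte offset (off + i) within the buffer.
	 */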
6376 	dst = (u32 *)((u8 *)dst + off);
6377 	for (i = 0; i < len; i += sizeof(u32))
6378 		*dst++ = tr32(off + i);
6379 }
6380 
6381 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6382 {
6383 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6384 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6385 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6386 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6387 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6388 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6389 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6390 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6391 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6392 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6393 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6394 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6395 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6396 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6397 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6398 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6399 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6400 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6401 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6402 
6403 	if (tg3_flag(tp, SUPPORT_MSIX))
6404 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6405 
6406 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6407 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6408 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6409 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6410 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6411 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6412 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6413 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6414 
6415 	if (!tg3_flag(tp, 5705_PLUS)) {
6416 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6417 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6418 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6419 	}
6420 
6421 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6422 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6423 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6424 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6425 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6426 
6427 	if (tg3_flag(tp, NVRAM))
6428 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6429 }
6430 
6431 static void tg3_dump_state(struct tg3 *tp)
6432 {
6433 	int i;
6434 	u32 *regs;
6435 
6436 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6437 	if (!regs)
6438 		return;
6439 
6440 	if (tg3_flag(tp, PCI_EXPRESS)) {
6441 		/* Read up to but not including private PCI registers */
6442 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6443 			regs[i / sizeof(u32)] = tr32(i);
6444 	} else
6445 		tg3_dump_legacy_regs(tp, regs);
6446 
6447 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6448 		if (!regs[i + 0] && !regs[i + 1] &&
6449 		    !regs[i + 2] && !regs[i + 3])
6450 			continue;
6451 
6452 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6453 			   i * 4,
6454 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6455 	}
6456 
6457 	kfree(regs);
6458 
6459 	for (i = 0; i < tp->irq_cnt; i++) {
6460 		struct tg3_napi *tnapi = &tp->napi[i];
6461 
6462 		/* SW status block */
6463 		netdev_err(tp->dev,
6464 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6465 			   i,
6466 			   tnapi->hw_status->status,
6467 			   tnapi->hw_status->status_tag,
6468 			   tnapi->hw_status->rx_jumbo_consumer,
6469 			   tnapi->hw_status->rx_consumer,
6470 			   tnapi->hw_status->rx_mini_consumer,
6471 			   tnapi->hw_status->idx[0].rx_producer,
6472 			   tnapi->hw_status->idx[0].tx_consumer);
6473 
6474 		netdev_err(tp->dev,
6475 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6476 			   i,
6477 			   tnapi->last_tag, tnapi->last_irq_tag,
6478 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6479 			   tnapi->rx_rcb_ptr,
6480 			   tnapi->prodring.rx_std_prod_idx,
6481 			   tnapi->prodring.rx_std_cons_idx,
6482 			   tnapi->prodring.rx_jmb_prod_idx,
6483 			   tnapi->prodring.rx_jmb_cons_idx);
6484 	}
6485 }
6486 
6487 /* This is called whenever we suspect that the system chipset is re-
6488  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6489  * is bogus tx completions. We try to recover by setting the
6490  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6491  * in the workqueue.
6492  */
6493 static void tg3_tx_recover(struct tg3 *tp)
6494 {
6495 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6496 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6497 
6498 	netdev_warn(tp->dev,
6499 		    "The system may be re-ordering memory-mapped I/O "
6500 		    "cycles to the network device, attempting to recover. "
6501 		    "Please report the problem to the driver maintainer "
6502 		    "and include system chipset information.\n");
6503 
6504 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6505 }
6506 
6507 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6508 {
6509 	/* Tell compiler to fetch tx indices from memory. */
6510 	barrier();
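	/* The indices are free-running; masking the difference with the
	 * power-of-two ring size handles wraparound.  E.g. tx_prod = 10
	 * and tx_cons = 500 on a 512-entry ring leaves
	 * (10 - 500) & 511 = 22 descriptors still in flight.
	 */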
6511 	return tnapi->tx_pending -
6512 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6513 }
6514 
6515 /* Tigon3 never reports partial packet sends.  So we do not
6516  * need special logic to handle SKBs that have not had all
6517  * of their frags sent yet, like SunGEM does.
6518  */
6519 static void tg3_tx(struct tg3_napi *tnapi)
6520 {
6521 	struct tg3 *tp = tnapi->tp;
6522 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6523 	u32 sw_idx = tnapi->tx_cons;
6524 	struct netdev_queue *txq;
6525 	int index = tnapi - tp->napi;
6526 	unsigned int pkts_compl = 0, bytes_compl = 0;
6527 
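	/* With TSS, the first NAPI vector carries no TX ring, so the TX
	 * queue index is one less than the NAPI vector index.
	 */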
6528 	if (tg3_flag(tp, ENABLE_TSS))
6529 		index--;
6530 
6531 	txq = netdev_get_tx_queue(tp->dev, index);
6532 
6533 	while (sw_idx != hw_idx) {
6534 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6535 		struct sk_buff *skb = ri->skb;
6536 		int i, tx_bug = 0;
6537 
6538 		if (unlikely(skb == NULL)) {
6539 			tg3_tx_recover(tp);
6540 			return;
6541 		}
6542 
6543 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6544 			struct skb_shared_hwtstamps timestamp;
6545 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6546 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6547 
6548 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6549 
6550 			skb_tstamp_tx(skb, &timestamp);
6551 		}
6552 
6553 		pci_unmap_single(tp->pdev,
6554 				 dma_unmap_addr(ri, mapping),
6555 				 skb_headlen(skb),
6556 				 PCI_DMA_TODEVICE);
6557 
6558 		ri->skb = NULL;
6559 
6560 		while (ri->fragmented) {
6561 			ri->fragmented = false;
6562 			sw_idx = NEXT_TX(sw_idx);
6563 			ri = &tnapi->tx_buffers[sw_idx];
6564 		}
6565 
6566 		sw_idx = NEXT_TX(sw_idx);
6567 
6568 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6569 			ri = &tnapi->tx_buffers[sw_idx];
6570 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6571 				tx_bug = 1;
6572 
6573 			pci_unmap_page(tp->pdev,
6574 				       dma_unmap_addr(ri, mapping),
6575 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6576 				       PCI_DMA_TODEVICE);
6577 
6578 			while (ri->fragmented) {
6579 				ri->fragmented = false;
6580 				sw_idx = NEXT_TX(sw_idx);
6581 				ri = &tnapi->tx_buffers[sw_idx];
6582 			}
6583 
6584 			sw_idx = NEXT_TX(sw_idx);
6585 		}
6586 
6587 		pkts_compl++;
6588 		bytes_compl += skb->len;
6589 
6590 		dev_kfree_skb_any(skb);
6591 
6592 		if (unlikely(tx_bug)) {
6593 			tg3_tx_recover(tp);
6594 			return;
6595 		}
6596 	}
6597 
6598 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6599 
6600 	tnapi->tx_cons = sw_idx;
6601 
6602 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6603 	 * before checking for netif_queue_stopped().  Without the
6604 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6605 	 * will miss it and cause the queue to be stopped forever.
6606 	 */
6607 	smp_mb();
6608 
6609 	if (unlikely(netif_tx_queue_stopped(txq) &&
6610 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6611 		__netif_tx_lock(txq, smp_processor_id());
6612 		if (netif_tx_queue_stopped(txq) &&
6613 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6614 			netif_tx_wake_queue(txq);
6615 		__netif_tx_unlock(txq);
6616 	}
6617 }
6618 
6619 static void tg3_frag_free(bool is_frag, void *data)
6620 {
6621 	if (is_frag)
6622 		skb_free_frag(data);
6623 	else
6624 		kfree(data);
6625 }
6626 
6627 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6628 {
6629 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6630 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6631 
6632 	if (!ri->data)
6633 		return;
6634 
6635 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6636 			 map_sz, PCI_DMA_FROMDEVICE);
6637 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6638 	ri->data = NULL;
6639 }
6640 
6642 /* Returns size of skb allocated or < 0 on error.
6643  *
6644  * We only need to fill in the address because the other members
6645  * of the RX descriptor are invariant, see tg3_init_rings.
6646  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6648  * posting buffers we only dirty the first cache line of the RX
6649  * descriptor (containing the address).  Whereas for the RX status
6650  * buffers the cpu only reads the last cacheline of the RX descriptor
6651  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6652  */
6653 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6654 			     u32 opaque_key, u32 dest_idx_unmasked,
6655 			     unsigned int *frag_size)
6656 {
6657 	struct tg3_rx_buffer_desc *desc;
6658 	struct ring_info *map;
6659 	u8 *data;
6660 	dma_addr_t mapping;
6661 	int skb_size, data_size, dest_idx;
6662 
6663 	switch (opaque_key) {
6664 	case RXD_OPAQUE_RING_STD:
6665 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6666 		desc = &tpr->rx_std[dest_idx];
6667 		map = &tpr->rx_std_buffers[dest_idx];
6668 		data_size = tp->rx_pkt_map_sz;
6669 		break;
6670 
6671 	case RXD_OPAQUE_RING_JUMBO:
6672 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6673 		desc = &tpr->rx_jmb[dest_idx].std;
6674 		map = &tpr->rx_jmb_buffers[dest_idx];
6675 		data_size = TG3_RX_JMB_MAP_SZ;
6676 		break;
6677 
6678 	default:
6679 		return -EINVAL;
6680 	}
6681 
6682 	/* Do not overwrite any of the map or rp information
6683 	 * until we are sure we can commit to a new buffer.
6684 	 *
6685 	 * Callers depend upon this behavior and assume that
6686 	 * we leave everything unchanged if we fail.
6687 	 */
6688 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6689 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
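	/* Buffers that fit within a page come from the page-fragment
	 * allocator; *frag_size is later handed to build_skb(), where a
	 * value of 0 denotes a kmalloc()ed buffer.
	 */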
6690 	if (skb_size <= PAGE_SIZE) {
6691 		data = netdev_alloc_frag(skb_size);
6692 		*frag_size = skb_size;
6693 	} else {
6694 		data = kmalloc(skb_size, GFP_ATOMIC);
6695 		*frag_size = 0;
6696 	}
6697 	if (!data)
6698 		return -ENOMEM;
6699 
6700 	mapping = pci_map_single(tp->pdev,
6701 				 data + TG3_RX_OFFSET(tp),
6702 				 data_size,
6703 				 PCI_DMA_FROMDEVICE);
6704 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6705 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6706 		return -EIO;
6707 	}
6708 
6709 	map->data = data;
6710 	dma_unmap_addr_set(map, mapping, mapping);
6711 
6712 	desc->addr_hi = ((u64)mapping >> 32);
6713 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6714 
6715 	return data_size;
6716 }
6717 
6718 /* We only need to move over in the address because the other
6719  * members of the RX descriptor are invariant.  See notes above
6720  * tg3_alloc_rx_data for full details.
6721  */
6722 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6723 			   struct tg3_rx_prodring_set *dpr,
6724 			   u32 opaque_key, int src_idx,
6725 			   u32 dest_idx_unmasked)
6726 {
6727 	struct tg3 *tp = tnapi->tp;
6728 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6729 	struct ring_info *src_map, *dest_map;
6730 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6731 	int dest_idx;
6732 
6733 	switch (opaque_key) {
6734 	case RXD_OPAQUE_RING_STD:
6735 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6736 		dest_desc = &dpr->rx_std[dest_idx];
6737 		dest_map = &dpr->rx_std_buffers[dest_idx];
6738 		src_desc = &spr->rx_std[src_idx];
6739 		src_map = &spr->rx_std_buffers[src_idx];
6740 		break;
6741 
6742 	case RXD_OPAQUE_RING_JUMBO:
6743 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6744 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6745 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6746 		src_desc = &spr->rx_jmb[src_idx].std;
6747 		src_map = &spr->rx_jmb_buffers[src_idx];
6748 		break;
6749 
6750 	default:
6751 		return;
6752 	}
6753 
6754 	dest_map->data = src_map->data;
6755 	dma_unmap_addr_set(dest_map, mapping,
6756 			   dma_unmap_addr(src_map, mapping));
6757 	dest_desc->addr_hi = src_desc->addr_hi;
6758 	dest_desc->addr_lo = src_desc->addr_lo;
6759 
6760 	/* Ensure that the update to the skb happens after the physical
6761 	 * addresses have been transferred to the new BD location.
6762 	 */
6763 	smp_wmb();
6764 
6765 	src_map->data = NULL;
6766 }
6767 
6768 /* The RX ring scheme is composed of multiple rings which post fresh
6769  * buffers to the chip, and one special ring the chip uses to report
6770  * status back to the host.
6771  *
6772  * The special ring reports the status of received packets to the
6773  * host.  The chip does not write into the original descriptor the
6774  * RX buffer was obtained from.  The chip simply takes the original
6775  * descriptor as provided by the host, updates the status and length
6776  * field, then writes this into the next status ring entry.
6777  *
6778  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6780  * it is first placed into the on-chip ram.  When the packet's length
6781  * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
 * whose MAXLEN covers the new packet's length is chosen.
6784  *
6785  * The "separate ring for rx status" scheme may sound queer, but it makes
6786  * sense from a cache coherency perspective.  If only the host writes
6787  * to the buffer post rings, and only the chip writes to the rx status
6788  * rings, then cache lines never move beyond shared-modified state.
6789  * If both the host and chip were to write into the same ring, cache line
6790  * eviction could occur since both entities want it in an exclusive state.
6791  */
6792 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6793 {
6794 	struct tg3 *tp = tnapi->tp;
6795 	u32 work_mask, rx_std_posted = 0;
6796 	u32 std_prod_idx, jmb_prod_idx;
6797 	u32 sw_idx = tnapi->rx_rcb_ptr;
6798 	u16 hw_idx;
6799 	int received;
6800 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6801 
6802 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6803 	/*
6804 	 * We need to order the read of hw_idx and the read of
6805 	 * the opaque cookie.
6806 	 */
6807 	rmb();
6808 	work_mask = 0;
6809 	received = 0;
6810 	std_prod_idx = tpr->rx_std_prod_idx;
6811 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6812 	while (sw_idx != hw_idx && budget > 0) {
6813 		struct ring_info *ri;
6814 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6815 		unsigned int len;
6816 		struct sk_buff *skb;
6817 		dma_addr_t dma_addr;
6818 		u32 opaque_key, desc_idx, *post_ptr;
6819 		u8 *data;
6820 		u64 tstamp = 0;
6821 
6822 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6823 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6824 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6825 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6826 			dma_addr = dma_unmap_addr(ri, mapping);
6827 			data = ri->data;
6828 			post_ptr = &std_prod_idx;
6829 			rx_std_posted++;
6830 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6831 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6832 			dma_addr = dma_unmap_addr(ri, mapping);
6833 			data = ri->data;
6834 			post_ptr = &jmb_prod_idx;
6835 		} else
6836 			goto next_pkt_nopost;
6837 
6838 		work_mask |= opaque_key;
6839 
6840 		if (desc->err_vlan & RXD_ERR_MASK) {
6841 		drop_it:
6842 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6843 				       desc_idx, *post_ptr);
6844 		drop_it_no_recycle:
			/* The other statistics are tracked by the card. */
6846 			tp->rx_dropped++;
6847 			goto next_pkt;
6848 		}
6849 
6850 		prefetch(data + TG3_RX_OFFSET(tp));
6851 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6852 		      ETH_FCS_LEN;
6853 
6854 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6855 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6856 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857 		     RXD_FLAG_PTPSTAT_PTPV2) {
6858 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6859 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6860 		}
6861 
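		/* Large packets are passed up in place and the ring slot
		 * is refilled with a fresh buffer; small packets are
		 * copied into a new skb so the DMA buffer can be
		 * recycled.
		 */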
6862 		if (len > TG3_RX_COPY_THRESH(tp)) {
6863 			int skb_size;
6864 			unsigned int frag_size;
6865 
6866 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6867 						    *post_ptr, &frag_size);
6868 			if (skb_size < 0)
6869 				goto drop_it;
6870 
6871 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6872 					 PCI_DMA_FROMDEVICE);
6873 
6874 			/* Ensure that the update to the data happens
6875 			 * after the usage of the old DMA mapping.
6876 			 */
6877 			smp_wmb();
6878 
6879 			ri->data = NULL;
6880 
6881 			skb = build_skb(data, frag_size);
6882 			if (!skb) {
6883 				tg3_frag_free(frag_size != 0, data);
6884 				goto drop_it_no_recycle;
6885 			}
6886 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6887 		} else {
6888 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6889 				       desc_idx, *post_ptr);
6890 
6891 			skb = netdev_alloc_skb(tp->dev,
6892 					       len + TG3_RAW_IP_ALIGN);
6893 			if (skb == NULL)
6894 				goto drop_it_no_recycle;
6895 
6896 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
6898 			memcpy(skb->data,
6899 			       data + TG3_RX_OFFSET(tp),
6900 			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);
6902 		}
6903 
6904 		skb_put(skb, len);
6905 		if (tstamp)
6906 			tg3_hwclock_to_timestamp(tp, tstamp,
6907 						 skb_hwtstamps(skb));
6908 
6909 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6910 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6911 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6912 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6913 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6914 		else
6915 			skb_checksum_none_assert(skb);
6916 
6917 		skb->protocol = eth_type_trans(skb, tp->dev);
6918 
6919 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6920 		    skb->protocol != htons(ETH_P_8021Q) &&
6921 		    skb->protocol != htons(ETH_P_8021AD)) {
6922 			dev_kfree_skb_any(skb);
6923 			goto drop_it_no_recycle;
6924 		}
6925 
6926 		if (desc->type_flags & RXD_FLAG_VLAN &&
6927 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6928 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6929 					       desc->err_vlan & RXD_VLAN_MASK);
6930 
6931 		napi_gro_receive(&tnapi->napi, skb);
6932 
6933 		received++;
6934 		budget--;
6935 
6936 next_pkt:
6937 		(*post_ptr)++;
6938 
6939 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6940 			tpr->rx_std_prod_idx = std_prod_idx &
6941 					       tp->rx_std_ring_mask;
6942 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6943 				     tpr->rx_std_prod_idx);
6944 			work_mask &= ~RXD_OPAQUE_RING_STD;
6945 			rx_std_posted = 0;
6946 		}
6947 next_pkt_nopost:
6948 		sw_idx++;
6949 		sw_idx &= tp->rx_ret_ring_mask;
6950 
6951 		/* Refresh hw_idx to see if there is new work */
6952 		if (sw_idx == hw_idx) {
6953 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6954 			rmb();
6955 		}
6956 	}
6957 
6958 	/* ACK the status ring. */
6959 	tnapi->rx_rcb_ptr = sw_idx;
6960 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6961 
6962 	/* Refill RX ring(s). */
6963 	if (!tg3_flag(tp, ENABLE_RSS)) {
6964 		/* Sync BD data before updating mailbox */
6965 		wmb();
6966 
6967 		if (work_mask & RXD_OPAQUE_RING_STD) {
6968 			tpr->rx_std_prod_idx = std_prod_idx &
6969 					       tp->rx_std_ring_mask;
6970 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6971 				     tpr->rx_std_prod_idx);
6972 		}
6973 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6974 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6975 					       tp->rx_jmb_ring_mask;
6976 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6977 				     tpr->rx_jmb_prod_idx);
6978 		}
6979 		mmiowb();
6980 	} else if (work_mask) {
6981 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6982 		 * updated before the producer indices can be updated.
6983 		 */
6984 		smp_wmb();
6985 
6986 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6987 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6988 
6989 		if (tnapi != &tp->napi[1]) {
6990 			tp->rx_refill = true;
6991 			napi_schedule(&tp->napi[1].napi);
6992 		}
6993 	}
6994 
6995 	return received;
6996 }
6997 
6998 static void tg3_poll_link(struct tg3 *tp)
6999 {
7000 	/* handle link change and other phy events */
7001 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7002 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7003 
7004 		if (sblk->status & SD_STATUS_LINK_CHG) {
7005 			sblk->status = SD_STATUS_UPDATED |
7006 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7007 			spin_lock(&tp->lock);
7008 			if (tg3_flag(tp, USE_PHYLIB)) {
7009 				tw32_f(MAC_STATUS,
7010 				     (MAC_STATUS_SYNC_CHANGED |
7011 				      MAC_STATUS_CFG_CHANGED |
7012 				      MAC_STATUS_MI_COMPLETION |
7013 				      MAC_STATUS_LNKSTATE_CHANGED));
7014 				udelay(40);
7015 			} else
7016 				tg3_setup_phy(tp, false);
7017 			spin_unlock(&tp->lock);
7018 		}
7019 	}
7020 }
7021 
7022 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7023 				struct tg3_rx_prodring_set *dpr,
7024 				struct tg3_rx_prodring_set *spr)
7025 {
7026 	u32 si, di, cpycnt, src_prod_idx;
7027 	int i, err = 0;
7028 
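	/* Move buffers posted to a per-vector producer ring (spr) over
	 * to the hardware-visible ring (dpr) owned by vector 0.
	 */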
7029 	while (1) {
7030 		src_prod_idx = spr->rx_std_prod_idx;
7031 
7032 		/* Make sure updates to the rx_std_buffers[] entries and the
7033 		 * standard producer index are seen in the correct order.
7034 		 */
7035 		smp_rmb();
7036 
7037 		if (spr->rx_std_cons_idx == src_prod_idx)
7038 			break;
7039 
7040 		if (spr->rx_std_cons_idx < src_prod_idx)
7041 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7042 		else
7043 			cpycnt = tp->rx_std_ring_mask + 1 -
7044 				 spr->rx_std_cons_idx;
7045 
7046 		cpycnt = min(cpycnt,
7047 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7048 
7049 		si = spr->rx_std_cons_idx;
7050 		di = dpr->rx_std_prod_idx;
7051 
7052 		for (i = di; i < di + cpycnt; i++) {
7053 			if (dpr->rx_std_buffers[i].data) {
7054 				cpycnt = i - di;
7055 				err = -ENOSPC;
7056 				break;
7057 			}
7058 		}
7059 
7060 		if (!cpycnt)
7061 			break;
7062 
7063 		/* Ensure that updates to the rx_std_buffers ring and the
7064 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7065 		 * ordered correctly WRT the skb check above.
7066 		 */
7067 		smp_rmb();
7068 
7069 		memcpy(&dpr->rx_std_buffers[di],
7070 		       &spr->rx_std_buffers[si],
7071 		       cpycnt * sizeof(struct ring_info));
7072 
7073 		for (i = 0; i < cpycnt; i++, di++, si++) {
7074 			struct tg3_rx_buffer_desc *sbd, *dbd;
7075 			sbd = &spr->rx_std[si];
7076 			dbd = &dpr->rx_std[di];
7077 			dbd->addr_hi = sbd->addr_hi;
7078 			dbd->addr_lo = sbd->addr_lo;
7079 		}
7080 
7081 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7082 				       tp->rx_std_ring_mask;
7083 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7084 				       tp->rx_std_ring_mask;
7085 	}
7086 
7087 	while (1) {
7088 		src_prod_idx = spr->rx_jmb_prod_idx;
7089 
7090 		/* Make sure updates to the rx_jmb_buffers[] entries and
7091 		 * the jumbo producer index are seen in the correct order.
7092 		 */
7093 		smp_rmb();
7094 
7095 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7096 			break;
7097 
7098 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7099 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7100 		else
7101 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7102 				 spr->rx_jmb_cons_idx;
7103 
7104 		cpycnt = min(cpycnt,
7105 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7106 
7107 		si = spr->rx_jmb_cons_idx;
7108 		di = dpr->rx_jmb_prod_idx;
7109 
7110 		for (i = di; i < di + cpycnt; i++) {
7111 			if (dpr->rx_jmb_buffers[i].data) {
7112 				cpycnt = i - di;
7113 				err = -ENOSPC;
7114 				break;
7115 			}
7116 		}
7117 
7118 		if (!cpycnt)
7119 			break;
7120 
7121 		/* Ensure that updates to the rx_jmb_buffers ring and the
7122 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7123 		 * ordered correctly WRT the skb check above.
7124 		 */
7125 		smp_rmb();
7126 
7127 		memcpy(&dpr->rx_jmb_buffers[di],
7128 		       &spr->rx_jmb_buffers[si],
7129 		       cpycnt * sizeof(struct ring_info));
7130 
7131 		for (i = 0; i < cpycnt; i++, di++, si++) {
7132 			struct tg3_rx_buffer_desc *sbd, *dbd;
7133 			sbd = &spr->rx_jmb[si].std;
7134 			dbd = &dpr->rx_jmb[di].std;
7135 			dbd->addr_hi = sbd->addr_hi;
7136 			dbd->addr_lo = sbd->addr_lo;
7137 		}
7138 
7139 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7140 				       tp->rx_jmb_ring_mask;
7141 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7142 				       tp->rx_jmb_ring_mask;
7143 	}
7144 
7145 	return err;
7146 }
7147 
7148 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7149 {
7150 	struct tg3 *tp = tnapi->tp;
7151 
7152 	/* run TX completion thread */
7153 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7154 		tg3_tx(tnapi);
7155 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7156 			return work_done;
7157 	}
7158 
7159 	if (!tnapi->rx_rcb_prod_idx)
7160 		return work_done;
7161 
7162 	/* run RX thread, within the bounds set by NAPI.
7163 	 * All RX "locking" is done by ensuring outside
7164 	 * code synchronizes with tg3->napi.poll()
7165 	 */
7166 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7167 		work_done += tg3_rx(tnapi, budget - work_done);
7168 
7169 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7170 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7171 		int i, err = 0;
7172 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7173 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7174 
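		/* Vector 1 refills the hardware rings on behalf of all RSS
		 * vectors; tg3_rx() sets tp->rx_refill and schedules this
		 * NAPI context whenever another vector consumes buffers.
		 */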
7175 		tp->rx_refill = false;
7176 		for (i = 1; i <= tp->rxq_cnt; i++)
7177 			err |= tg3_rx_prodring_xfer(tp, dpr,
7178 						    &tp->napi[i].prodring);
7179 
7180 		wmb();
7181 
7182 		if (std_prod_idx != dpr->rx_std_prod_idx)
7183 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7184 				     dpr->rx_std_prod_idx);
7185 
7186 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7187 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7188 				     dpr->rx_jmb_prod_idx);
7189 
7190 		mmiowb();
7191 
7192 		if (err)
7193 			tw32_f(HOSTCC_MODE, tp->coal_now);
7194 	}
7195 
7196 	return work_done;
7197 }
7198 
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7200 {
7201 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202 		schedule_work(&tp->reset_task);
7203 }
7204 
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7206 {
7207 	cancel_work_sync(&tp->reset_task);
7208 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7209 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7210 }
7211 
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7213 {
7214 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215 	struct tg3 *tp = tnapi->tp;
7216 	int work_done = 0;
7217 	struct tg3_hw_status *sblk = tnapi->hw_status;
7218 
7219 	while (1) {
7220 		work_done = tg3_poll_work(tnapi, work_done, budget);
7221 
7222 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7223 			goto tx_recovery;
7224 
7225 		if (unlikely(work_done >= budget))
7226 			break;
7227 
7228 		/* tp->last_tag is used in tg3_int_reenable() below
7229 		 * to tell the hw how much work has been processed,
7230 		 * so we must read it before checking for more work.
7231 		 */
7232 		tnapi->last_tag = sblk->status_tag;
7233 		tnapi->last_irq_tag = tnapi->last_tag;
7234 		rmb();
7235 
7236 		/* check for RX/TX work to do */
7237 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7239 
			/* This test is not race-free, but looping again
			 * reduces the number of interrupts taken.
			 */
7243 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7244 				continue;
7245 
7246 			napi_complete_done(napi, work_done);
7247 			/* Reenable interrupts. */
7248 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7249 
7250 			/* This test here is synchronized by napi_schedule()
7251 			 * and napi_complete() to close the race condition.
7252 			 */
7253 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7255 						  HOSTCC_MODE_ENABLE |
7256 						  tnapi->coal_now);
7257 			}
7258 			mmiowb();
7259 			break;
7260 		}
7261 	}
7262 
7263 	return work_done;
7264 
7265 tx_recovery:
7266 	/* work_done is guaranteed to be less than budget. */
7267 	napi_complete(napi);
7268 	tg3_reset_task_schedule(tp);
7269 	return work_done;
7270 }
7271 
7272 static void tg3_process_error(struct tg3 *tp)
7273 {
7274 	u32 val;
7275 	bool real_error = false;
7276 
7277 	if (tg3_flag(tp, ERROR_PROCESSED))
7278 		return;
7279 
7280 	/* Check Flow Attention register */
7281 	val = tr32(HOSTCC_FLOW_ATTN);
7282 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7284 		real_error = true;
7285 	}
7286 
7287 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7289 		real_error = true;
7290 	}
7291 
7292 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7294 		real_error = true;
7295 	}
7296 
7297 	if (!real_error)
7298 		return;
7299 
7300 	tg3_dump_state(tp);
7301 
7302 	tg3_flag_set(tp, ERROR_PROCESSED);
7303 	tg3_reset_task_schedule(tp);
7304 }
7305 
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7307 {
7308 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309 	struct tg3 *tp = tnapi->tp;
7310 	int work_done = 0;
7311 	struct tg3_hw_status *sblk = tnapi->hw_status;
7312 
7313 	while (1) {
7314 		if (sblk->status & SD_STATUS_ERROR)
7315 			tg3_process_error(tp);
7316 
7317 		tg3_poll_link(tp);
7318 
7319 		work_done = tg3_poll_work(tnapi, work_done, budget);
7320 
7321 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7322 			goto tx_recovery;
7323 
7324 		if (unlikely(work_done >= budget))
7325 			break;
7326 
7327 		if (tg3_flag(tp, TAGGED_STATUS)) {
7328 			/* tp->last_tag is used in tg3_int_reenable() below
7329 			 * to tell the hw how much work has been processed,
7330 			 * so we must read it before checking for more work.
7331 			 */
7332 			tnapi->last_tag = sblk->status_tag;
7333 			tnapi->last_irq_tag = tnapi->last_tag;
7334 			rmb();
7335 		} else
7336 			sblk->status &= ~SD_STATUS_UPDATED;
7337 
7338 		if (likely(!tg3_has_work(tnapi))) {
7339 			napi_complete_done(napi, work_done);
7340 			tg3_int_reenable(tnapi);
7341 			break;
7342 		}
7343 	}
7344 
7345 	return work_done;
7346 
7347 tx_recovery:
7348 	/* work_done is guaranteed to be less than budget. */
7349 	napi_complete(napi);
7350 	tg3_reset_task_schedule(tp);
7351 	return work_done;
7352 }
7353 
7354 static void tg3_napi_disable(struct tg3 *tp)
7355 {
7356 	int i;
7357 
7358 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7359 		napi_disable(&tp->napi[i].napi);
7360 }
7361 
7362 static void tg3_napi_enable(struct tg3 *tp)
7363 {
7364 	int i;
7365 
7366 	for (i = 0; i < tp->irq_cnt; i++)
7367 		napi_enable(&tp->napi[i].napi);
7368 }
7369 
7370 static void tg3_napi_init(struct tg3 *tp)
7371 {
7372 	int i;
7373 
7374 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7375 	for (i = 1; i < tp->irq_cnt; i++)
7376 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7377 }
7378 
7379 static void tg3_napi_fini(struct tg3 *tp)
7380 {
7381 	int i;
7382 
7383 	for (i = 0; i < tp->irq_cnt; i++)
7384 		netif_napi_del(&tp->napi[i].napi);
7385 }
7386 
7387 static inline void tg3_netif_stop(struct tg3 *tp)
7388 {
7389 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7390 	tg3_napi_disable(tp);
7391 	netif_carrier_off(tp->dev);
7392 	netif_tx_disable(tp->dev);
7393 }
7394 
7395 /* tp->lock must be held */
7396 static inline void tg3_netif_start(struct tg3 *tp)
7397 {
7398 	tg3_ptp_resume(tp);
7399 
7400 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7401 	 * appropriate so long as all callers are assured to
7402 	 * have free tx slots (such as after tg3_init_hw)
7403 	 */
7404 	netif_tx_wake_all_queues(tp->dev);
7405 
7406 	if (tp->link_up)
7407 		netif_carrier_on(tp->dev);
7408 
7409 	tg3_napi_enable(tp);
7410 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7411 	tg3_enable_ints(tp);
7412 }
7413 
7414 static void tg3_irq_quiesce(struct tg3 *tp)
7415 	__releases(tp->lock)
7416 	__acquires(tp->lock)
7417 {
7418 	int i;
7419 
7420 	BUG_ON(tp->irq_sync);
7421 
7422 	tp->irq_sync = 1;
7423 	smp_mb();
7424 
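	/* Drop the lock while waiting: synchronize_irq() may sleep, and
	 * the handlers check tp->irq_sync (set above) so they bail out
	 * without touching NAPI.
	 */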
7425 	spin_unlock_bh(&tp->lock);
7426 
7427 	for (i = 0; i < tp->irq_cnt; i++)
7428 		synchronize_irq(tp->napi[i].irq_vec);
7429 
7430 	spin_lock_bh(&tp->lock);
7431 }
7432 
7433 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7434  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7435  * with as well.  Most of the time, this is not necessary except when
7436  * shutting down the device.
7437  */
7438 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7439 {
7440 	spin_lock_bh(&tp->lock);
7441 	if (irq_sync)
7442 		tg3_irq_quiesce(tp);
7443 }
7444 
7445 static inline void tg3_full_unlock(struct tg3 *tp)
7446 {
7447 	spin_unlock_bh(&tp->lock);
7448 }
7449 
/* One-shot MSI handler - the chip automatically disables the interrupt
 * after sending the MSI, so the driver doesn't have to.
 */
7453 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7454 {
7455 	struct tg3_napi *tnapi = dev_id;
7456 	struct tg3 *tp = tnapi->tp;
7457 
7458 	prefetch(tnapi->hw_status);
7459 	if (tnapi->rx_rcb)
7460 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7461 
7462 	if (likely(!tg3_irq_sync(tp)))
7463 		napi_schedule(&tnapi->napi);
7464 
7465 	return IRQ_HANDLED;
7466 }
7467 
7468 /* MSI ISR - No need to check for interrupt sharing and no need to
7469  * flush status block and interrupt mailbox. PCI ordering rules
7470  * guarantee that MSI will arrive after the status block.
7471  */
7472 static irqreturn_t tg3_msi(int irq, void *dev_id)
7473 {
7474 	struct tg3_napi *tnapi = dev_id;
7475 	struct tg3 *tp = tnapi->tp;
7476 
7477 	prefetch(tnapi->hw_status);
7478 	if (tnapi->rx_rcb)
7479 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7480 	/*
7481 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7482 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
7484 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7485 	 * event coalescing.
7486 	 */
7487 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7488 	if (likely(!tg3_irq_sync(tp)))
7489 		napi_schedule(&tnapi->napi);
7490 
7491 	return IRQ_RETVAL(1);
7492 }
7493 
7494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7495 {
7496 	struct tg3_napi *tnapi = dev_id;
7497 	struct tg3 *tp = tnapi->tp;
7498 	struct tg3_hw_status *sblk = tnapi->hw_status;
7499 	unsigned int handled = 1;
7500 
7501 	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write posted prior to the
	 * interrupt has landed in host memory.  Reading the PCI State
	 * register both confirms whether the interrupt is ours and
	 * flushes the status block.
7505 	 */
7506 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7507 		if (tg3_flag(tp, CHIP_RESETTING) ||
7508 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7509 			handled = 0;
7510 			goto out;
7511 		}
7512 	}
7513 
7514 	/*
7515 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7516 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
7518 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7519 	 * event coalescing.
7520 	 *
7521 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7522 	 * spurious interrupts.  The flush impacts performance but
7523 	 * excessive spurious interrupts can be worse in some cases.
7524 	 */
7525 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7526 	if (tg3_irq_sync(tp))
7527 		goto out;
7528 	sblk->status &= ~SD_STATUS_UPDATED;
7529 	if (likely(tg3_has_work(tnapi))) {
7530 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7531 		napi_schedule(&tnapi->napi);
7532 	} else {
		/* No work, shared interrupt perhaps?  Re-enable
		 * interrupts, and flush that PCI write.
7535 		 */
7536 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537 			       0x00000000);
7538 	}
7539 out:
7540 	return IRQ_RETVAL(handled);
7541 }
7542 
7543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7544 {
7545 	struct tg3_napi *tnapi = dev_id;
7546 	struct tg3 *tp = tnapi->tp;
7547 	struct tg3_hw_status *sblk = tnapi->hw_status;
7548 	unsigned int handled = 1;
7549 
7550 	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block write posted prior to the
	 * interrupt has landed in host memory.  Reading the PCI State
	 * register both confirms whether the interrupt is ours and
	 * flushes the status block.
7554 	 */
7555 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7556 		if (tg3_flag(tp, CHIP_RESETTING) ||
7557 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7558 			handled = 0;
7559 			goto out;
7560 		}
7561 	}
7562 
7563 	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
7567 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7568 	 * event coalescing.
7569 	 *
7570 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7571 	 * spurious interrupts.  The flush impacts performance but
7572 	 * excessive spurious interrupts can be worse in some cases.
7573 	 */
7574 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575 
7576 	/*
7577 	 * In a shared interrupt configuration, sometimes other devices'
7578 	 * interrupts will scream.  We record the current status tag here
7579 	 * so that the above check can report that the screaming interrupts
7580 	 * are unhandled.  Eventually they will be silenced.
7581 	 */
7582 	tnapi->last_irq_tag = sblk->status_tag;
7583 
7584 	if (tg3_irq_sync(tp))
7585 		goto out;
7586 
7587 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7588 
7589 	napi_schedule(&tnapi->napi);
7590 
7591 out:
7592 	return IRQ_RETVAL(handled);
7593 }
7594 
7595 /* ISR for interrupt test */
7596 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7597 {
7598 	struct tg3_napi *tnapi = dev_id;
7599 	struct tg3 *tp = tnapi->tp;
7600 	struct tg3_hw_status *sblk = tnapi->hw_status;
7601 
7602 	if ((sblk->status & SD_STATUS_UPDATED) ||
7603 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7604 		tg3_disable_ints(tp);
7605 		return IRQ_RETVAL(1);
7606 	}
7607 	return IRQ_RETVAL(0);
7608 }
7609 
7610 #ifdef CONFIG_NET_POLL_CONTROLLER
7611 static void tg3_poll_controller(struct net_device *dev)
7612 {
7613 	int i;
7614 	struct tg3 *tp = netdev_priv(dev);
7615 
7616 	if (tg3_irq_sync(tp))
7617 		return;
7618 
7619 	for (i = 0; i < tp->irq_cnt; i++)
7620 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7621 }
7622 #endif
7623 
7624 static void tg3_tx_timeout(struct net_device *dev)
7625 {
7626 	struct tg3 *tp = netdev_priv(dev);
7627 
7628 	if (netif_msg_tx_err(tp)) {
7629 		netdev_err(dev, "transmit timed out, resetting\n");
7630 		tg3_dump_state(tp);
7631 	}
7632 
7633 	tg3_reset_task_schedule(tp);
7634 }
7635 
7636 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7637 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7638 {
7639 	u32 base = (u32) mapping & 0xffffffff;
7640 
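	/* If the low 32 bits of the end address wrap past zero, the buffer
	 * straddles a 4GB boundary.  The extra 8 bytes are a guard band,
	 * presumably covering the chip's DMA read-ahead.
	 */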
7641 	return base + len + 8 < base;
7642 }
7643 
7644 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7645  * of any 4GB boundaries: 4G, 8G, etc
7646  */
7647 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7648 					   u32 len, u32 mss)
7649 {
7650 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7651 		u32 base = (u32) mapping & 0xffffffff;
7652 
7653 		return ((base + len + (mss & 0x3fff)) < base);
7654 	}
7655 	return 0;
7656 }
7657 
7658 /* Test for DMA addresses > 40-bit */
7659 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660 					  int len)
7661 {
7662 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7663 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7664 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7665 	return 0;
7666 #else
7667 	return 0;
7668 #endif
7669 }
7670 
7671 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7672 				 dma_addr_t mapping, u32 len, u32 flags,
7673 				 u32 mss, u32 vlan)
7674 {
7675 	txbd->addr_hi = ((u64) mapping >> 32);
7676 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7677 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7678 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7679 }
7680 
7681 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7682 			    dma_addr_t map, u32 len, u32 flags,
7683 			    u32 mss, u32 vlan)
7684 {
7685 	struct tg3 *tp = tnapi->tp;
7686 	bool hwbug = false;
7687 
7688 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7689 		hwbug = true;
7690 
7691 	if (tg3_4g_overflow_test(map, len))
7692 		hwbug = true;
7693 
7694 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7695 		hwbug = true;
7696 
7697 	if (tg3_40bit_overflow_test(tp, map, len))
7698 		hwbug = true;
7699 
7700 	if (tp->dma_limit) {
7701 		u32 prvidx = *entry;
7702 		u32 tmp_flag = flags & ~TXD_FLAG_END;
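		/* Chop the mapping into dma_limit-sized descriptors; every
		 * piece but the last is marked fragmented and carries
		 * tmp_flag, i.e. with TXD_FLAG_END cleared.
		 */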
7703 		while (len > tp->dma_limit && *budget) {
7704 			u32 frag_len = tp->dma_limit;
7705 			len -= tp->dma_limit;
7706 
			/* Avoid the 8-byte DMA problem: shorten this piece
			 * so the final piece stays larger than 8 bytes.
			 */
7708 			if (len <= 8) {
7709 				len += tp->dma_limit / 2;
7710 				frag_len = tp->dma_limit / 2;
7711 			}
7712 
7713 			tnapi->tx_buffers[*entry].fragmented = true;
7714 
7715 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7716 				      frag_len, tmp_flag, mss, vlan);
7717 			*budget -= 1;
7718 			prvidx = *entry;
7719 			*entry = NEXT_TX(*entry);
7720 
7721 			map += frag_len;
7722 		}
7723 
7724 		if (len) {
7725 			if (*budget) {
7726 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7727 					      len, flags, mss, vlan);
7728 				*budget -= 1;
7729 				*entry = NEXT_TX(*entry);
7730 			} else {
7731 				hwbug = true;
7732 				tnapi->tx_buffers[prvidx].fragmented = false;
7733 			}
7734 		}
7735 	} else {
7736 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737 			      len, flags, mss, vlan);
7738 		*entry = NEXT_TX(*entry);
7739 	}
7740 
7741 	return hwbug;
7742 }
7743 
7744 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7745 {
7746 	int i;
7747 	struct sk_buff *skb;
7748 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7749 
7750 	skb = txb->skb;
7751 	txb->skb = NULL;
7752 
7753 	pci_unmap_single(tnapi->tp->pdev,
7754 			 dma_unmap_addr(txb, mapping),
7755 			 skb_headlen(skb),
7756 			 PCI_DMA_TODEVICE);
7757 
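	/* The unmap above covered the head descriptor; step over any extra
	 * descriptors created when that mapping was split up for the
	 * dma_limit workaround.
	 */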
7758 	while (txb->fragmented) {
7759 		txb->fragmented = false;
7760 		entry = NEXT_TX(entry);
7761 		txb = &tnapi->tx_buffers[entry];
7762 	}
7763 
7764 	for (i = 0; i <= last; i++) {
7765 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7766 
7767 		entry = NEXT_TX(entry);
7768 		txb = &tnapi->tx_buffers[entry];
7769 
7770 		pci_unmap_page(tnapi->tp->pdev,
7771 			       dma_unmap_addr(txb, mapping),
7772 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7773 
7774 		while (txb->fragmented) {
7775 			txb->fragmented = false;
7776 			entry = NEXT_TX(entry);
7777 			txb = &tnapi->tx_buffers[entry];
7778 		}
7779 	}
7780 }
7781 
7782 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7783 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7784 				       struct sk_buff **pskb,
7785 				       u32 *entry, u32 *budget,
7786 				       u32 base_flags, u32 mss, u32 vlan)
7787 {
7788 	struct tg3 *tp = tnapi->tp;
7789 	struct sk_buff *new_skb, *skb = *pskb;
7790 	dma_addr_t new_addr = 0;
7791 	int ret = 0;
7792 
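	/* Replace the offending skb with a fresh linear copy; on the 5701
	 * the copy also gets enough headroom to 4-byte-align skb->data.
	 */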
7793 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7794 		new_skb = skb_copy(skb, GFP_ATOMIC);
7795 	else {
7796 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7797 
7798 		new_skb = skb_copy_expand(skb,
7799 					  skb_headroom(skb) + more_headroom,
7800 					  skb_tailroom(skb), GFP_ATOMIC);
7801 	}
7802 
7803 	if (!new_skb) {
7804 		ret = -1;
7805 	} else {
7806 		/* New SKB is guaranteed to be linear. */
7807 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7808 					  PCI_DMA_TODEVICE);
7809 		/* Make sure the mapping succeeded */
7810 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7811 			dev_kfree_skb_any(new_skb);
7812 			ret = -1;
7813 		} else {
7814 			u32 save_entry = *entry;
7815 
7816 			base_flags |= TXD_FLAG_END;
7817 
7818 			tnapi->tx_buffers[*entry].skb = new_skb;
7819 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7820 					   mapping, new_addr);
7821 
7822 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7823 					    new_skb->len, base_flags,
7824 					    mss, vlan)) {
7825 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7826 				dev_kfree_skb_any(new_skb);
7827 				ret = -1;
7828 			}
7829 		}
7830 	}
7831 
7832 	dev_kfree_skb_any(skb);
7833 	*pskb = new_skb;
7834 	return ret;
7835 }
7836 
7837 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7838 {
	/* Check whether we can ever have enough descriptors:
	 * gso_segs can exceed the current ring size, in which case the
	 * GSO workaround can never succeed.
	 */
7842 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7843 }
7844 
7845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7846 
7847 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7848  * indicated in tg3_tx_frag_set()
7849  */
7850 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7851 		       struct netdev_queue *txq, struct sk_buff *skb)
7852 {
7853 	struct sk_buff *segs, *nskb;
	/* Estimate the number of descriptors needed in the worst case:
	 * roughly three for every GSO segment.
	 */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7858 		netif_tx_stop_queue(txq);
7859 
7860 		/* netif_tx_stop_queue() must be done before checking
		 * the tx index in tg3_tx_avail() below, because in
7862 		 * tg3_tx(), we update tx index before checking for
7863 		 * netif_tx_queue_stopped().
7864 		 */
7865 		smp_mb();
7866 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7867 			return NETDEV_TX_BUSY;
7868 
7869 		netif_tx_wake_queue(txq);
7870 	}
7871 
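	/* Segment in software with the TSO feature bits masked off, then
	 * transmit each resulting MSS-sized skb individually.
	 */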
7872 	segs = skb_gso_segment(skb, tp->dev->features &
7873 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7874 	if (IS_ERR(segs) || !segs)
7875 		goto tg3_tso_bug_end;
7876 
7877 	do {
7878 		nskb = segs;
7879 		segs = segs->next;
7880 		nskb->next = NULL;
7881 		tg3_start_xmit(nskb, tp->dev);
7882 	} while (segs);
7883 
7884 tg3_tso_bug_end:
7885 	dev_kfree_skb_any(skb);
7886 
7887 	return NETDEV_TX_OK;
7888 }
7889 
7890 /* hard_start_xmit for all devices */
7891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7892 {
7893 	struct tg3 *tp = netdev_priv(dev);
7894 	u32 len, entry, base_flags, mss, vlan = 0;
7895 	u32 budget;
7896 	int i = -1, would_hit_hwbug;
7897 	dma_addr_t mapping;
7898 	struct tg3_napi *tnapi;
7899 	struct netdev_queue *txq;
7900 	unsigned int last;
7901 	struct iphdr *iph = NULL;
7902 	struct tcphdr *tcph = NULL;
7903 	__sum16 tcp_csum = 0, ip_csum = 0;
7904 	__be16 ip_tot_len = 0;
7905 
7906 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7907 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7908 	if (tg3_flag(tp, ENABLE_TSS))
7909 		tnapi++;
7910 
7911 	budget = tg3_tx_avail(tnapi);
7912 
7913 	/* We are running in BH disabled context with netif_tx_lock
7914 	 * and TX reclaim runs via tp->napi.poll inside of a software
7915 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7916 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7917 	 */
7918 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7919 		if (!netif_tx_queue_stopped(txq)) {
7920 			netif_tx_stop_queue(txq);
7921 
7922 			/* This is a hard error, log it. */
7923 			netdev_err(dev,
7924 				   "BUG! Tx Ring full when queue awake!\n");
7925 		}
7926 		return NETDEV_TX_BUSY;
7927 	}
7928 
7929 	entry = tnapi->tx_prod;
7930 	base_flags = 0;
7931 
7932 	mss = skb_shinfo(skb)->gso_size;
7933 	if (mss) {
7934 		u32 tcp_opt_len, hdr_len;
7935 
7936 		if (skb_cow_head(skb, 0))
7937 			goto drop;
7938 
7939 		iph = ip_hdr(skb);
7940 		tcp_opt_len = tcp_optlen(skb);
7941 
7942 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7943 
		/* HW/FW cannot correctly segment packets that have been
7945 		 * vlan encapsulated.
7946 		 */
7947 		if (skb->protocol == htons(ETH_P_8021Q) ||
7948 		    skb->protocol == htons(ETH_P_8021AD)) {
7949 			if (tg3_tso_bug_gso_check(tnapi, skb))
7950 				return tg3_tso_bug(tp, tnapi, txq, skb);
7951 			goto drop;
7952 		}
7953 
7954 		if (!skb_is_gso_v6(skb)) {
7955 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7956 			    tg3_flag(tp, TSO_BUG)) {
7957 				if (tg3_tso_bug_gso_check(tnapi, skb))
7958 					return tg3_tso_bug(tp, tnapi, txq, skb);
7959 				goto drop;
7960 			}
7961 			ip_csum = iph->check;
7962 			ip_tot_len = iph->tot_len;
7963 			iph->check = 0;
7964 			iph->tot_len = htons(mss + hdr_len);
7965 		}
7966 
7967 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7968 			       TXD_FLAG_CPU_POST_DMA);
7969 
7970 		tcph = tcp_hdr(skb);
7971 		tcp_csum = tcph->check;
7972 
7973 		if (tg3_flag(tp, HW_TSO_1) ||
7974 		    tg3_flag(tp, HW_TSO_2) ||
7975 		    tg3_flag(tp, HW_TSO_3)) {
7976 			tcph->check = 0;
7977 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7978 		} else {
7979 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7980 							 0, IPPROTO_TCP, 0);
7981 		}
7982 
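		/* Tell the hardware how long the headers are.  Where the bits
		 * land varies by generation: HW_TSO_3 scatters hdr_len across
		 * the mss field and base_flags, HW_TSO_2 packs it into the
		 * upper mss bits, and older parts encode only the IP and TCP
		 * option words.
		 */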
7983 		if (tg3_flag(tp, HW_TSO_3)) {
7984 			mss |= (hdr_len & 0xc) << 12;
7985 			if (hdr_len & 0x10)
7986 				base_flags |= 0x00000010;
7987 			base_flags |= (hdr_len & 0x3e0) << 5;
7988 		} else if (tg3_flag(tp, HW_TSO_2))
7989 			mss |= hdr_len << 9;
7990 		else if (tg3_flag(tp, HW_TSO_1) ||
7991 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7992 			if (tcp_opt_len || iph->ihl > 5) {
7993 				int tsflags;
7994 
7995 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7996 				mss |= (tsflags << 11);
7997 			}
7998 		} else {
7999 			if (tcp_opt_len || iph->ihl > 5) {
8000 				int tsflags;
8001 
8002 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8003 				base_flags |= tsflags << 12;
8004 			}
8005 		}
8006 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW cannot correctly checksum packets that have been
8008 		 * vlan encapsulated.
8009 		 */
8010 		if (skb->protocol == htons(ETH_P_8021Q) ||
8011 		    skb->protocol == htons(ETH_P_8021AD)) {
8012 			if (skb_checksum_help(skb))
8013 				goto drop;
8014 		} else  {
8015 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8016 		}
8017 	}
8018 
8019 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8020 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8021 		base_flags |= TXD_FLAG_JMB_PKT;
8022 
8023 	if (skb_vlan_tag_present(skb)) {
8024 		base_flags |= TXD_FLAG_VLAN;
8025 		vlan = skb_vlan_tag_get(skb);
8026 	}
8027 
8028 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8029 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8030 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8031 		base_flags |= TXD_FLAG_HWTSTAMP;
8032 	}
8033 
8034 	len = skb_headlen(skb);
8035 
8036 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8037 	if (pci_dma_mapping_error(tp->pdev, mapping))
8038 		goto drop;
8039 
8040 
8041 	tnapi->tx_buffers[entry].skb = skb;
8042 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8043 
8044 	would_hit_hwbug = 0;
8045 
8046 	if (tg3_flag(tp, 5701_DMA_BUG))
8047 		would_hit_hwbug = 1;
8048 
8049 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8050 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8051 			    mss, vlan)) {
8052 		would_hit_hwbug = 1;
8053 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8054 		u32 tmp_mss = mss;
8055 
8056 		if (!tg3_flag(tp, HW_TSO_1) &&
8057 		    !tg3_flag(tp, HW_TSO_2) &&
8058 		    !tg3_flag(tp, HW_TSO_3))
8059 			tmp_mss = 0;
8060 
8061 		/* Now loop through additional data
8062 		 * fragments, and queue them.
8063 		 */
8064 		last = skb_shinfo(skb)->nr_frags - 1;
8065 		for (i = 0; i <= last; i++) {
8066 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8067 
8068 			len = skb_frag_size(frag);
8069 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8070 						   len, DMA_TO_DEVICE);
8071 
8072 			tnapi->tx_buffers[entry].skb = NULL;
8073 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8074 					   mapping);
8075 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8076 				goto dma_error;
8077 
8078 			if (!budget ||
8079 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8080 					    len, base_flags |
8081 					    ((i == last) ? TXD_FLAG_END : 0),
8082 					    tmp_mss, vlan)) {
8083 				would_hit_hwbug = 1;
8084 				break;
8085 			}
8086 		}
8087 	}
8088 
8089 	if (would_hit_hwbug) {
8090 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8091 
8092 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8093 			/* If it's a TSO packet, do GSO instead of
8094 			 * allocating and copying to a large linear SKB
8095 			 */
8096 			if (ip_tot_len) {
8097 				iph->check = ip_csum;
8098 				iph->tot_len = ip_tot_len;
8099 			}
8100 			tcph->check = tcp_csum;
8101 			return tg3_tso_bug(tp, tnapi, txq, skb);
8102 		}
8103 
8104 		/* If the workaround fails due to memory/mapping
8105 		 * failure, silently drop this packet.
8106 		 */
8107 		entry = tnapi->tx_prod;
8108 		budget = tg3_tx_avail(tnapi);
8109 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8110 						base_flags, mss, vlan))
8111 			goto drop_nofree;
8112 	}
8113 
8114 	skb_tx_timestamp(skb);
8115 	netdev_tx_sent_queue(txq, skb->len);
8116 
8117 	/* Sync BD data before updating mailbox */
8118 	wmb();
8119 
8120 	tnapi->tx_prod = entry;
8121 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8122 		netif_tx_stop_queue(txq);
8123 
8124 		/* netif_tx_stop_queue() must be done before checking
		 * the tx index in tg3_tx_avail() below, because in
8126 		 * tg3_tx(), we update tx index before checking for
8127 		 * netif_tx_queue_stopped().
8128 		 */
8129 		smp_mb();
8130 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8131 			netif_tx_wake_queue(txq);
8132 	}
8133 
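	/* Hold off on the doorbell while the stack promises more packets
	 * (xmit_more), unless the queue has been stopped.
	 */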
8134 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8135 		/* Packets are ready, update Tx producer idx on card. */
8136 		tw32_tx_mbox(tnapi->prodmbox, entry);
8137 		mmiowb();
8138 	}
8139 
8140 	return NETDEV_TX_OK;
8141 
8142 dma_error:
8143 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8144 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8145 drop:
8146 	dev_kfree_skb_any(skb);
8147 drop_nofree:
8148 	tp->tx_dropped++;
8149 	return NETDEV_TX_OK;
8150 }
8151 
8152 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8153 {
8154 	if (enable) {
8155 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8156 				  MAC_MODE_PORT_MODE_MASK);
8157 
8158 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8159 
8160 		if (!tg3_flag(tp, 5705_PLUS))
8161 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8162 
8163 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8164 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8165 		else
8166 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8167 	} else {
8168 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8169 
8170 		if (tg3_flag(tp, 5705_PLUS) ||
8171 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8172 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8173 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8174 	}
8175 
8176 	tw32(MAC_MODE, tp->mac_mode);
8177 	udelay(40);
8178 }
8179 
8180 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8181 {
8182 	u32 val, bmcr, mac_mode, ptest = 0;
8183 
8184 	tg3_phy_toggle_apd(tp, false);
8185 	tg3_phy_toggle_automdix(tp, false);
8186 
8187 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8188 		return -EIO;
8189 
8190 	bmcr = BMCR_FULLDPLX;
8191 	switch (speed) {
8192 	case SPEED_10:
8193 		break;
8194 	case SPEED_100:
8195 		bmcr |= BMCR_SPEED100;
8196 		break;
8197 	case SPEED_1000:
8198 	default:
8199 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8200 			speed = SPEED_100;
8201 			bmcr |= BMCR_SPEED100;
8202 		} else {
8203 			speed = SPEED_1000;
8204 			bmcr |= BMCR_SPEED1000;
8205 		}
8206 	}
8207 
8208 	if (extlpbk) {
8209 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8210 			tg3_readphy(tp, MII_CTRL1000, &val);
8211 			val |= CTL1000_AS_MASTER |
8212 			       CTL1000_ENABLE_MASTER;
8213 			tg3_writephy(tp, MII_CTRL1000, val);
8214 		} else {
8215 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8216 				MII_TG3_FET_PTEST_TRIM_2;
8217 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8218 		}
8219 	} else
8220 		bmcr |= BMCR_LOOPBACK;
8221 
8222 	tg3_writephy(tp, MII_BMCR, bmcr);
8223 
8224 	/* The write needs to be flushed for the FETs */
8225 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8226 		tg3_readphy(tp, MII_BMCR, &bmcr);
8227 
8228 	udelay(40);
8229 
8230 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8231 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8232 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8233 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8234 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8235 
8236 		/* The write needs to be flushed for the AC131 */
8237 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8238 	}
8239 
	/* Reset to prevent intermittently losing the 1st rx packet */
8241 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8242 	    tg3_flag(tp, 5780_CLASS)) {
8243 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8244 		udelay(10);
8245 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8246 	}
8247 
8248 	mac_mode = tp->mac_mode &
8249 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8250 	if (speed == SPEED_1000)
8251 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8252 	else
8253 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8254 
8255 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8256 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8257 
8258 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8259 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8260 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8261 			mac_mode |= MAC_MODE_LINK_POLARITY;
8262 
8263 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8264 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8265 	}
8266 
8267 	tw32(MAC_MODE, mac_mode);
8268 	udelay(40);
8269 
8270 	return 0;
8271 }
8272 
8273 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8274 {
8275 	struct tg3 *tp = netdev_priv(dev);
8276 
8277 	if (features & NETIF_F_LOOPBACK) {
8278 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8279 			return;
8280 
8281 		spin_lock_bh(&tp->lock);
8282 		tg3_mac_loopback(tp, true);
8283 		netif_carrier_on(tp->dev);
8284 		spin_unlock_bh(&tp->lock);
8285 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8286 	} else {
8287 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8288 			return;
8289 
8290 		spin_lock_bh(&tp->lock);
8291 		tg3_mac_loopback(tp, false);
8292 		/* Force link status check */
8293 		tg3_setup_phy(tp, true);
8294 		spin_unlock_bh(&tp->lock);
8295 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8296 	}
8297 }
8298 
8299 static netdev_features_t tg3_fix_features(struct net_device *dev,
8300 	netdev_features_t features)
8301 {
8302 	struct tg3 *tp = netdev_priv(dev);
8303 
8304 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8305 		features &= ~NETIF_F_ALL_TSO;
8306 
8307 	return features;
8308 }
8309 
8310 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8311 {
8312 	netdev_features_t changed = dev->features ^ features;
8313 
8314 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8315 		tg3_set_loopback(dev, features);
8316 
8317 	return 0;
8318 }
8319 
8320 static void tg3_rx_prodring_free(struct tg3 *tp,
8321 				 struct tg3_rx_prodring_set *tpr)
8322 {
8323 	int i;
8324 
8325 	if (tpr != &tp->napi[0].prodring) {
8326 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8327 		     i = (i + 1) & tp->rx_std_ring_mask)
8328 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8329 					tp->rx_pkt_map_sz);
8330 
8331 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8332 			for (i = tpr->rx_jmb_cons_idx;
8333 			     i != tpr->rx_jmb_prod_idx;
8334 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8335 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8336 						TG3_RX_JMB_MAP_SZ);
8337 			}
8338 		}
8339 
8340 		return;
8341 	}
8342 
8343 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8344 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8345 				tp->rx_pkt_map_sz);
8346 
8347 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8348 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8349 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8350 					TG3_RX_JMB_MAP_SZ);
8351 	}
8352 }
8353 
8354 /* Initialize rx rings for packet processing.
8355  *
8356  * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
8358  * end up in the driver.  tp->{tx,}lock are held and thus
8359  * we may not sleep.
8360  */
8361 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8362 				 struct tg3_rx_prodring_set *tpr)
8363 {
8364 	u32 i, rx_pkt_dma_sz;
8365 
8366 	tpr->rx_std_cons_idx = 0;
8367 	tpr->rx_std_prod_idx = 0;
8368 	tpr->rx_jmb_cons_idx = 0;
8369 	tpr->rx_jmb_prod_idx = 0;
8370 
8371 	if (tpr != &tp->napi[0].prodring) {
8372 		memset(&tpr->rx_std_buffers[0], 0,
8373 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8374 		if (tpr->rx_jmb_buffers)
8375 			memset(&tpr->rx_jmb_buffers[0], 0,
8376 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8377 		goto done;
8378 	}
8379 
8380 	/* Zero out all descriptors. */
8381 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8382 
8383 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8384 	if (tg3_flag(tp, 5780_CLASS) &&
8385 	    tp->dev->mtu > ETH_DATA_LEN)
8386 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8387 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8388 
	/* Initialize invariants of the rings; we only set this
8390 	 * stuff once.  This works because the card does not
8391 	 * write into the rx buffer posting rings.
8392 	 */
8393 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8394 		struct tg3_rx_buffer_desc *rxd;
8395 
8396 		rxd = &tpr->rx_std[i];
8397 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8398 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8399 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8400 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8401 	}
8402 
8403 	/* Now allocate fresh SKBs for each rx ring. */
8404 	for (i = 0; i < tp->rx_pending; i++) {
8405 		unsigned int frag_size;
8406 
8407 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8408 				      &frag_size) < 0) {
8409 			netdev_warn(tp->dev,
8410 				    "Using a smaller RX standard ring. Only "
8411 				    "%d out of %d buffers were allocated "
8412 				    "successfully\n", i, tp->rx_pending);
8413 			if (i == 0)
8414 				goto initfail;
8415 			tp->rx_pending = i;
8416 			break;
8417 		}
8418 	}
8419 
8420 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8421 		goto done;
8422 
8423 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8424 
8425 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8426 		goto done;
8427 
8428 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8429 		struct tg3_rx_buffer_desc *rxd;
8430 
8431 		rxd = &tpr->rx_jmb[i].std;
8432 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8433 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8434 				  RXD_FLAG_JUMBO;
8435 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8436 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8437 	}
8438 
8439 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8440 		unsigned int frag_size;
8441 
8442 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8443 				      &frag_size) < 0) {
8444 			netdev_warn(tp->dev,
8445 				    "Using a smaller RX jumbo ring. Only %d "
8446 				    "out of %d buffers were allocated "
8447 				    "successfully\n", i, tp->rx_jumbo_pending);
8448 			if (i == 0)
8449 				goto initfail;
8450 			tp->rx_jumbo_pending = i;
8451 			break;
8452 		}
8453 	}
8454 
8455 done:
8456 	return 0;
8457 
8458 initfail:
8459 	tg3_rx_prodring_free(tp, tpr);
8460 	return -ENOMEM;
8461 }
8462 
8463 static void tg3_rx_prodring_fini(struct tg3 *tp,
8464 				 struct tg3_rx_prodring_set *tpr)
8465 {
8466 	kfree(tpr->rx_std_buffers);
8467 	tpr->rx_std_buffers = NULL;
8468 	kfree(tpr->rx_jmb_buffers);
8469 	tpr->rx_jmb_buffers = NULL;
8470 	if (tpr->rx_std) {
8471 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8472 				  tpr->rx_std, tpr->rx_std_mapping);
8473 		tpr->rx_std = NULL;
8474 	}
8475 	if (tpr->rx_jmb) {
8476 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8477 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8478 		tpr->rx_jmb = NULL;
8479 	}
8480 }
8481 
8482 static int tg3_rx_prodring_init(struct tg3 *tp,
8483 				struct tg3_rx_prodring_set *tpr)
8484 {
8485 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8486 				      GFP_KERNEL);
8487 	if (!tpr->rx_std_buffers)
8488 		return -ENOMEM;
8489 
8490 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8491 					 TG3_RX_STD_RING_BYTES(tp),
8492 					 &tpr->rx_std_mapping,
8493 					 GFP_KERNEL);
8494 	if (!tpr->rx_std)
8495 		goto err_out;
8496 
8497 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8498 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8499 					      GFP_KERNEL);
8500 		if (!tpr->rx_jmb_buffers)
8501 			goto err_out;
8502 
8503 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8504 						 TG3_RX_JMB_RING_BYTES(tp),
8505 						 &tpr->rx_jmb_mapping,
8506 						 GFP_KERNEL);
8507 		if (!tpr->rx_jmb)
8508 			goto err_out;
8509 	}
8510 
8511 	return 0;
8512 
8513 err_out:
8514 	tg3_rx_prodring_fini(tp, tpr);
8515 	return -ENOMEM;
8516 }
8517 
8518 /* Free up pending packets in all rx/tx rings.
8519  *
8520  * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
8522  * end up in the driver.  tp->{tx,}lock is not held and we are not
8523  * in an interrupt context and thus may sleep.
8524  */
8525 static void tg3_free_rings(struct tg3 *tp)
8526 {
8527 	int i, j;
8528 
8529 	for (j = 0; j < tp->irq_cnt; j++) {
8530 		struct tg3_napi *tnapi = &tp->napi[j];
8531 
8532 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8533 
8534 		if (!tnapi->tx_buffers)
8535 			continue;
8536 
8537 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8538 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8539 
8540 			if (!skb)
8541 				continue;
8542 
8543 			tg3_tx_skb_unmap(tnapi, i,
8544 					 skb_shinfo(skb)->nr_frags - 1);
8545 
8546 			dev_kfree_skb_any(skb);
8547 		}
8548 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8549 	}
8550 }
8551 
8552 /* Initialize tx/rx rings for packet processing.
8553  *
8554  * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
8556  * end up in the driver.  tp->{tx,}lock are held and thus
8557  * we may not sleep.
8558  */
8559 static int tg3_init_rings(struct tg3 *tp)
8560 {
8561 	int i;
8562 
8563 	/* Free up all the SKBs. */
8564 	tg3_free_rings(tp);
8565 
8566 	for (i = 0; i < tp->irq_cnt; i++) {
8567 		struct tg3_napi *tnapi = &tp->napi[i];
8568 
8569 		tnapi->last_tag = 0;
8570 		tnapi->last_irq_tag = 0;
8571 		tnapi->hw_status->status = 0;
8572 		tnapi->hw_status->status_tag = 0;
8573 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8574 
8575 		tnapi->tx_prod = 0;
8576 		tnapi->tx_cons = 0;
8577 		if (tnapi->tx_ring)
8578 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8579 
8580 		tnapi->rx_rcb_ptr = 0;
8581 		if (tnapi->rx_rcb)
8582 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8583 
8584 		if (tnapi->prodring.rx_std &&
8585 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8586 			tg3_free_rings(tp);
8587 			return -ENOMEM;
8588 		}
8589 	}
8590 
8591 	return 0;
8592 }
8593 
8594 static void tg3_mem_tx_release(struct tg3 *tp)
8595 {
8596 	int i;
8597 
8598 	for (i = 0; i < tp->irq_max; i++) {
8599 		struct tg3_napi *tnapi = &tp->napi[i];
8600 
8601 		if (tnapi->tx_ring) {
8602 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8603 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8604 			tnapi->tx_ring = NULL;
8605 		}
8606 
8607 		kfree(tnapi->tx_buffers);
8608 		tnapi->tx_buffers = NULL;
8609 	}
8610 }
8611 
8612 static int tg3_mem_tx_acquire(struct tg3 *tp)
8613 {
8614 	int i;
8615 	struct tg3_napi *tnapi = &tp->napi[0];
8616 
8617 	/* If multivector TSS is enabled, vector 0 does not handle
8618 	 * tx interrupts.  Don't allocate any resources for it.
8619 	 */
8620 	if (tg3_flag(tp, ENABLE_TSS))
8621 		tnapi++;
8622 
8623 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8624 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8625 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8626 		if (!tnapi->tx_buffers)
8627 			goto err_out;
8628 
8629 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8630 						    TG3_TX_RING_BYTES,
8631 						    &tnapi->tx_desc_mapping,
8632 						    GFP_KERNEL);
8633 		if (!tnapi->tx_ring)
8634 			goto err_out;
8635 	}
8636 
8637 	return 0;
8638 
8639 err_out:
8640 	tg3_mem_tx_release(tp);
8641 	return -ENOMEM;
8642 }
8643 
8644 static void tg3_mem_rx_release(struct tg3 *tp)
8645 {
8646 	int i;
8647 
8648 	for (i = 0; i < tp->irq_max; i++) {
8649 		struct tg3_napi *tnapi = &tp->napi[i];
8650 
8651 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8652 
8653 		if (!tnapi->rx_rcb)
8654 			continue;
8655 
8656 		dma_free_coherent(&tp->pdev->dev,
8657 				  TG3_RX_RCB_RING_BYTES(tp),
8658 				  tnapi->rx_rcb,
8659 				  tnapi->rx_rcb_mapping);
8660 		tnapi->rx_rcb = NULL;
8661 	}
8662 }
8663 
8664 static int tg3_mem_rx_acquire(struct tg3 *tp)
8665 {
8666 	unsigned int i, limit;
8667 
8668 	limit = tp->rxq_cnt;
8669 
8670 	/* If RSS is enabled, we need a (dummy) producer ring
8671 	 * set on vector zero.  This is the true hw prodring.
8672 	 */
8673 	if (tg3_flag(tp, ENABLE_RSS))
8674 		limit++;
8675 
8676 	for (i = 0; i < limit; i++) {
8677 		struct tg3_napi *tnapi = &tp->napi[i];
8678 
8679 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8680 			goto err_out;
8681 
8682 		/* If multivector RSS is enabled, vector 0
8683 		 * does not handle rx or tx interrupts.
8684 		 * Don't allocate any resources for it.
8685 		 */
8686 		if (!i && tg3_flag(tp, ENABLE_RSS))
8687 			continue;
8688 
8689 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8690 						    TG3_RX_RCB_RING_BYTES(tp),
8691 						    &tnapi->rx_rcb_mapping,
8692 						    GFP_KERNEL);
8693 		if (!tnapi->rx_rcb)
8694 			goto err_out;
8695 	}
8696 
8697 	return 0;
8698 
8699 err_out:
8700 	tg3_mem_rx_release(tp);
8701 	return -ENOMEM;
8702 }
8703 
8704 /*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
8707  */
8708 static void tg3_free_consistent(struct tg3 *tp)
8709 {
8710 	int i;
8711 
8712 	for (i = 0; i < tp->irq_cnt; i++) {
8713 		struct tg3_napi *tnapi = &tp->napi[i];
8714 
8715 		if (tnapi->hw_status) {
8716 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8717 					  tnapi->hw_status,
8718 					  tnapi->status_mapping);
8719 			tnapi->hw_status = NULL;
8720 		}
8721 	}
8722 
8723 	tg3_mem_rx_release(tp);
8724 	tg3_mem_tx_release(tp);
8725 
8726 	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8727 	tg3_full_lock(tp, 0);
8728 	if (tp->hw_stats) {
8729 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8730 				  tp->hw_stats, tp->stats_mapping);
8731 		tp->hw_stats = NULL;
8732 	}
8733 	tg3_full_unlock(tp);
8734 }
8735 
8736 /*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
8739  */
8740 static int tg3_alloc_consistent(struct tg3 *tp)
8741 {
8742 	int i;
8743 
8744 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8745 					   sizeof(struct tg3_hw_stats),
8746 					   &tp->stats_mapping, GFP_KERNEL);
8747 	if (!tp->hw_stats)
8748 		goto err_out;
8749 
8750 	for (i = 0; i < tp->irq_cnt; i++) {
8751 		struct tg3_napi *tnapi = &tp->napi[i];
8752 		struct tg3_hw_status *sblk;
8753 
8754 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8755 						       TG3_HW_STATUS_SIZE,
8756 						       &tnapi->status_mapping,
8757 						       GFP_KERNEL);
8758 		if (!tnapi->hw_status)
8759 			goto err_out;
8760 
8761 		sblk = tnapi->hw_status;
8762 
8763 		if (tg3_flag(tp, ENABLE_RSS)) {
8764 			u16 *prodptr = NULL;
8765 
8766 			/*
8767 			 * When RSS is enabled, the status block format changes
8768 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8769 			 * and "rx_mini_consumer" members get mapped to the
8770 			 * other three rx return ring producer indexes.
8771 			 */
8772 			switch (i) {
8773 			case 1:
8774 				prodptr = &sblk->idx[0].rx_producer;
8775 				break;
8776 			case 2:
8777 				prodptr = &sblk->rx_jumbo_consumer;
8778 				break;
8779 			case 3:
8780 				prodptr = &sblk->reserved;
8781 				break;
8782 			case 4:
8783 				prodptr = &sblk->rx_mini_consumer;
8784 				break;
8785 			}
8786 			tnapi->rx_rcb_prod_idx = prodptr;
8787 		} else {
8788 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8789 		}
8790 	}
8791 
8792 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8793 		goto err_out;
8794 
8795 	return 0;
8796 
8797 err_out:
8798 	tg3_free_consistent(tp);
8799 	return -ENOMEM;
8800 }
8801 
8802 #define MAX_WAIT_CNT 1000
8803 
8804 /* To stop a block, clear the enable bit and poll till it
8805  * clears.  tp->lock is held.
8806  */
8807 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8808 {
8809 	unsigned int i;
8810 	u32 val;
8811 
8812 	if (tg3_flag(tp, 5705_PLUS)) {
8813 		switch (ofs) {
8814 		case RCVLSC_MODE:
8815 		case DMAC_MODE:
8816 		case MBFREE_MODE:
8817 		case BUFMGR_MODE:
8818 		case MEMARB_MODE:
			/* We can't enable/disable these bits on the
			 * 5705/5750; just report success.
8821 			 */
8822 			return 0;
8823 
8824 		default:
8825 			break;
8826 		}
8827 	}
8828 
8829 	val = tr32(ofs);
8830 	val &= ~enable_bit;
8831 	tw32_f(ofs, val);
8832 
8833 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8834 		if (pci_channel_offline(tp->pdev)) {
8835 			dev_err(&tp->pdev->dev,
8836 				"tg3_stop_block device offline, "
8837 				"ofs=%lx enable_bit=%x\n",
8838 				ofs, enable_bit);
8839 			return -ENODEV;
8840 		}
8841 
8842 		udelay(100);
8843 		val = tr32(ofs);
8844 		if ((val & enable_bit) == 0)
8845 			break;
8846 	}
8847 
8848 	if (i == MAX_WAIT_CNT && !silent) {
8849 		dev_err(&tp->pdev->dev,
8850 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8851 			ofs, enable_bit);
8852 		return -ENODEV;
8853 	}
8854 
8855 	return 0;
8856 }
8857 
8858 /* tp->lock is held. */
8859 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8860 {
8861 	int i, err;
8862 
8863 	tg3_disable_ints(tp);
8864 
8865 	if (pci_channel_offline(tp->pdev)) {
8866 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8867 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8868 		err = -ENODEV;
8869 		goto err_no_dev;
8870 	}
8871 
8872 	tp->rx_mode &= ~RX_MODE_ENABLE;
8873 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8874 	udelay(10);
8875 
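	/* Stop the receive-path state machines first, then the send path,
	 * roughly following the packet flow through the chip.
	 */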
8876 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8877 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8878 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8879 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8880 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8881 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8882 
8883 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8884 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8885 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8886 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8887 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8888 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8889 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8890 
8891 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8892 	tw32_f(MAC_MODE, tp->mac_mode);
8893 	udelay(40);
8894 
8895 	tp->tx_mode &= ~TX_MODE_ENABLE;
8896 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8897 
8898 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8899 		udelay(100);
8900 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8901 			break;
8902 	}
8903 	if (i >= MAX_WAIT_CNT) {
8904 		dev_err(&tp->pdev->dev,
8905 			"%s timed out, TX_MODE_ENABLE will not clear "
8906 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8907 		err |= -ENODEV;
8908 	}
8909 
8910 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8911 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8912 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8913 
8914 	tw32(FTQ_RESET, 0xffffffff);
8915 	tw32(FTQ_RESET, 0x00000000);
8916 
8917 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8918 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8919 
8920 err_no_dev:
8921 	for (i = 0; i < tp->irq_cnt; i++) {
8922 		struct tg3_napi *tnapi = &tp->napi[i];
8923 		if (tnapi->hw_status)
8924 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8925 	}
8926 
8927 	return err;
8928 }
8929 
8930 /* Save PCI command register before chip reset */
8931 static void tg3_save_pci_state(struct tg3 *tp)
8932 {
8933 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8934 }
8935 
8936 /* Restore PCI state after chip reset */
8937 static void tg3_restore_pci_state(struct tg3 *tp)
8938 {
8939 	u32 val;
8940 
8941 	/* Re-enable indirect register accesses. */
8942 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8943 			       tp->misc_host_ctrl);
8944 
8945 	/* Set MAX PCI retry to zero. */
8946 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8947 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8948 	    tg3_flag(tp, PCIX_MODE))
8949 		val |= PCISTATE_RETRY_SAME_DMA;
8950 	/* Allow reads and writes to the APE register and memory space. */
8951 	if (tg3_flag(tp, ENABLE_APE))
8952 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8953 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8954 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8955 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8956 
8957 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8958 
8959 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8960 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8961 				      tp->pci_cacheline_sz);
8962 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8963 				      tp->pci_lat_timer);
8964 	}
8965 
8966 	/* Make sure PCI-X relaxed ordering bit is clear. */
8967 	if (tg3_flag(tp, PCIX_MODE)) {
8968 		u16 pcix_cmd;
8969 
8970 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971 				     &pcix_cmd);
8972 		pcix_cmd &= ~PCI_X_CMD_ERO;
8973 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8974 				      pcix_cmd);
8975 	}
8976 
8977 	if (tg3_flag(tp, 5780_CLASS)) {
8978 
		/* Chip reset on the 5780 clears the MSI enable bit,
		 * so we need to restore it.
8981 		 */
8982 		if (tg3_flag(tp, USING_MSI)) {
8983 			u16 ctrl;
8984 
8985 			pci_read_config_word(tp->pdev,
8986 					     tp->msi_cap + PCI_MSI_FLAGS,
8987 					     &ctrl);
8988 			pci_write_config_word(tp->pdev,
8989 					      tp->msi_cap + PCI_MSI_FLAGS,
8990 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8991 			val = tr32(MSGINT_MODE);
8992 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8993 		}
8994 	}
8995 }
8996 
8997 static void tg3_override_clk(struct tg3 *tp)
8998 {
8999 	u32 val;
9000 
9001 	switch (tg3_asic_rev(tp)) {
9002 	case ASIC_REV_5717:
9003 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9004 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9005 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9006 		break;
9007 
9008 	case ASIC_REV_5719:
9009 	case ASIC_REV_5720:
9010 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9011 		break;
9012 
9013 	default:
9014 		return;
9015 	}
9016 }
9017 
9018 static void tg3_restore_clk(struct tg3 *tp)
9019 {
9020 	u32 val;
9021 
9022 	switch (tg3_asic_rev(tp)) {
9023 	case ASIC_REV_5717:
9024 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9026 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9027 		break;
9028 
9029 	case ASIC_REV_5719:
9030 	case ASIC_REV_5720:
9031 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9032 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9033 		break;
9034 
9035 	default:
9036 		return;
9037 	}
9038 }
9039 
9040 /* tp->lock is held. */
9041 static int tg3_chip_reset(struct tg3 *tp)
9042 	__releases(tp->lock)
9043 	__acquires(tp->lock)
9044 {
9045 	u32 val;
9046 	void (*write_op)(struct tg3 *, u32, u32);
9047 	int i, err;
9048 
9049 	if (!pci_device_is_present(tp->pdev))
9050 		return -ENODEV;
9051 
9052 	tg3_nvram_lock(tp);
9053 
9054 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9055 
9056 	/* No matching tg3_nvram_unlock() after this because
9057 	 * chip reset below will undo the nvram lock.
9058 	 */
9059 	tp->nvram_lock_cnt = 0;
9060 
9061 	/* GRC_MISC_CFG core clock reset will clear the memory
9062 	 * enable bit in PCI register 4 and the MSI enable bit
9063 	 * on some chips, so we save relevant registers here.
9064 	 */
9065 	tg3_save_pci_state(tp);
9066 
9067 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9068 	    tg3_flag(tp, 5755_PLUS))
9069 		tw32(GRC_FASTBOOT_PC, 0);
9070 
9071 	/*
9072 	 * We must avoid the readl() that normally takes place.
	 * It locks up machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround while we do the reset.
9076 	 */
9077 	write_op = tp->write32;
9078 	if (write_op == tg3_write_flush_reg32)
9079 		tp->write32 = tg3_write32;
9080 
9081 	/* Prevent the irq handler from reading or writing PCI registers
9082 	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
9084 	 * at this time, but the irq handler may still be called due to irq
9085 	 * sharing or irqpoll.
9086 	 */
9087 	tg3_flag_set(tp, CHIP_RESETTING);
9088 	for (i = 0; i < tp->irq_cnt; i++) {
9089 		struct tg3_napi *tnapi = &tp->napi[i];
9090 		if (tnapi->hw_status) {
9091 			tnapi->hw_status->status = 0;
9092 			tnapi->hw_status->status_tag = 0;
9093 		}
9094 		tnapi->last_tag = 0;
9095 		tnapi->last_irq_tag = 0;
9096 	}
9097 	smp_mb();
9098 
9099 	tg3_full_unlock(tp);
9100 
9101 	for (i = 0; i < tp->irq_cnt; i++)
9102 		synchronize_irq(tp->napi[i].irq_vec);
9103 
9104 	tg3_full_lock(tp, 0);
9105 
9106 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9107 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9108 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9109 	}
9110 
9111 	/* do the reset */
9112 	val = GRC_MISC_CFG_CORECLK_RESET;
9113 
9114 	if (tg3_flag(tp, PCI_EXPRESS)) {
9115 		/* Force PCIe 1.0a mode */
9116 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9117 		    !tg3_flag(tp, 57765_PLUS) &&
9118 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9119 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9120 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9121 
9122 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9123 			tw32(GRC_MISC_CFG, (1 << 29));
9124 			val |= (1 << 29);
9125 		}
9126 	}
9127 
9128 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9129 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9130 		tw32(GRC_VCPU_EXT_CTRL,
9131 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9132 	}
9133 
9134 	/* Set the clock to the highest frequency to avoid timeouts. With link
9135 	 * aware mode, the clock speed could be slow and bootcode does not
9136 	 * complete within the expected time. Override the clock to allow the
9137 	 * bootcode to finish sooner and then restore it.
9138 	 */
9139 	tg3_override_clk(tp);
9140 
	/* Manage gphy power for all CPMU-absent PCIe devices. */
9142 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9143 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9144 
9145 	tw32(GRC_MISC_CFG, val);
9146 
9147 	/* restore 5701 hardware bug workaround write method */
9148 	tp->write32 = write_op;
9149 
9150 	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
9152 	 * when the reset command is given to the chip.
9153 	 *
9154 	 * How do these hardware designers expect things to work
9155 	 * properly if the PCI write is posted for a long period
9156 	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push out the
	 * write which does the reset.
9159 	 *
9160 	 * For most tg3 variants the trick below was working.
9161 	 * Ho hum...
9162 	 */
9163 	udelay(120);
9164 
9165 	/* Flush PCI posted writes.  The normal MMIO registers
9166 	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
9168 	 * the case, see above).  I tried to use indirect
9169 	 * register read/write but this upset some 5701 variants.
9170 	 */
9171 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9172 
9173 	udelay(120);
9174 
9175 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9176 		u16 val16;
9177 
9178 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9179 			int j;
9180 			u32 cfg_val;
9181 
9182 			/* Wait for link training to complete.  */
9183 			for (j = 0; j < 5000; j++)
9184 				udelay(100);
9185 
9186 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9187 			pci_write_config_dword(tp->pdev, 0xc4,
9188 					       cfg_val | (1 << 15));
9189 		}
9190 
9191 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9192 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9193 		/*
9194 		 * Older PCIe devices only support the 128 byte
9195 		 * MPS setting.  Enforce the restriction.
9196 		 */
9197 		if (!tg3_flag(tp, CPMU_PRESENT))
9198 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9199 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9200 
9201 		/* Clear error status */
9202 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9203 				      PCI_EXP_DEVSTA_CED |
9204 				      PCI_EXP_DEVSTA_NFED |
9205 				      PCI_EXP_DEVSTA_FED |
9206 				      PCI_EXP_DEVSTA_URD);
9207 	}
9208 
9209 	tg3_restore_pci_state(tp);
9210 
9211 	tg3_flag_clear(tp, CHIP_RESETTING);
9212 	tg3_flag_clear(tp, ERROR_PROCESSED);
9213 
9214 	val = 0;
9215 	if (tg3_flag(tp, 5780_CLASS))
9216 		val = tr32(MEMARB_MODE);
9217 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9218 
9219 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9220 		tg3_stop_fw(tp);
9221 		tw32(0x5000, 0x400);
9222 	}
9223 
9224 	if (tg3_flag(tp, IS_SSB_CORE)) {
9225 		/*
		 * BCM4785: to avoid repercussions from using the
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required for normal operation.
9229 		 */
9230 		tg3_stop_fw(tp);
9231 		tg3_halt_cpu(tp, RX_CPU_BASE);
9232 	}
9233 
9234 	err = tg3_poll_fw(tp);
9235 	if (err)
9236 		return err;
9237 
9238 	tw32(GRC_MODE, tp->grc_mode);
9239 
9240 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9241 		val = tr32(0xc4);
9242 
9243 		tw32(0xc4, val | (1 << 15));
9244 	}
9245 
9246 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9247 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9248 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9249 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9250 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9251 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9252 	}
9253 
9254 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9255 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9256 		val = tp->mac_mode;
9257 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9258 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9259 		val = tp->mac_mode;
9260 	} else
9261 		val = 0;
9262 
9263 	tw32_f(MAC_MODE, val);
9264 	udelay(40);
9265 
9266 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9267 
9268 	tg3_mdio_start(tp);
9269 
9270 	if (tg3_flag(tp, PCI_EXPRESS) &&
9271 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9272 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9273 	    !tg3_flag(tp, 57765_PLUS)) {
9274 		val = tr32(0x7c00);
9275 
9276 		tw32(0x7c00, val | (1 << 25));
9277 	}
9278 
9279 	tg3_restore_clk(tp);
9280 
9281 	/* Reprobe ASF enable state.  */
9282 	tg3_flag_clear(tp, ENABLE_ASF);
9283 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9284 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9285 
9286 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9287 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9288 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9289 		u32 nic_cfg;
9290 
9291 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9292 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9293 			tg3_flag_set(tp, ENABLE_ASF);
9294 			tp->last_event_jiffies = jiffies;
9295 			if (tg3_flag(tp, 5750_PLUS))
9296 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9297 
9298 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9299 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9300 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9301 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9302 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9303 		}
9304 	}
9305 
9306 	return 0;
9307 }
9308 
9309 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9310 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9311 static void __tg3_set_rx_mode(struct net_device *);
9312 
9313 /* tp->lock is held. */
9314 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9315 {
9316 	int err;
9317 
9318 	tg3_stop_fw(tp);
9319 
9320 	tg3_write_sig_pre_reset(tp, kind);
9321 
9322 	tg3_abort_hw(tp, silent);
9323 	err = tg3_chip_reset(tp);
9324 
9325 	__tg3_set_mac_addr(tp, false);
9326 
9327 	tg3_write_sig_legacy(tp, kind);
9328 	tg3_write_sig_post_reset(tp, kind);
9329 
9330 	if (tp->hw_stats) {
9331 		/* Save the stats across chip resets... */
9332 		tg3_get_nstats(tp, &tp->net_stats_prev);
9333 		tg3_get_estats(tp, &tp->estats_prev);
9334 
9335 		/* And make sure the next sample is new data */
9336 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9337 	}
9338 
9339 	return err;
9340 }
9341 
9342 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9343 {
9344 	struct tg3 *tp = netdev_priv(dev);
9345 	struct sockaddr *addr = p;
9346 	int err = 0;
9347 	bool skip_mac_1 = false;
9348 
9349 	if (!is_valid_ether_addr(addr->sa_data))
9350 		return -EADDRNOTAVAIL;
9351 
9352 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9353 
9354 	if (!netif_running(dev))
9355 		return 0;
9356 
9357 	if (tg3_flag(tp, ENABLE_ASF)) {
9358 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9359 
9360 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9361 		addr0_low = tr32(MAC_ADDR_0_LOW);
9362 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9363 		addr1_low = tr32(MAC_ADDR_1_LOW);
9364 
9365 		/* Skip MAC addr 1 if ASF is using it. */
9366 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9367 		    !(addr1_high == 0 && addr1_low == 0))
9368 			skip_mac_1 = true;
9369 	}
9370 	spin_lock_bh(&tp->lock);
9371 	__tg3_set_mac_addr(tp, skip_mac_1);
9372 	__tg3_set_rx_mode(dev);
9373 	spin_unlock_bh(&tp->lock);
9374 
9375 	return err;
9376 }
9377 
9378 /* tp->lock is held. */
9379 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9380 			   dma_addr_t mapping, u32 maxlen_flags,
9381 			   u32 nic_addr)
9382 {
9383 	tg3_write_mem(tp,
9384 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9385 		      ((u64) mapping >> 32));
9386 	tg3_write_mem(tp,
9387 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9388 		      ((u64) mapping & 0xffffffff));
9389 	tg3_write_mem(tp,
9390 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9391 		       maxlen_flags);
9392 
9393 	if (!tg3_flag(tp, 5705_PLUS))
9394 		tg3_write_mem(tp,
9395 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9396 			      nic_addr);
9397 }
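
/*
 * Worked example (illustration only): a DMA address of
 * 0x0000000123456780 is split by tg3_set_bdinfo() into a high word of
 * 0x00000001 and a low word of 0x23456780, because the BDINFO
 * host-address field in NIC SRAM is a pair of 32-bit registers.
 */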
9400 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9401 {
9402 	int i = 0;
9403 
9404 	if (!tg3_flag(tp, ENABLE_TSS)) {
9405 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9406 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9407 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9408 	} else {
9409 		tw32(HOSTCC_TXCOL_TICKS, 0);
9410 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9411 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9412 
9413 		for (; i < tp->txq_cnt; i++) {
9414 			u32 reg;
9415 
9416 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9417 			tw32(reg, ec->tx_coalesce_usecs);
9418 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9419 			tw32(reg, ec->tx_max_coalesced_frames);
9420 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9421 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9422 		}
9423 	}
9424 
9425 	for (; i < tp->irq_max - 1; i++) {
9426 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9427 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9428 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9429 	}
9430 }
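
/*
 * Note on the 0x18 stride above: each additional host-coalescing
 * vector owns a register block 0x18 bytes beyond the previous one,
 * so e.g. the TXCOL_TICKS register of the third vector past the
 * default ring is HOSTCC_TXCOL_TICKS_VEC1 + 2 * 0x18.
 */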
9431 
9432 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9433 {
9434 	int i = 0;
9435 	u32 limit = tp->rxq_cnt;
9436 
9437 	if (!tg3_flag(tp, ENABLE_RSS)) {
9438 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9439 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9440 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9441 		limit--;
9442 	} else {
9443 		tw32(HOSTCC_RXCOL_TICKS, 0);
9444 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9445 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9446 	}
9447 
9448 	for (; i < limit; i++) {
9449 		u32 reg;
9450 
9451 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9452 		tw32(reg, ec->rx_coalesce_usecs);
9453 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9454 		tw32(reg, ec->rx_max_coalesced_frames);
9455 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9456 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9457 	}
9458 
9459 	for (; i < tp->irq_max - 1; i++) {
9460 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9461 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9462 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9463 	}
9464 }
9465 
9466 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9467 {
9468 	tg3_coal_tx_init(tp, ec);
9469 	tg3_coal_rx_init(tp, ec);
9470 
9471 	if (!tg3_flag(tp, 5705_PLUS)) {
9472 		u32 val = ec->stats_block_coalesce_usecs;
9473 
9474 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9475 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9476 
9477 		if (!tp->link_up)
9478 			val = 0;
9479 
9480 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9481 	}
9482 }
9483 
9484 /* tp->lock is held. */
9485 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9486 {
9487 	u32 txrcb, limit;
9488 
9489 	/* Disable all transmit rings but the first. */
9490 	if (!tg3_flag(tp, 5705_PLUS))
9491 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9492 	else if (tg3_flag(tp, 5717_PLUS))
9493 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9494 	else if (tg3_flag(tp, 57765_CLASS) ||
9495 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9496 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9497 	else
9498 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499 
9500 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9502 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9503 			      BDINFO_FLAGS_DISABLED);
9504 }
9505 
9506 /* tp->lock is held. */
9507 static void tg3_tx_rcbs_init(struct tg3 *tp)
9508 {
9509 	int i = 0;
9510 	u32 txrcb = NIC_SRAM_SEND_RCB;
9511 
9512 	if (tg3_flag(tp, ENABLE_TSS))
9513 		i++;
9514 
9515 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9516 		struct tg3_napi *tnapi = &tp->napi[i];
9517 
9518 		if (!tnapi->tx_ring)
9519 			continue;
9520 
9521 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9522 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9523 			       NIC_SRAM_TX_BUFFER_DESC);
9524 	}
9525 }
9526 
9527 /* tp->lock is held. */
9528 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9529 {
9530 	u32 rxrcb, limit;
9531 
9532 	/* Disable all receive return rings but the first. */
9533 	if (tg3_flag(tp, 5717_PLUS))
9534 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9535 	else if (!tg3_flag(tp, 5705_PLUS))
9536 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9537 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9538 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9539 		 tg3_flag(tp, 57765_CLASS))
9540 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9541 	else
9542 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543 
9544 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9546 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9547 			      BDINFO_FLAGS_DISABLED);
9548 }
9549 
9550 /* tp->lock is held. */
9551 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9552 {
9553 	int i = 0;
9554 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9555 
9556 	if (tg3_flag(tp, ENABLE_RSS))
9557 		i++;
9558 
9559 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9560 		struct tg3_napi *tnapi = &tp->napi[i];
9561 
9562 		if (!tnapi->rx_rcb)
9563 			continue;
9564 
9565 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9566 			       (tp->rx_ret_ring_mask + 1) <<
9567 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9568 	}
9569 }
9570 
9571 /* tp->lock is held. */
9572 static void tg3_rings_reset(struct tg3 *tp)
9573 {
9574 	int i;
9575 	u32 stblk;
9576 	struct tg3_napi *tnapi = &tp->napi[0];
9577 
9578 	tg3_tx_rcbs_disable(tp);
9579 
9580 	tg3_rx_ret_rcbs_disable(tp);
9581 
9582 	/* Disable interrupts */
9583 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9584 	tp->napi[0].chk_msi_cnt = 0;
9585 	tp->napi[0].last_rx_cons = 0;
9586 	tp->napi[0].last_tx_cons = 0;
9587 
9588 	/* Zero mailbox registers. */
9589 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9590 		for (i = 1; i < tp->irq_max; i++) {
9591 			tp->napi[i].tx_prod = 0;
9592 			tp->napi[i].tx_cons = 0;
9593 			if (tg3_flag(tp, ENABLE_TSS))
9594 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9595 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9596 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9597 			tp->napi[i].chk_msi_cnt = 0;
9598 			tp->napi[i].last_rx_cons = 0;
9599 			tp->napi[i].last_tx_cons = 0;
9600 		}
9601 		if (!tg3_flag(tp, ENABLE_TSS))
9602 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9603 	} else {
9604 		tp->napi[0].tx_prod = 0;
9605 		tp->napi[0].tx_cons = 0;
9606 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9607 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9608 	}
9609 
9610 	/* Make sure the NIC-based send BD rings are disabled. */
9611 	if (!tg3_flag(tp, 5705_PLUS)) {
9612 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9613 		for (i = 0; i < 16; i++)
9614 			tw32_tx_mbox(mbox + i * 8, 0);
9615 	}
9616 
9617 	/* Clear status block in ram. */
9618 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9619 
9620 	/* Set status block DMA address */
9621 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9622 	     ((u64) tnapi->status_mapping >> 32));
9623 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9624 	     ((u64) tnapi->status_mapping & 0xffffffff));
9625 
9626 	stblk = HOSTCC_STATBLCK_RING1;
9627 
9628 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9629 		u64 mapping = (u64)tnapi->status_mapping;
9630 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9631 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9632 		stblk += 8;
9633 
9634 		/* Clear status block in ram. */
9635 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9636 	}
9637 
9638 	tg3_tx_rcbs_init(tp);
9639 	tg3_rx_ret_rcbs_init(tp);
9640 }
9641 
9642 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9643 {
9644 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9645 
9646 	if (!tg3_flag(tp, 5750_PLUS) ||
9647 	    tg3_flag(tp, 5780_CLASS) ||
9648 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9649 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9650 	    tg3_flag(tp, 57765_PLUS))
9651 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9652 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9653 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9654 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9655 	else
9656 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9657 
9658 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9659 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9660 
9661 	val = min(nic_rep_thresh, host_rep_thresh);
9662 	tw32(RCVBDI_STD_THRESH, val);
9663 
9664 	if (tg3_flag(tp, 57765_PLUS))
9665 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9666 
9667 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9668 		return;
9669 
9670 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9671 
9672 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9673 
9674 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9675 	tw32(RCVBDI_JUMBO_THRESH, val);
9676 
9677 	if (tg3_flag(tp, 57765_PLUS))
9678 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9679 }
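
/*
 * Replenish-threshold example (assuming, purely for illustration, a
 * 128-entry standard BD cache and rx_pending = 200): the host-side
 * threshold is max(200 / 8, 1) = 25, the NIC-side threshold is
 * min(128 / 2, rx_std_max_post), and the smaller of the two gets
 * programmed into RCVBDI_STD_THRESH.
 */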
9680 
9681 static inline u32 calc_crc(unsigned char *buf, int len)
9682 {
9683 	u32 reg;
9684 	u32 tmp;
9685 	int j, k;
9686 
9687 	reg = 0xffffffff;
9688 
9689 	for (j = 0; j < len; j++) {
9690 		reg ^= buf[j];
9691 
9692 		for (k = 0; k < 8; k++) {
9693 			tmp = reg & 0x01;
9694 
9695 			reg >>= 1;
9696 
9697 			if (tmp)
9698 				reg ^= 0xedb88320;
9699 		}
9700 	}
9701 
9702 	return ~reg;
9703 }
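
/*
 * calc_crc() is the standard reflected (little-endian) CRC-32 used by
 * Ethernet: polynomial 0xedb88320, initial value 0xffffffff, with a
 * final bit inversion.  __tg3_set_rx_mode() below undoes the inversion
 * (~crc) and keeps the low 7 bits as the multicast hash; e.g. a hash
 * of 0x4b selects regidx = (0x4b & 0x60) >> 5 = 2 and bit =
 * 0x4b & 0x1f = 11, i.e. bit 11 of MAC_HASH_REG_2.
 */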
9704 
9705 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9706 {
9707 	/* accept or reject all multicast frames */
9708 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9709 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9710 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9711 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9712 }
9713 
9714 static void __tg3_set_rx_mode(struct net_device *dev)
9715 {
9716 	struct tg3 *tp = netdev_priv(dev);
9717 	u32 rx_mode;
9718 
9719 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9720 				  RX_MODE_KEEP_VLAN_TAG);
9721 
9722 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9723 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9724 	 * flag clear.
9725 	 */
9726 	if (!tg3_flag(tp, ENABLE_ASF))
9727 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9728 #endif
9729 
9730 	if (dev->flags & IFF_PROMISC) {
9731 		/* Promiscuous mode. */
9732 		rx_mode |= RX_MODE_PROMISC;
9733 	} else if (dev->flags & IFF_ALLMULTI) {
9734 		/* Accept all multicast. */
9735 		tg3_set_multi(tp, 1);
9736 	} else if (netdev_mc_empty(dev)) {
9737 		/* Reject all multicast. */
9738 		tg3_set_multi(tp, 0);
9739 	} else {
		/* Accept one or more specific multicast addresses. */
9741 		struct netdev_hw_addr *ha;
9742 		u32 mc_filter[4] = { 0, };
9743 		u32 regidx;
9744 		u32 bit;
9745 		u32 crc;
9746 
9747 		netdev_for_each_mc_addr(ha, dev) {
9748 			crc = calc_crc(ha->addr, ETH_ALEN);
9749 			bit = ~crc & 0x7f;
9750 			regidx = (bit & 0x60) >> 5;
9751 			bit &= 0x1f;
9752 			mc_filter[regidx] |= (1 << bit);
9753 		}
9754 
9755 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9756 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9757 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9758 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9759 	}
9760 
9761 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9762 		rx_mode |= RX_MODE_PROMISC;
9763 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
9765 		int i = 0;
9766 		struct netdev_hw_addr *ha;
9767 
9768 		netdev_for_each_uc_addr(ha, dev) {
9769 			__tg3_set_one_mac_addr(tp, ha->addr,
9770 					       i + TG3_UCAST_ADDR_IDX(tp));
9771 			i++;
9772 		}
9773 	}
9774 
9775 	if (rx_mode != tp->rx_mode) {
9776 		tp->rx_mode = rx_mode;
9777 		tw32_f(MAC_RX_MODE, rx_mode);
9778 		udelay(10);
9779 	}
9780 }
9781 
9782 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9783 {
9784 	int i;
9785 
9786 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9787 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9788 }
9789 
9790 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9791 {
9792 	int i;
9793 
9794 	if (!tg3_flag(tp, SUPPORT_MSIX))
9795 		return;
9796 
9797 	if (tp->rxq_cnt == 1) {
9798 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9799 		return;
9800 	}
9801 
9802 	/* Validate table against current IRQ count */
9803 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9804 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9805 			break;
9806 	}
9807 
9808 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9809 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9810 }
9811 
9812 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9813 {
9814 	int i = 0;
9815 	u32 reg = MAC_RSS_INDIR_TBL_0;
9816 
9817 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9818 		u32 val = tp->rss_ind_tbl[i];
9819 		i++;
9820 		for (; i % 8; i++) {
9821 			val <<= 4;
9822 			val |= tp->rss_ind_tbl[i];
9823 		}
9824 		tw32(reg, val);
9825 		reg += 4;
9826 	}
9827 }
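
/*
 * Packing example: eight 4-bit indirection entries share one 32-bit
 * register, first entry in the most significant nibble, so entries
 * {1, 0, 3, 2, 1, 0, 3, 2} are written above as 0x10321032.
 */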
9828 
9829 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9830 {
9831 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9832 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9833 	else
9834 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9835 }
9836 
9837 /* tp->lock is held. */
9838 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9839 {
9840 	u32 val, rdmac_mode;
9841 	int i, err, limit;
9842 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9843 
9844 	tg3_disable_ints(tp);
9845 
9846 	tg3_stop_fw(tp);
9847 
9848 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9849 
9850 	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, true);
9852 
9853 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9854 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9855 		tg3_phy_pull_config(tp);
9856 		tg3_eee_pull_config(tp, NULL);
9857 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9858 	}
9859 
9860 	/* Enable MAC control of LPI */
9861 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9862 		tg3_setup_eee(tp);
9863 
9864 	if (reset_phy)
9865 		tg3_phy_reset(tp);
9866 
9867 	err = tg3_chip_reset(tp);
9868 	if (err)
9869 		return err;
9870 
9871 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9872 
9873 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9874 		val = tr32(TG3_CPMU_CTRL);
9875 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9876 		tw32(TG3_CPMU_CTRL, val);
9877 
9878 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9879 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9880 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9881 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9882 
9883 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9884 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9885 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9886 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9887 
9888 		val = tr32(TG3_CPMU_HST_ACC);
9889 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9890 		val |= CPMU_HST_ACC_MACCLK_6_25;
9891 		tw32(TG3_CPMU_HST_ACC, val);
9892 	}
9893 
9894 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9895 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9896 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9897 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9898 		tw32(PCIE_PWR_MGMT_THRESH, val);
9899 
9900 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9901 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9902 
9903 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9904 
9905 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9906 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9907 	}
9908 
9909 	if (tg3_flag(tp, L1PLLPD_EN)) {
9910 		u32 grc_mode = tr32(GRC_MODE);
9911 
9912 		/* Access the lower 1K of PL PCIE block registers. */
9913 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9914 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9915 
9916 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9917 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9918 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9919 
9920 		tw32(GRC_MODE, grc_mode);
9921 	}
9922 
9923 	if (tg3_flag(tp, 57765_CLASS)) {
9924 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9925 			u32 grc_mode = tr32(GRC_MODE);
9926 
9927 			/* Access the lower 1K of PL PCIE block registers. */
9928 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9929 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9930 
9931 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9932 				   TG3_PCIE_PL_LO_PHYCTL5);
9933 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9934 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9935 
9936 			tw32(GRC_MODE, grc_mode);
9937 		}
9938 
9939 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9940 			u32 grc_mode;
9941 
9942 			/* Fix transmit hangs */
9943 			val = tr32(TG3_CPMU_PADRNG_CTL);
9944 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9945 			tw32(TG3_CPMU_PADRNG_CTL, val);
9946 
9947 			grc_mode = tr32(GRC_MODE);
9948 
9949 			/* Access the lower 1K of DL PCIE block registers. */
9950 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9951 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9952 
9953 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9954 				   TG3_PCIE_DL_LO_FTSMAX);
9955 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9956 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9957 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9958 
9959 			tw32(GRC_MODE, grc_mode);
9960 		}
9961 
9962 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9963 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9964 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9965 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9966 	}
9967 
9968 	/* This works around an issue with Athlon chipsets on
9969 	 * B3 tigon3 silicon.  This bit has no effect on any
9970 	 * other revision.  But do not set this on PCI Express
9971 	 * chips and don't even touch the clocks if the CPMU is present.
9972 	 */
9973 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9974 		if (!tg3_flag(tp, PCI_EXPRESS))
9975 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9976 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9977 	}
9978 
9979 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9980 	    tg3_flag(tp, PCIX_MODE)) {
9981 		val = tr32(TG3PCI_PCISTATE);
9982 		val |= PCISTATE_RETRY_SAME_DMA;
9983 		tw32(TG3PCI_PCISTATE, val);
9984 	}
9985 
9986 	if (tg3_flag(tp, ENABLE_APE)) {
9987 		/* Allow reads and writes to the
9988 		 * APE register and memory space.
9989 		 */
9990 		val = tr32(TG3PCI_PCISTATE);
9991 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9992 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9993 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9994 		tw32(TG3PCI_PCISTATE, val);
9995 	}
9996 
9997 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9998 		/* Enable some hw fixes.  */
9999 		val = tr32(TG3PCI_MSI_DATA);
10000 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10001 		tw32(TG3PCI_MSI_DATA, val);
10002 	}
10003 
10004 	/* Descriptor ring init may make accesses to the
10005 	 * NIC SRAM area to setup the TX descriptors, so we
10006 	 * can only do this after the hardware has been
10007 	 * successfully reset.
10008 	 */
10009 	err = tg3_init_rings(tp);
10010 	if (err)
10011 		return err;
10012 
10013 	if (tg3_flag(tp, 57765_PLUS)) {
10014 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10015 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10016 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10017 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10018 		if (!tg3_flag(tp, 57765_CLASS) &&
10019 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10020 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10021 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10022 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10023 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10024 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe-time DMA
		 * engine test, tg3_test_dma().
		 */
10028 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10029 	}
10030 
10031 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10032 			  GRC_MODE_4X_NIC_SEND_RINGS |
10033 			  GRC_MODE_NO_TX_PHDR_CSUM |
10034 			  GRC_MODE_NO_RX_PHDR_CSUM);
10035 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10036 
10037 	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
10039 	 * header checksums on receive.  For transmit it is more
10040 	 * convenient to do the pseudo-header checksum in software
10041 	 * as Linux does that on transmit for us in all cases.
10042 	 */
10043 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10044 
10045 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10046 	if (tp->rxptpctl)
10047 		tw32(TG3_RX_PTP_CTL,
10048 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10049 
10050 	if (tg3_flag(tp, PTP_CAPABLE))
10051 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10052 
10053 	tw32(GRC_MODE, tp->grc_mode | val);
10054 
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10056 	val = tr32(GRC_MISC_CFG);
10057 	val &= ~0xff;
10058 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10059 	tw32(GRC_MISC_CFG, val);
10060 
10061 	/* Initialize MBUF/DESC pool. */
10062 	if (tg3_flag(tp, 5750_PLUS)) {
10063 		/* Do nothing.  */
10064 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10065 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10066 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10067 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10068 		else
10069 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10070 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10071 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10072 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10073 		int fw_len;
10074 
10075 		fw_len = tp->fw_len;
10076 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10077 		tw32(BUFMGR_MB_POOL_ADDR,
10078 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10079 		tw32(BUFMGR_MB_POOL_SIZE,
10080 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10081 	}
10082 
10083 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10084 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10085 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10086 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10087 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10088 		tw32(BUFMGR_MB_HIGH_WATER,
10089 		     tp->bufmgr_config.mbuf_high_water);
10090 	} else {
10091 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10092 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10093 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10094 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10095 		tw32(BUFMGR_MB_HIGH_WATER,
10096 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10097 	}
10098 	tw32(BUFMGR_DMA_LOW_WATER,
10099 	     tp->bufmgr_config.dma_low_water);
10100 	tw32(BUFMGR_DMA_HIGH_WATER,
10101 	     tp->bufmgr_config.dma_high_water);
10102 
10103 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10104 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10105 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10106 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10107 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10108 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10109 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10110 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10111 	tw32(BUFMGR_MODE, val);
10112 	for (i = 0; i < 2000; i++) {
10113 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10114 			break;
10115 		udelay(10);
10116 	}
10117 	if (i >= 2000) {
10118 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10119 		return -ENODEV;
10120 	}
10121 
10122 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10123 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10124 
10125 	tg3_setup_rxbd_thresholds(tp);
10126 
10127 	/* Initialize TG3_BDINFO's at:
10128 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10129 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10130 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10131 	 *
10132 	 * like so:
10133 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10134 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10135 	 *                              ring attribute flags
10136 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10137 	 *
10138 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10139 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10140 	 *
10141 	 * The size of each ring is fixed in the firmware, but the location is
10142 	 * configurable.
10143 	 */
10144 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10145 	     ((u64) tpr->rx_std_mapping >> 32));
10146 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10147 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10148 	if (!tg3_flag(tp, 5717_PLUS))
10149 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10150 		     NIC_SRAM_RX_BUFFER_DESC);
10151 
10152 	/* Disable the mini ring */
10153 	if (!tg3_flag(tp, 5705_PLUS))
10154 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10155 		     BDINFO_FLAGS_DISABLED);
10156 
10157 	/* Program the jumbo buffer descriptor ring control
10158 	 * blocks on those devices that have them.
10159 	 */
10160 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10161 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10162 
10163 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
			     TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
			     TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10168 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10169 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10170 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10171 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10172 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10173 			    tg3_flag(tp, 57765_CLASS) ||
10174 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10175 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10176 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10177 		} else {
10178 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10179 			     BDINFO_FLAGS_DISABLED);
10180 		}
10181 
10182 		if (tg3_flag(tp, 57765_PLUS)) {
10183 			val = TG3_RX_STD_RING_SIZE(tp);
10184 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10185 			val |= (TG3_RX_STD_DMA_SZ << 2);
10186 		} else
10187 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10188 	} else
10189 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10190 
10191 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10192 
10193 	tpr->rx_std_prod_idx = tp->rx_pending;
10194 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10195 
10196 	tpr->rx_jmb_prod_idx =
10197 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10198 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10199 
10200 	tg3_rings_reset(tp);
10201 
10202 	/* Initialize MAC address and backoff seed. */
10203 	__tg3_set_mac_addr(tp, false);
10204 
10205 	/* MTU + ethernet header + FCS + optional VLAN tag */
10206 	tw32(MAC_RX_MTU_SIZE,
10207 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10208 
10209 	/* The slot time is changed by tg3_setup_phy if we
10210 	 * run at gigabit with half duplex.
10211 	 */
10212 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10213 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10214 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10215 
10216 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10217 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10218 		val |= tr32(MAC_TX_LENGTHS) &
10219 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10220 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10221 
10222 	tw32(MAC_TX_LENGTHS, val);
10223 
10224 	/* Receive rules. */
10225 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10226 	tw32(RCVLPC_CONFIG, 0x0181);
10227 
	/* Calculate the RDMAC_MODE setting early, as we need it to
	 * determine the RCVLPC_STATS_ENABLE mask.
10230 	 */
10231 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10232 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10233 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10234 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10235 		      RDMAC_MODE_LNGREAD_ENAB);
10236 
10237 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10238 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10239 
10240 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10241 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10242 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10243 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10244 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10245 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10246 
10247 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10248 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10249 		if (tg3_flag(tp, TSO_CAPABLE) &&
10250 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10251 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10252 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10253 			   !tg3_flag(tp, IS_5788)) {
10254 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10255 		}
10256 	}
10257 
10258 	if (tg3_flag(tp, PCI_EXPRESS))
10259 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10260 
10261 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10262 		tp->dma_limit = 0;
10263 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10264 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10265 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10266 		}
10267 	}
10268 
10269 	if (tg3_flag(tp, HW_TSO_1) ||
10270 	    tg3_flag(tp, HW_TSO_2) ||
10271 	    tg3_flag(tp, HW_TSO_3))
10272 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10273 
10274 	if (tg3_flag(tp, 57765_PLUS) ||
10275 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10277 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10278 
10279 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10280 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10281 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10282 
10283 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10284 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10285 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10286 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10287 	    tg3_flag(tp, 57765_PLUS)) {
10288 		u32 tgtreg;
10289 
10290 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10291 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10292 		else
10293 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10294 
10295 		val = tr32(tgtreg);
10296 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10297 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10298 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10299 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10300 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10301 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10302 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10303 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10304 		}
10305 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10306 	}
10307 
10308 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10309 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10310 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10311 		u32 tgtreg;
10312 
10313 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10314 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10315 		else
10316 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10317 
10318 		val = tr32(tgtreg);
10319 		tw32(tgtreg, val |
10320 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10321 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10322 	}
10323 
10324 	/* Receive/send statistics. */
10325 	if (tg3_flag(tp, 5750_PLUS)) {
10326 		val = tr32(RCVLPC_STATS_ENABLE);
10327 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10328 		tw32(RCVLPC_STATS_ENABLE, val);
10329 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10330 		   tg3_flag(tp, TSO_CAPABLE)) {
10331 		val = tr32(RCVLPC_STATS_ENABLE);
10332 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10333 		tw32(RCVLPC_STATS_ENABLE, val);
10334 	} else {
10335 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10336 	}
10337 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10338 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10339 	tw32(SNDDATAI_STATSCTRL,
10340 	     (SNDDATAI_SCTRL_ENABLE |
10341 	      SNDDATAI_SCTRL_FASTUPD));
10342 
10343 	/* Setup host coalescing engine. */
10344 	tw32(HOSTCC_MODE, 0);
10345 	for (i = 0; i < 2000; i++) {
10346 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10347 			break;
10348 		udelay(10);
10349 	}
10350 
10351 	__tg3_set_coalesce(tp, &tp->coal);
10352 
10353 	if (!tg3_flag(tp, 5705_PLUS)) {
10354 		/* Status/statistics block address.  See tg3_timer,
10355 		 * the tg3_periodic_fetch_stats call there, and
10356 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10357 		 */
10358 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10359 		     ((u64) tp->stats_mapping >> 32));
10360 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10361 		     ((u64) tp->stats_mapping & 0xffffffff));
10362 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10363 
10364 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10365 
10366 		/* Clear statistics and status block memory areas */
10367 		for (i = NIC_SRAM_STATS_BLK;
10368 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10369 		     i += sizeof(u32)) {
10370 			tg3_write_mem(tp, i, 0);
10371 			udelay(40);
10372 		}
10373 	}
10374 
10375 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10376 
10377 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10378 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10379 	if (!tg3_flag(tp, 5705_PLUS))
10380 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10381 
10382 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10383 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10384 		/* reset to prevent losing 1st rx packet intermittently */
10385 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10386 		udelay(10);
10387 	}
10388 
10389 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10390 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10391 			MAC_MODE_FHDE_ENABLE;
10392 	if (tg3_flag(tp, ENABLE_APE))
10393 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10394 	if (!tg3_flag(tp, 5705_PLUS) &&
10395 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10396 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10397 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
	       MAC_MODE_TXSTAT_CLEAR);
10399 	udelay(40);
10400 
10401 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10402 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10403 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10404 	 * whether used as inputs or outputs, are set by boot code after
10405 	 * reset.
10406 	 */
10407 	if (!tg3_flag(tp, IS_NIC)) {
10408 		u32 gpio_mask;
10409 
10410 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10411 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10412 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10413 
10414 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10415 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10416 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10417 
10418 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10419 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10420 
10421 		tp->grc_local_ctrl &= ~gpio_mask;
10422 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10423 
10424 		/* GPIO1 must be driven high for eeprom write protect */
10425 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10426 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10427 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10428 	}
10429 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10430 	udelay(100);
10431 
10432 	if (tg3_flag(tp, USING_MSIX)) {
10433 		val = tr32(MSGINT_MODE);
10434 		val |= MSGINT_MODE_ENABLE;
10435 		if (tp->irq_cnt > 1)
10436 			val |= MSGINT_MODE_MULTIVEC_EN;
10437 		if (!tg3_flag(tp, 1SHOT_MSI))
10438 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10439 		tw32(MSGINT_MODE, val);
10440 	}
10441 
10442 	if (!tg3_flag(tp, 5705_PLUS)) {
10443 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10444 		udelay(40);
10445 	}
10446 
10447 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10448 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10449 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10450 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10451 	       WDMAC_MODE_LNGREAD_ENAB);
10452 
10453 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10454 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10455 		if (tg3_flag(tp, TSO_CAPABLE) &&
10456 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10457 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10458 			/* nothing */
10459 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10460 			   !tg3_flag(tp, IS_5788)) {
10461 			val |= WDMAC_MODE_RX_ACCEL;
10462 		}
10463 	}
10464 
10465 	/* Enable host coalescing bug fix */
10466 	if (tg3_flag(tp, 5755_PLUS))
10467 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10468 
10469 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10470 		val |= WDMAC_MODE_BURST_ALL_DATA;
10471 
10472 	tw32_f(WDMAC_MODE, val);
10473 	udelay(40);
10474 
10475 	if (tg3_flag(tp, PCIX_MODE)) {
10476 		u16 pcix_cmd;
10477 
10478 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10479 				     &pcix_cmd);
10480 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10481 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10482 			pcix_cmd |= PCI_X_CMD_READ_2K;
10483 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10484 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10485 			pcix_cmd |= PCI_X_CMD_READ_2K;
10486 		}
10487 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10488 				      pcix_cmd);
10489 	}
10490 
10491 	tw32_f(RDMAC_MODE, rdmac_mode);
10492 	udelay(40);
10493 
10494 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10495 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10496 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10497 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10498 				break;
10499 		}
10500 		if (i < TG3_NUM_RDMA_CHANNELS) {
10501 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10502 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10503 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10504 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10505 		}
10506 	}
10507 
10508 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10509 	if (!tg3_flag(tp, 5705_PLUS))
10510 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10511 
10512 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10513 		tw32(SNDDATAC_MODE,
10514 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10515 	else
10516 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10517 
10518 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10519 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10520 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10521 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10522 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10523 	tw32(RCVDBDI_MODE, val);
10524 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10525 	if (tg3_flag(tp, HW_TSO_1) ||
10526 	    tg3_flag(tp, HW_TSO_2) ||
10527 	    tg3_flag(tp, HW_TSO_3))
10528 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10529 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10530 	if (tg3_flag(tp, ENABLE_TSS))
10531 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10532 	tw32(SNDBDI_MODE, val);
10533 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10534 
10535 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10536 		err = tg3_load_5701_a0_firmware_fix(tp);
10537 		if (err)
10538 			return err;
10539 	}
10540 
10541 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10542 		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled.
10544 		 */
10545 		tg3_load_57766_firmware(tp);
10546 	}
10547 
10548 	if (tg3_flag(tp, TSO_CAPABLE)) {
10549 		err = tg3_load_tso_firmware(tp);
10550 		if (err)
10551 			return err;
10552 	}
10553 
10554 	tp->tx_mode = TX_MODE_ENABLE;
10555 
10556 	if (tg3_flag(tp, 5755_PLUS) ||
10557 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10558 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10559 
10560 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10561 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10562 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10563 		tp->tx_mode &= ~val;
10564 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10565 	}
10566 
10567 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10568 	udelay(100);
10569 
10570 	if (tg3_flag(tp, ENABLE_RSS)) {
10571 		u32 rss_key[10];
10572 
10573 		tg3_rss_write_indir_tbl(tp);
10574 
10575 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10576 
		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10579 	}
10580 
10581 	tp->rx_mode = RX_MODE_ENABLE;
10582 	if (tg3_flag(tp, 5755_PLUS))
10583 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10584 
10585 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10586 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10587 
10588 	if (tg3_flag(tp, ENABLE_RSS))
10589 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10590 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10591 			       RX_MODE_RSS_IPV6_HASH_EN |
10592 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10593 			       RX_MODE_RSS_IPV4_HASH_EN |
10594 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10595 
10596 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10597 	udelay(10);
10598 
10599 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10600 
10601 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10602 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10603 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10604 		udelay(10);
10605 	}
10606 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10607 	udelay(10);
10608 
10609 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10610 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10611 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
10614 			val = tr32(MAC_SERDES_CFG);
10615 			val &= 0xfffff000;
10616 			val |= 0x880;
10617 			tw32(MAC_SERDES_CFG, val);
10618 		}
10619 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10620 			tw32(MAC_SERDES_CFG, 0x616000);
10621 	}
10622 
10623 	/* Prevent chip from dropping frames when flow control
10624 	 * is enabled.
10625 	 */
10626 	if (tg3_flag(tp, 57765_CLASS))
10627 		val = 1;
10628 	else
10629 		val = 2;
10630 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10631 
10632 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10633 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10634 		/* Use hardware link auto-negotiation */
10635 		tg3_flag_set(tp, HW_AUTONEG);
10636 	}
10637 
10638 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10639 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10640 		u32 tmp;
10641 
10642 		tmp = tr32(SERDES_RX_CTRL);
10643 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10644 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10645 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10646 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10647 	}
10648 
10649 	if (!tg3_flag(tp, USE_PHYLIB)) {
10650 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10651 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10652 
10653 		err = tg3_setup_phy(tp, false);
10654 		if (err)
10655 			return err;
10656 
10657 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10658 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10659 			u32 tmp;
10660 
10661 			/* Clear CRC stats. */
10662 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10663 				tg3_writephy(tp, MII_TG3_TEST1,
10664 					     tmp | MII_TG3_TEST1_CRC_EN);
10665 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10666 			}
10667 		}
10668 	}
10669 
10670 	__tg3_set_rx_mode(tp->dev);
10671 
10672 	/* Initialize receive rules. */
10673 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10674 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10675 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10676 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10677 
10678 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10679 		limit = 8;
10680 	else
10681 		limit = 16;
10682 	if (tg3_flag(tp, ENABLE_ASF))
10683 		limit -= 4;
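
	/* The switch cases below deliberately fall through, clearing
	 * every receive rule/value pair from limit - 1 down to 4; rules
	 * 3 and 2 are intentionally left untouched.
	 */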
10684 	switch (limit) {
10685 	case 16:
10686 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10687 	case 15:
10688 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10689 	case 14:
10690 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10691 	case 13:
10692 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10693 	case 12:
10694 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10695 	case 11:
10696 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10697 	case 10:
10698 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10699 	case 9:
10700 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10701 	case 8:
10702 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10703 	case 7:
10704 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10705 	case 6:
10706 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10707 	case 5:
10708 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10709 	case 4:
10710 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10711 	case 3:
10712 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10713 	case 2:
10714 	case 1:
10715 
10716 	default:
10717 		break;
10718 	}
10719 
10720 	if (tg3_flag(tp, ENABLE_APE))
10721 		/* Write our heartbeat update interval to APE. */
10722 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10723 				APE_HOST_HEARTBEAT_INT_DISABLE);
10724 
10725 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10726 
10727 	return 0;
10728 }
10729 
10730 /* Called at device open time to get the chip ready for
10731  * packet processing.  Invoked with tp->lock held.
10732  */
10733 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10734 {
10735 	/* Chip may have been just powered on. If so, the boot code may still
10736 	 * be running initialization. Wait for it to finish to avoid races in
10737 	 * accessing the hardware.
10738 	 */
10739 	tg3_enable_register_access(tp);
10740 	tg3_poll_fw(tp);
10741 
10742 	tg3_switch_clocks(tp);
10743 
10744 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10745 
10746 	return tg3_reset_hw(tp, reset_phy);
10747 }
10748 
10749 #ifdef CONFIG_TIGON3_HWMON
10750 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10751 {
10752 	int i;
10753 
10754 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10755 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10756 
10757 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10758 		off += len;
10759 
10760 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10761 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10762 			memset(ocir, 0, TG3_OCIR_LEN);
10763 	}
10764 }
10765 
10766 /* sysfs attributes for hwmon */
10767 static ssize_t tg3_show_temp(struct device *dev,
10768 			     struct device_attribute *devattr, char *buf)
10769 {
10770 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10771 	struct tg3 *tp = dev_get_drvdata(dev);
10772 	u32 temperature;
10773 
10774 	spin_lock_bh(&tp->lock);
10775 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10776 				sizeof(temperature));
10777 	spin_unlock_bh(&tp->lock);
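
	/* hwmon expects temp*_input values in millidegrees Celsius; the
	 * value read back from the APE scratchpad is in whole degrees,
	 * hence the scaling below.
	 */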
10778 	return sprintf(buf, "%u\n", temperature * 1000);
10779 }
10782 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10783 			  TG3_TEMP_SENSOR_OFFSET);
10784 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10785 			  TG3_TEMP_CAUTION_OFFSET);
10786 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10787 			  TG3_TEMP_MAX_OFFSET);
10788 
10789 static struct attribute *tg3_attrs[] = {
10790 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10791 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10792 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10793 	NULL
10794 };
10795 ATTRIBUTE_GROUPS(tg3);
10796 
10797 static void tg3_hwmon_close(struct tg3 *tp)
10798 {
10799 	if (tp->hwmon_dev) {
10800 		hwmon_device_unregister(tp->hwmon_dev);
10801 		tp->hwmon_dev = NULL;
10802 	}
10803 }
10804 
10805 static void tg3_hwmon_open(struct tg3 *tp)
10806 {
10807 	int i;
10808 	u32 size = 0;
10809 	struct pci_dev *pdev = tp->pdev;
10810 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10811 
10812 	tg3_sd_scan_scratchpad(tp, ocirs);
10813 
10814 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10815 		if (!ocirs[i].src_data_length)
10816 			continue;
10817 
10818 		size += ocirs[i].src_hdr_length;
10819 		size += ocirs[i].src_data_length;
10820 	}
10821 
10822 	if (!size)
10823 		return;
10824 
10825 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10826 							  tp, tg3_groups);
10827 	if (IS_ERR(tp->hwmon_dev)) {
10828 		tp->hwmon_dev = NULL;
10829 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10830 	}
10831 }
10832 #else
10833 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10834 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10835 #endif /* CONFIG_TIGON3_HWMON */
10838 #define TG3_STAT_ADD32(PSTAT, REG) \
10839 do {	u32 __val = tr32(REG); \
10840 	(PSTAT)->low += __val; \
10841 	if ((PSTAT)->low < __val) \
10842 		(PSTAT)->high += 1; \
10843 } while (0)
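
/*
 * The macro above folds a 32-bit hardware counter into a 64-bit
 * software counter, detecting carry via unsigned wraparound: e.g.
 * with low = 0xfffffff0 and __val = 0x20, low becomes 0x10, which is
 * less than __val, so high is incremented.
 */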
10844 
10845 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10846 {
10847 	struct tg3_hw_stats *sp = tp->hw_stats;
10848 
10849 	if (!tp->link_up)
10850 		return;
10851 
10852 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10853 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10854 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10855 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10856 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10857 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10858 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10859 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10860 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10861 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10862 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10863 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10864 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
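
	/* Once more frames have been sent than there are RDMA channels,
	 * the LSO read-DMA length workaround enabled in tg3_reset_hw()
	 * is no longer needed, so undo it here.
	 */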
10865 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10866 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10867 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10868 		u32 val;
10869 
10870 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10871 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10872 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10873 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10874 	}
10875 
10876 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10877 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10878 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10879 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10880 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10881 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10882 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10883 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10884 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10885 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10886 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10887 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10888 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10889 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10890 
10891 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10892 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10893 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10894 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10895 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10896 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10897 	} else {
10898 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10899 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10900 		if (val) {
10901 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10902 			sp->rx_discards.low += val;
10903 			if (sp->rx_discards.low < val)
10904 				sp->rx_discards.high += 1;
10905 		}
10906 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10907 	}
10908 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10909 }
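
/*
 * Heuristic for chips that can lose an MSI: if a vector has work
 * pending but its consumer indices have not moved since the previous
 * timer tick, assume the interrupt was missed and invoke the MSI
 * handler by hand (after tolerating one quiet tick).
 */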
10910 
10911 static void tg3_chk_missed_msi(struct tg3 *tp)
10912 {
10913 	u32 i;
10914 
10915 	for (i = 0; i < tp->irq_cnt; i++) {
10916 		struct tg3_napi *tnapi = &tp->napi[i];
10917 
10918 		if (tg3_has_work(tnapi)) {
10919 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10920 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10921 				if (tnapi->chk_msi_cnt < 1) {
10922 					tnapi->chk_msi_cnt++;
10923 					return;
10924 				}
10925 				tg3_msi(0, tnapi);
10926 			}
10927 		}
10928 		tnapi->chk_msi_cnt = 0;
10929 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10930 		tnapi->last_tx_cons = tnapi->tx_cons;
10931 	}
10932 }
10933 
10934 static void tg3_timer(unsigned long __opaque)
10935 {
10936 	struct tg3 *tp = (struct tg3 *) __opaque;
10937 
10938 	spin_lock(&tp->lock);
10939 
10940 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10941 		spin_unlock(&tp->lock);
10942 		goto restart_timer;
10943 	}
10944 
10945 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10946 	    tg3_flag(tp, 57765_CLASS))
10947 		tg3_chk_missed_msi(tp);
10948 
10949 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10950 		/* BCM4785: Flush posted writes from GbE to host memory. */
10951 		tr32(HOSTCC_MODE);
10952 	}
10953 
10954 	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is necessary because, when using
		 * non-tagged IRQ status, the mailbox/status_block protocol
		 * the chip uses with the CPU is race prone.
10958 		 */
10959 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10960 			tw32(GRC_LOCAL_CTRL,
10961 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10962 		} else {
10963 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10964 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10965 		}
10966 
10967 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10968 			spin_unlock(&tp->lock);
10969 			tg3_reset_task_schedule(tp);
10970 			goto restart_timer;
10971 		}
10972 	}
10973 
10974 	/* This part only runs once per second. */
10975 	if (!--tp->timer_counter) {
10976 		if (tg3_flag(tp, 5705_PLUS))
10977 			tg3_periodic_fetch_stats(tp);
10978 
10979 		if (tp->setlpicnt && !--tp->setlpicnt)
10980 			tg3_phy_eee_enable(tp);
10981 
10982 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10983 			u32 mac_stat;
10984 			int phy_event;
10985 
10986 			mac_stat = tr32(MAC_STATUS);
10987 
10988 			phy_event = 0;
10989 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10990 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10991 					phy_event = 1;
10992 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10993 				phy_event = 1;
10994 
10995 			if (phy_event)
10996 				tg3_setup_phy(tp, false);
10997 		} else if (tg3_flag(tp, POLL_SERDES)) {
10998 			u32 mac_stat = tr32(MAC_STATUS);
10999 			int need_setup = 0;
11000 
11001 			if (tp->link_up &&
11002 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11003 				need_setup = 1;
11004 			}
11005 			if (!tp->link_up &&
11006 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11007 					 MAC_STATUS_SIGNAL_DET))) {
11008 				need_setup = 1;
11009 			}
11010 			if (need_setup) {
11011 				if (!tp->serdes_counter) {
11012 					tw32_f(MAC_MODE,
11013 					     (tp->mac_mode &
11014 					      ~MAC_MODE_PORT_MODE_MASK));
11015 					udelay(40);
11016 					tw32_f(MAC_MODE, tp->mac_mode);
11017 					udelay(40);
11018 				}
11019 				tg3_setup_phy(tp, false);
11020 			}
11021 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11022 			   tg3_flag(tp, 5780_CLASS)) {
11023 			tg3_serdes_parallel_detect(tp);
11024 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11025 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11026 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11027 					 TG3_CPMU_STATUS_LINK_MASK);
11028 
11029 			if (link_up != tp->link_up)
11030 				tg3_setup_phy(tp, false);
11031 		}
11032 
11033 		tp->timer_counter = tp->timer_multiplier;
11034 	}
11035 
11036 	/* Heartbeat is only sent once every 2 seconds.
11037 	 *
11038 	 * The heartbeat is to tell the ASF firmware that the host
11039 	 * driver is still alive.  In the event that the OS crashes,
11040 	 * ASF needs to reset the hardware to free up the FIFO space
11041 	 * that may be filled with rx packets destined for the host.
11042 	 * If the FIFO is full, ASF will no longer function properly.
11043 	 *
11044 	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
11047 	 *
11048 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11049 	 * to check the ring condition when the heartbeat is expiring
11050 	 * before doing the reset.  This will prevent most unintended
11051 	 * resets.
11052 	 */
11053 	if (!--tp->asf_counter) {
11054 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11055 			tg3_wait_for_event_ack(tp);
11056 
11057 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11058 				      FWCMD_NICDRV_ALIVE3);
11059 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11060 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11061 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11062 
11063 			tg3_generate_fw_event(tp);
11064 		}
11065 		tp->asf_counter = tp->asf_multiplier;
11066 	}
11067 
11068 	spin_unlock(&tp->lock);
11069 
11070 restart_timer:
11071 	tp->timer.expires = jiffies + tp->timer_offset;
11072 	add_timer(&tp->timer);
11073 }
11074 
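/* The timer ticks once per second when tagged status removes the need
 * for the IRQ race workaround in tg3_timer(), and ten times per second
 * otherwise (the 5717 and 57765-class chips always use the fast tick
 * for the missed-MSI check).  timer_multiplier converts ticks back to
 * seconds: e.g. with timer_offset = HZ / 10, the once-per-second block
 * in tg3_timer() runs every tenth tick.
 */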
11075 static void tg3_timer_init(struct tg3 *tp)
11076 {
11077 	if (tg3_flag(tp, TAGGED_STATUS) &&
11078 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11079 	    !tg3_flag(tp, 57765_CLASS))
11080 		tp->timer_offset = HZ;
11081 	else
11082 		tp->timer_offset = HZ / 10;
11083 
11084 	BUG_ON(tp->timer_offset > HZ);
11085 
11086 	tp->timer_multiplier = (HZ / tp->timer_offset);
11087 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11088 			     TG3_FW_UPDATE_FREQ_SEC;
11089 
11090 	init_timer(&tp->timer);
11091 	tp->timer.data = (unsigned long) tp;
11092 	tp->timer.function = tg3_timer;
11093 }
11094 
11095 static void tg3_timer_start(struct tg3 *tp)
11096 {
11097 	tp->asf_counter   = tp->asf_multiplier;
11098 	tp->timer_counter = tp->timer_multiplier;
11099 
11100 	tp->timer.expires = jiffies + tp->timer_offset;
11101 	add_timer(&tp->timer);
11102 }
11103 
11104 static void tg3_timer_stop(struct tg3 *tp)
11105 {
11106 	del_timer_sync(&tp->timer);
11107 }
11108 
11109 /* Restart hardware after configuration changes, self-test, etc.
11110  * Invoked with tp->lock held.
11111  */
11112 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11113 	__releases(tp->lock)
11114 	__acquires(tp->lock)
11115 {
11116 	int err;
11117 
11118 	err = tg3_init_hw(tp, reset_phy);
11119 	if (err) {
11120 		netdev_err(tp->dev,
11121 			   "Failed to re-initialize device, aborting\n");
11122 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11123 		tg3_full_unlock(tp);
11124 		tg3_timer_stop(tp);
11125 		tp->irq_sync = 0;
11126 		tg3_napi_enable(tp);
11127 		dev_close(tp->dev);
11128 		tg3_full_lock(tp, 0);
11129 	}
11130 	return err;
11131 }
11132 
11133 static void tg3_reset_task(struct work_struct *work)
11134 {
11135 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11136 	int err;
11137 
11138 	rtnl_lock();
11139 	tg3_full_lock(tp, 0);
11140 
11141 	if (!netif_running(tp->dev)) {
11142 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11143 		tg3_full_unlock(tp);
11144 		rtnl_unlock();
11145 		return;
11146 	}
11147 
11148 	tg3_full_unlock(tp);
11149 
11150 	tg3_phy_stop(tp);
11151 
11152 	tg3_netif_stop(tp);
11153 
11154 	tg3_full_lock(tp, 1);
11155 
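	/* A TX timeout may mean posted mailbox writes reached the chip
	 * out of order; switch to the flushing write handlers as a
	 * precaution before reinitializing.
	 */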
11156 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11157 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11158 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11159 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11160 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11161 	}
11162 
11163 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11164 	err = tg3_init_hw(tp, true);
11165 	if (err)
11166 		goto out;
11167 
11168 	tg3_netif_start(tp);
11169 
11170 out:
11171 	tg3_full_unlock(tp);
11172 
11173 	if (!err)
11174 		tg3_phy_start(tp);
11175 
11176 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11177 	rtnl_unlock();
11178 }
11179 
11180 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11181 {
11182 	irq_handler_t fn;
11183 	unsigned long flags;
11184 	char *name;
11185 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11186 
11187 	if (tp->irq_cnt == 1)
11188 		name = tp->dev->name;
11189 	else {
11190 		name = &tnapi->irq_lbl[0];
11191 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11192 			snprintf(name, IFNAMSIZ,
11193 				 "%s-txrx-%d", tp->dev->name, irq_num);
11194 		else if (tnapi->tx_buffers)
11195 			snprintf(name, IFNAMSIZ,
11196 				 "%s-tx-%d", tp->dev->name, irq_num);
11197 		else if (tnapi->rx_rcb)
11198 			snprintf(name, IFNAMSIZ,
11199 				 "%s-rx-%d", tp->dev->name, irq_num);
11200 		else
11201 			snprintf(name, IFNAMSIZ,
11202 				 "%s-%d", tp->dev->name, irq_num);
11203 		name[IFNAMSIZ-1] = 0;
11204 	}
11205 
11206 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11207 		fn = tg3_msi;
11208 		if (tg3_flag(tp, 1SHOT_MSI))
11209 			fn = tg3_msi_1shot;
11210 		flags = 0;
11211 	} else {
11212 		fn = tg3_interrupt;
11213 		if (tg3_flag(tp, TAGGED_STATUS))
11214 			fn = tg3_interrupt_tagged;
11215 		flags = IRQF_SHARED;
11216 	}
11217 
11218 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11219 }
11220 
11221 static int tg3_test_interrupt(struct tg3 *tp)
11222 {
11223 	struct tg3_napi *tnapi = &tp->napi[0];
11224 	struct net_device *dev = tp->dev;
11225 	int err, i, intr_ok = 0;
11226 	u32 val;
11227 
11228 	if (!netif_running(dev))
11229 		return -ENODEV;
11230 
11231 	tg3_disable_ints(tp);
11232 
11233 	free_irq(tnapi->irq_vec, tnapi);
11234 
11235 	/*
11236 	 * Turn off MSI one shot mode.  Otherwise this test has no
11237 	 * observable way to know whether the interrupt was delivered.
11238 	 */
11239 	if (tg3_flag(tp, 57765_PLUS)) {
11240 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11241 		tw32(MSGINT_MODE, val);
11242 	}
11243 
11244 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11245 			  IRQF_SHARED, dev->name, tnapi);
11246 	if (err)
11247 		return err;
11248 
11249 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11250 	tg3_enable_ints(tp);
11251 
11252 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11253 	       tnapi->coal_now);
11254 
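	/* Give the test interrupt up to ~50ms to arrive; delivery shows
	 * up as a nonzero interrupt mailbox value or as the PCI
	 * interrupt having been masked off by the ISR.
	 */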
11255 	for (i = 0; i < 5; i++) {
11256 		u32 int_mbox, misc_host_ctrl;
11257 
11258 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11259 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11260 
11261 		if ((int_mbox != 0) ||
11262 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11263 			intr_ok = 1;
11264 			break;
11265 		}
11266 
11267 		if (tg3_flag(tp, 57765_PLUS) &&
11268 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11269 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11270 
11271 		msleep(10);
11272 	}
11273 
11274 	tg3_disable_ints(tp);
11275 
11276 	free_irq(tnapi->irq_vec, tnapi);
11277 
11278 	err = tg3_request_irq(tp, 0);
11279 
11280 	if (err)
11281 		return err;
11282 
11283 	if (intr_ok) {
11284 		/* Reenable MSI one shot mode. */
11285 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11286 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11287 			tw32(MSGINT_MODE, val);
11288 		}
11289 		return 0;
11290 	}
11291 
11292 	return -EIO;
11293 }
11294 
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 * INTx mode is successfully restored.
11297  */
11298 static int tg3_test_msi(struct tg3 *tp)
11299 {
11300 	int err;
11301 	u16 pci_cmd;
11302 
11303 	if (!tg3_flag(tp, USING_MSI))
11304 		return 0;
11305 
11306 	/* Turn off SERR reporting in case MSI terminates with Master
11307 	 * Abort.
11308 	 */
11309 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11310 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11311 			      pci_cmd & ~PCI_COMMAND_SERR);
11312 
11313 	err = tg3_test_interrupt(tp);
11314 
11315 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11316 
11317 	if (!err)
11318 		return 0;
11319 
11320 	/* other failures */
11321 	if (err != -EIO)
11322 		return err;
11323 
11324 	/* MSI test failed, go back to INTx mode */
11325 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11326 		    "to INTx mode. Please report this failure to the PCI "
11327 		    "maintainer and include system chipset information\n");
11328 
11329 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11330 
11331 	pci_disable_msi(tp->pdev);
11332 
11333 	tg3_flag_clear(tp, USING_MSI);
11334 	tp->napi[0].irq_vec = tp->pdev->irq;
11335 
11336 	err = tg3_request_irq(tp, 0);
11337 	if (err)
11338 		return err;
11339 
11340 	/* Need to reset the chip because the MSI cycle may have terminated
11341 	 * with Master Abort.
11342 	 */
11343 	tg3_full_lock(tp, 1);
11344 
11345 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11346 	err = tg3_init_hw(tp, true);
11347 
11348 	tg3_full_unlock(tp);
11349 
11350 	if (err)
11351 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11352 
11353 	return err;
11354 }
11355 
11356 static int tg3_request_firmware(struct tg3 *tp)
11357 {
11358 	const struct tg3_firmware_hdr *fw_hdr;
11359 
11360 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11361 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11362 			   tp->fw_needed);
11363 		return -ENOENT;
11364 	}
11365 
11366 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11367 
11368 	/* Firmware blob starts with version numbers, followed by
11369 	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
11371 	 */
11372 
11373 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11374 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11375 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11376 			   tp->fw_len, tp->fw_needed);
11377 		release_firmware(tp->fw);
11378 		tp->fw = NULL;
11379 		return -EINVAL;
11380 	}
11381 
11382 	/* We no longer need firmware; we have it. */
11383 	tp->fw_needed = NULL;
11384 	return 0;
11385 }
11386 
11387 static u32 tg3_irq_count(struct tg3 *tp)
11388 {
11389 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11390 
11391 	if (irq_cnt > 1) {
11392 		/* We want as many rx rings enabled as there are cpus.
11393 		 * In multiqueue MSI-X mode, the first MSI-X vector
11394 		 * only deals with link interrupts, etc, so we add
11395 		 * one to the number of vectors we are requesting.
11396 		 */
11397 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11398 	}
11399 
11400 	return irq_cnt;
11401 }
11402 
11403 static bool tg3_enable_msix(struct tg3 *tp)
11404 {
11405 	int i, rc;
11406 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11407 
11408 	tp->txq_cnt = tp->txq_req;
11409 	tp->rxq_cnt = tp->rxq_req;
11410 	if (!tp->rxq_cnt)
11411 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11412 	if (tp->rxq_cnt > tp->rxq_max)
11413 		tp->rxq_cnt = tp->rxq_max;
11414 
11415 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11416 	 * scheduling of the TX rings can cause starvation of rings with
11417 	 * small packets when other rings have TSO or jumbo packets.
11418 	 */
11419 	if (!tp->txq_req)
11420 		tp->txq_cnt = 1;
11421 
11422 	tp->irq_cnt = tg3_irq_count(tp);
11423 
11424 	for (i = 0; i < tp->irq_max; i++) {
11425 		msix_ent[i].entry  = i;
11426 		msix_ent[i].vector = 0;
11427 	}
11428 
11429 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11430 	if (rc < 0) {
11431 		return false;
11432 	} else if (rc < tp->irq_cnt) {
11433 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11434 			      tp->irq_cnt, rc);
11435 		tp->irq_cnt = rc;
11436 		tp->rxq_cnt = max(rc - 1, 1);
11437 		if (tp->txq_cnt)
11438 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11439 	}
11440 
11441 	for (i = 0; i < tp->irq_max; i++)
11442 		tp->napi[i].irq_vec = msix_ent[i].vector;
11443 
11444 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11445 		pci_disable_msix(tp->pdev);
11446 		return false;
11447 	}
11448 
11449 	if (tp->irq_cnt == 1)
11450 		return true;
11451 
11452 	tg3_flag_set(tp, ENABLE_RSS);
11453 
11454 	if (tp->txq_cnt > 1)
11455 		tg3_flag_set(tp, ENABLE_TSS);
11456 
11457 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11458 
11459 	return true;
11460 }
11461 
11462 static void tg3_ints_init(struct tg3 *tp)
11463 {
11464 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11465 	    !tg3_flag(tp, TAGGED_STATUS)) {
11466 		/* All MSI supporting chips should support tagged
11467 		 * status.  Assert that this is the case.
11468 		 */
11469 		netdev_warn(tp->dev,
11470 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11471 		goto defcfg;
11472 	}
11473 
11474 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11475 		tg3_flag_set(tp, USING_MSIX);
11476 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11477 		tg3_flag_set(tp, USING_MSI);
11478 
11479 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11480 		u32 msi_mode = tr32(MSGINT_MODE);
11481 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11482 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11483 		if (!tg3_flag(tp, 1SHOT_MSI))
11484 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11485 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11486 	}
11487 defcfg:
11488 	if (!tg3_flag(tp, USING_MSIX)) {
11489 		tp->irq_cnt = 1;
11490 		tp->napi[0].irq_vec = tp->pdev->irq;
11491 	}
11492 
11493 	if (tp->irq_cnt == 1) {
11494 		tp->txq_cnt = 1;
11495 		tp->rxq_cnt = 1;
11496 		netif_set_real_num_tx_queues(tp->dev, 1);
11497 		netif_set_real_num_rx_queues(tp->dev, 1);
11498 	}
11499 }
11500 
11501 static void tg3_ints_fini(struct tg3 *tp)
11502 {
11503 	if (tg3_flag(tp, USING_MSIX))
11504 		pci_disable_msix(tp->pdev);
11505 	else if (tg3_flag(tp, USING_MSI))
11506 		pci_disable_msi(tp->pdev);
11507 	tg3_flag_clear(tp, USING_MSI);
11508 	tg3_flag_clear(tp, USING_MSIX);
11509 	tg3_flag_clear(tp, ENABLE_RSS);
11510 	tg3_flag_clear(tp, ENABLE_TSS);
11511 }
11512 
11513 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11514 		     bool init)
11515 {
11516 	struct net_device *dev = tp->dev;
11517 	int i, err;
11518 
11519 	/*
11520 	 * Setup interrupts first so we know how
11521 	 * many NAPI resources to allocate
11522 	 */
11523 	tg3_ints_init(tp);
11524 
11525 	tg3_rss_check_indir_tbl(tp);
11526 
11527 	/* The placement of this call is tied
11528 	 * to the setup and use of Host TX descriptors.
11529 	 */
11530 	err = tg3_alloc_consistent(tp);
11531 	if (err)
11532 		goto out_ints_fini;
11533 
11534 	tg3_napi_init(tp);
11535 
11536 	tg3_napi_enable(tp);
11537 
11538 	for (i = 0; i < tp->irq_cnt; i++) {
11539 		struct tg3_napi *tnapi = &tp->napi[i];
11540 		err = tg3_request_irq(tp, i);
11541 		if (err) {
11542 			for (i--; i >= 0; i--) {
11543 				tnapi = &tp->napi[i];
11544 				free_irq(tnapi->irq_vec, tnapi);
11545 			}
11546 			goto out_napi_fini;
11547 		}
11548 	}
11549 
11550 	tg3_full_lock(tp, 0);
11551 
11552 	if (init)
11553 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11554 
11555 	err = tg3_init_hw(tp, reset_phy);
11556 	if (err) {
11557 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11558 		tg3_free_rings(tp);
11559 	}
11560 
11561 	tg3_full_unlock(tp);
11562 
11563 	if (err)
11564 		goto out_free_irq;
11565 
11566 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11567 		err = tg3_test_msi(tp);
11568 
11569 		if (err) {
11570 			tg3_full_lock(tp, 0);
11571 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11572 			tg3_free_rings(tp);
11573 			tg3_full_unlock(tp);
11574 
11575 			goto out_napi_fini;
11576 		}
11577 
11578 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11579 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11580 
11581 			tw32(PCIE_TRANSACTION_CFG,
11582 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11583 		}
11584 	}
11585 
11586 	tg3_phy_start(tp);
11587 
11588 	tg3_hwmon_open(tp);
11589 
11590 	tg3_full_lock(tp, 0);
11591 
11592 	tg3_timer_start(tp);
11593 	tg3_flag_set(tp, INIT_COMPLETE);
11594 	tg3_enable_ints(tp);
11595 
11596 	tg3_ptp_resume(tp);
11597 
11598 	tg3_full_unlock(tp);
11599 
11600 	netif_tx_start_all_queues(dev);
11601 
11602 	/*
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it is reinstalled properly now.
11605 	 */
11606 	if (dev->features & NETIF_F_LOOPBACK)
11607 		tg3_set_loopback(dev, dev->features);
11608 
11609 	return 0;
11610 
11611 out_free_irq:
11612 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11613 		struct tg3_napi *tnapi = &tp->napi[i];
11614 		free_irq(tnapi->irq_vec, tnapi);
11615 	}
11616 
11617 out_napi_fini:
11618 	tg3_napi_disable(tp);
11619 	tg3_napi_fini(tp);
11620 	tg3_free_consistent(tp);
11621 
11622 out_ints_fini:
11623 	tg3_ints_fini(tp);
11624 
11625 	return err;
11626 }
11627 
11628 static void tg3_stop(struct tg3 *tp)
11629 {
11630 	int i;
11631 
11632 	tg3_reset_task_cancel(tp);
11633 	tg3_netif_stop(tp);
11634 
11635 	tg3_timer_stop(tp);
11636 
11637 	tg3_hwmon_close(tp);
11638 
11639 	tg3_phy_stop(tp);
11640 
11641 	tg3_full_lock(tp, 1);
11642 
11643 	tg3_disable_ints(tp);
11644 
11645 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11646 	tg3_free_rings(tp);
11647 	tg3_flag_clear(tp, INIT_COMPLETE);
11648 
11649 	tg3_full_unlock(tp);
11650 
11651 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11652 		struct tg3_napi *tnapi = &tp->napi[i];
11653 		free_irq(tnapi->irq_vec, tnapi);
11654 	}
11655 
11656 	tg3_ints_fini(tp);
11657 
11658 	tg3_napi_fini(tp);
11659 
11660 	tg3_free_consistent(tp);
11661 }
11662 
11663 static int tg3_open(struct net_device *dev)
11664 {
11665 	struct tg3 *tp = netdev_priv(dev);
11666 	int err;
11667 
11668 	if (tp->pcierr_recovery) {
11669 		netdev_err(dev, "Failed to open device. PCI error recovery "
11670 			   "in progress\n");
11671 		return -EAGAIN;
11672 	}
11673 
11674 	if (tp->fw_needed) {
11675 		err = tg3_request_firmware(tp);
11676 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11677 			if (err) {
11678 				netdev_warn(tp->dev, "EEE capability disabled\n");
11679 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11680 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11681 				netdev_warn(tp->dev, "EEE capability restored\n");
11682 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11683 			}
11684 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11685 			if (err)
11686 				return err;
11687 		} else if (err) {
11688 			netdev_warn(tp->dev, "TSO capability disabled\n");
11689 			tg3_flag_clear(tp, TSO_CAPABLE);
11690 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11691 			netdev_notice(tp->dev, "TSO capability restored\n");
11692 			tg3_flag_set(tp, TSO_CAPABLE);
11693 		}
11694 	}
11695 
11696 	tg3_carrier_off(tp);
11697 
11698 	err = tg3_power_up(tp);
11699 	if (err)
11700 		return err;
11701 
11702 	tg3_full_lock(tp, 0);
11703 
11704 	tg3_disable_ints(tp);
11705 	tg3_flag_clear(tp, INIT_COMPLETE);
11706 
11707 	tg3_full_unlock(tp);
11708 
11709 	err = tg3_start(tp,
11710 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11711 			true, true);
11712 	if (err) {
11713 		tg3_frob_aux_power(tp, false);
11714 		pci_set_power_state(tp->pdev, PCI_D3hot);
11715 	}
11716 
11717 	return err;
11718 }
11719 
11720 static int tg3_close(struct net_device *dev)
11721 {
11722 	struct tg3 *tp = netdev_priv(dev);
11723 
11724 	if (tp->pcierr_recovery) {
11725 		netdev_err(dev, "Failed to close device. PCI error recovery "
11726 			   "in progress\n");
11727 		return -EAGAIN;
11728 	}
11729 
11730 	tg3_stop(tp);
11731 
11732 	/* Clear stats across close / open calls */
11733 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11734 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11735 
11736 	if (pci_device_is_present(tp->pdev)) {
11737 		tg3_power_down_prepare(tp);
11738 
11739 		tg3_carrier_off(tp);
11740 	}
11741 	return 0;
11742 }
11743 
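/* Hardware statistics counters are kept as two 32-bit halves; splice
 * them back into a single 64-bit value.
 */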
11744 static inline u64 get_stat64(tg3_stat64_t *val)
11745 {
	return ((u64)val->high << 32) | ((u64)val->low);
11747 }
11748 
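/* On 5700/5701 copper devices the CRC error count must be read from
 * the PHY test registers and accumulated in software; everything else
 * reports it through rx_fcs_errors in the hardware statistics block.
 */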
11749 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11750 {
11751 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11752 
11753 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11754 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11755 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11756 		u32 val;
11757 
11758 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11759 			tg3_writephy(tp, MII_TG3_TEST1,
11760 				     val | MII_TG3_TEST1_CRC_EN);
11761 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11762 		} else
11763 			val = 0;
11764 
11765 		tp->phy_crc_errors += val;
11766 
11767 		return tp->phy_crc_errors;
11768 	}
11769 
11770 	return get_stat64(&hw_stats->rx_fcs_errors);
11771 }
11772 
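/* Fold the totals saved across the last close/reset into the live
 * hardware counters so the ethtool statistics stay monotonic.
 */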
11773 #define ESTAT_ADD(member) \
11774 	estats->member =	old_estats->member + \
11775 				get_stat64(&hw_stats->member)
11776 
11777 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11778 {
11779 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11780 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11781 
11782 	ESTAT_ADD(rx_octets);
11783 	ESTAT_ADD(rx_fragments);
11784 	ESTAT_ADD(rx_ucast_packets);
11785 	ESTAT_ADD(rx_mcast_packets);
11786 	ESTAT_ADD(rx_bcast_packets);
11787 	ESTAT_ADD(rx_fcs_errors);
11788 	ESTAT_ADD(rx_align_errors);
11789 	ESTAT_ADD(rx_xon_pause_rcvd);
11790 	ESTAT_ADD(rx_xoff_pause_rcvd);
11791 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11792 	ESTAT_ADD(rx_xoff_entered);
11793 	ESTAT_ADD(rx_frame_too_long_errors);
11794 	ESTAT_ADD(rx_jabbers);
11795 	ESTAT_ADD(rx_undersize_packets);
11796 	ESTAT_ADD(rx_in_length_errors);
11797 	ESTAT_ADD(rx_out_length_errors);
11798 	ESTAT_ADD(rx_64_or_less_octet_packets);
11799 	ESTAT_ADD(rx_65_to_127_octet_packets);
11800 	ESTAT_ADD(rx_128_to_255_octet_packets);
11801 	ESTAT_ADD(rx_256_to_511_octet_packets);
11802 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11803 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11804 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11805 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11806 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11807 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11808 
11809 	ESTAT_ADD(tx_octets);
11810 	ESTAT_ADD(tx_collisions);
11811 	ESTAT_ADD(tx_xon_sent);
11812 	ESTAT_ADD(tx_xoff_sent);
11813 	ESTAT_ADD(tx_flow_control);
11814 	ESTAT_ADD(tx_mac_errors);
11815 	ESTAT_ADD(tx_single_collisions);
11816 	ESTAT_ADD(tx_mult_collisions);
11817 	ESTAT_ADD(tx_deferred);
11818 	ESTAT_ADD(tx_excessive_collisions);
11819 	ESTAT_ADD(tx_late_collisions);
11820 	ESTAT_ADD(tx_collide_2times);
11821 	ESTAT_ADD(tx_collide_3times);
11822 	ESTAT_ADD(tx_collide_4times);
11823 	ESTAT_ADD(tx_collide_5times);
11824 	ESTAT_ADD(tx_collide_6times);
11825 	ESTAT_ADD(tx_collide_7times);
11826 	ESTAT_ADD(tx_collide_8times);
11827 	ESTAT_ADD(tx_collide_9times);
11828 	ESTAT_ADD(tx_collide_10times);
11829 	ESTAT_ADD(tx_collide_11times);
11830 	ESTAT_ADD(tx_collide_12times);
11831 	ESTAT_ADD(tx_collide_13times);
11832 	ESTAT_ADD(tx_collide_14times);
11833 	ESTAT_ADD(tx_collide_15times);
11834 	ESTAT_ADD(tx_ucast_packets);
11835 	ESTAT_ADD(tx_mcast_packets);
11836 	ESTAT_ADD(tx_bcast_packets);
11837 	ESTAT_ADD(tx_carrier_sense_errors);
11838 	ESTAT_ADD(tx_discards);
11839 	ESTAT_ADD(tx_errors);
11840 
11841 	ESTAT_ADD(dma_writeq_full);
11842 	ESTAT_ADD(dma_write_prioq_full);
11843 	ESTAT_ADD(rxbds_empty);
11844 	ESTAT_ADD(rx_discards);
11845 	ESTAT_ADD(rx_errors);
11846 	ESTAT_ADD(rx_threshold_hit);
11847 
11848 	ESTAT_ADD(dma_readq_full);
11849 	ESTAT_ADD(dma_read_prioq_full);
11850 	ESTAT_ADD(tx_comp_queue_full);
11851 
11852 	ESTAT_ADD(ring_set_send_prod_index);
11853 	ESTAT_ADD(ring_status_update);
11854 	ESTAT_ADD(nic_irqs);
11855 	ESTAT_ADD(nic_avoided_irqs);
11856 	ESTAT_ADD(nic_tx_threshold_hit);
11857 
11858 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11859 }
11860 
11861 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11862 {
11863 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11864 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11865 
11866 	stats->rx_packets = old_stats->rx_packets +
11867 		get_stat64(&hw_stats->rx_ucast_packets) +
11868 		get_stat64(&hw_stats->rx_mcast_packets) +
11869 		get_stat64(&hw_stats->rx_bcast_packets);
11870 
11871 	stats->tx_packets = old_stats->tx_packets +
11872 		get_stat64(&hw_stats->tx_ucast_packets) +
11873 		get_stat64(&hw_stats->tx_mcast_packets) +
11874 		get_stat64(&hw_stats->tx_bcast_packets);
11875 
11876 	stats->rx_bytes = old_stats->rx_bytes +
11877 		get_stat64(&hw_stats->rx_octets);
11878 	stats->tx_bytes = old_stats->tx_bytes +
11879 		get_stat64(&hw_stats->tx_octets);
11880 
11881 	stats->rx_errors = old_stats->rx_errors +
11882 		get_stat64(&hw_stats->rx_errors);
11883 	stats->tx_errors = old_stats->tx_errors +
11884 		get_stat64(&hw_stats->tx_errors) +
11885 		get_stat64(&hw_stats->tx_mac_errors) +
11886 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11887 		get_stat64(&hw_stats->tx_discards);
11888 
11889 	stats->multicast = old_stats->multicast +
11890 		get_stat64(&hw_stats->rx_mcast_packets);
11891 	stats->collisions = old_stats->collisions +
11892 		get_stat64(&hw_stats->tx_collisions);
11893 
11894 	stats->rx_length_errors = old_stats->rx_length_errors +
11895 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11896 		get_stat64(&hw_stats->rx_undersize_packets);
11897 
11898 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11899 		get_stat64(&hw_stats->rx_align_errors);
11900 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11901 		get_stat64(&hw_stats->tx_discards);
11902 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11903 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11904 
11905 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11906 		tg3_calc_crc_errors(tp);
11907 
11908 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11909 		get_stat64(&hw_stats->rx_discards);
11910 
11911 	stats->rx_dropped = tp->rx_dropped;
11912 	stats->tx_dropped = tp->tx_dropped;
11913 }
11914 
11915 static int tg3_get_regs_len(struct net_device *dev)
11916 {
11917 	return TG3_REG_BLK_SIZE;
11918 }
11919 
11920 static void tg3_get_regs(struct net_device *dev,
11921 		struct ethtool_regs *regs, void *_p)
11922 {
11923 	struct tg3 *tp = netdev_priv(dev);
11924 
11925 	regs->version = 0;
11926 
11927 	memset(_p, 0, TG3_REG_BLK_SIZE);
11928 
11929 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11930 		return;
11931 
11932 	tg3_full_lock(tp, 0);
11933 
11934 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11935 
11936 	tg3_full_unlock(tp);
11937 }
11938 
11939 static int tg3_get_eeprom_len(struct net_device *dev)
11940 {
11941 	struct tg3 *tp = netdev_priv(dev);
11942 
11943 	return tp->nvram_size;
11944 }
11945 
11946 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11947 {
11948 	struct tg3 *tp = netdev_priv(dev);
11949 	int ret, cpmu_restore = 0;
11950 	u8  *pd;
11951 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11952 	__be32 val;
11953 
11954 	if (tg3_flag(tp, NO_NVRAM))
11955 		return -EINVAL;
11956 
11957 	offset = eeprom->offset;
11958 	len = eeprom->len;
11959 	eeprom->len = 0;
11960 
11961 	eeprom->magic = TG3_EEPROM_MAGIC;
11962 
11963 	/* Override clock, link aware and link idle modes */
11964 	if (tg3_flag(tp, CPMU_PRESENT)) {
11965 		cpmu_val = tr32(TG3_CPMU_CTRL);
11966 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11967 				CPMU_CTRL_LINK_IDLE_MODE)) {
11968 			tw32(TG3_CPMU_CTRL, cpmu_val &
11969 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
11970 					     CPMU_CTRL_LINK_IDLE_MODE));
11971 			cpmu_restore = 1;
11972 		}
11973 	}
11974 	tg3_override_clk(tp);
11975 
11976 	if (offset & 3) {
11977 		/* adjustments to start on required 4 byte boundary */
11978 		b_offset = offset & 3;
11979 		b_count = 4 - b_offset;
11980 		if (b_count > len) {
11981 			/* i.e. offset=1 len=2 */
11982 			b_count = len;
11983 		}
11984 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11985 		if (ret)
11986 			goto eeprom_done;
11987 		memcpy(data, ((char *)&val) + b_offset, b_count);
11988 		len -= b_count;
11989 		offset += b_count;
11990 		eeprom->len += b_count;
11991 	}
11992 
11993 	/* read bytes up to the last 4 byte boundary */
11994 	pd = &data[eeprom->len];
11995 	for (i = 0; i < (len - (len & 3)); i += 4) {
11996 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11997 		if (ret) {
11998 			if (i)
11999 				i -= 4;
12000 			eeprom->len += i;
12001 			goto eeprom_done;
12002 		}
12003 		memcpy(pd + i, &val, 4);
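		/* NVRAM reads are slow; yield the CPU periodically and
		 * bail out early if the caller has been signalled.
		 */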
12004 		if (need_resched()) {
12005 			if (signal_pending(current)) {
12006 				eeprom->len += i;
12007 				ret = -EINTR;
12008 				goto eeprom_done;
12009 			}
12010 			cond_resched();
12011 		}
12012 	}
12013 	eeprom->len += i;
12014 
12015 	if (len & 3) {
12016 		/* read last bytes not ending on 4 byte boundary */
12017 		pd = &data[eeprom->len];
12018 		b_count = len & 3;
12019 		b_offset = offset + len - b_count;
12020 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12021 		if (ret)
12022 			goto eeprom_done;
12023 		memcpy(pd, &val, b_count);
12024 		eeprom->len += b_count;
12025 	}
12026 	ret = 0;
12027 
12028 eeprom_done:
12029 	/* Restore clock, link aware and link idle modes */
12030 	tg3_restore_clk(tp);
12031 	if (cpmu_restore)
12032 		tw32(TG3_CPMU_CTRL, cpmu_val);
12033 
12034 	return ret;
12035 }
12036 
12037 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12038 {
12039 	struct tg3 *tp = netdev_priv(dev);
12040 	int ret;
12041 	u32 offset, len, b_offset, odd_len;
12042 	u8 *buf;
12043 	__be32 start = 0, end;
12044 
12045 	if (tg3_flag(tp, NO_NVRAM) ||
12046 	    eeprom->magic != TG3_EEPROM_MAGIC)
12047 		return -EINVAL;
12048 
12049 	offset = eeprom->offset;
12050 	len = eeprom->len;
12051 
	b_offset = offset & 3;
	if (b_offset) {
12053 		/* adjustments to start on required 4 byte boundary */
12054 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12055 		if (ret)
12056 			return ret;
12057 		len += b_offset;
12058 		offset &= ~3;
12059 		if (len < 4)
12060 			len = 4;
12061 	}
12062 
12063 	odd_len = 0;
12064 	if (len & 3) {
12065 		/* adjustments to end on required 4 byte boundary */
12066 		odd_len = 1;
12067 		len = (len + 3) & ~3;
12068 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12069 		if (ret)
12070 			return ret;
12071 	}
12072 
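	/* NVRAM writes must be 4-byte aligned at both ends.  If the
	 * caller's range is not, merge it into a bounce buffer padded
	 * with the words already present in NVRAM.
	 */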
12073 	buf = data;
12074 	if (b_offset || odd_len) {
12075 		buf = kmalloc(len, GFP_KERNEL);
12076 		if (!buf)
12077 			return -ENOMEM;
12078 		if (b_offset)
12079 			memcpy(buf, &start, 4);
12080 		if (odd_len)
12081 			memcpy(buf+len-4, &end, 4);
12082 		memcpy(buf + b_offset, data, eeprom->len);
12083 	}
12084 
12085 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12086 
12087 	if (buf != data)
12088 		kfree(buf);
12089 
12090 	return ret;
12091 }
12092 
12093 static int tg3_get_link_ksettings(struct net_device *dev,
12094 				  struct ethtool_link_ksettings *cmd)
12095 {
12096 	struct tg3 *tp = netdev_priv(dev);
12097 	u32 supported, advertising;
12098 
12099 	if (tg3_flag(tp, USE_PHYLIB)) {
12100 		struct phy_device *phydev;
12101 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12102 			return -EAGAIN;
12103 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12104 		return phy_ethtool_ksettings_get(phydev, cmd);
12105 	}
12106 
12107 	supported = (SUPPORTED_Autoneg);
12108 
12109 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12110 		supported |= (SUPPORTED_1000baseT_Half |
12111 			      SUPPORTED_1000baseT_Full);
12112 
12113 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12114 		supported |= (SUPPORTED_100baseT_Half |
12115 			      SUPPORTED_100baseT_Full |
12116 			      SUPPORTED_10baseT_Half |
12117 			      SUPPORTED_10baseT_Full |
12118 			      SUPPORTED_TP);
12119 		cmd->base.port = PORT_TP;
12120 	} else {
12121 		supported |= SUPPORTED_FIBRE;
12122 		cmd->base.port = PORT_FIBRE;
12123 	}
12124 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12125 						supported);
12126 
12127 	advertising = tp->link_config.advertising;
12128 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12129 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12130 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12131 				advertising |= ADVERTISED_Pause;
12132 			} else {
12133 				advertising |= ADVERTISED_Pause |
12134 					ADVERTISED_Asym_Pause;
12135 			}
12136 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12137 			advertising |= ADVERTISED_Asym_Pause;
12138 		}
12139 	}
12140 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12141 						advertising);
12142 
12143 	if (netif_running(dev) && tp->link_up) {
12144 		cmd->base.speed = tp->link_config.active_speed;
12145 		cmd->base.duplex = tp->link_config.active_duplex;
12146 		ethtool_convert_legacy_u32_to_link_mode(
12147 			cmd->link_modes.lp_advertising,
12148 			tp->link_config.rmt_adv);
12149 
12150 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12151 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12152 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12153 			else
12154 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12155 		}
12156 	} else {
12157 		cmd->base.speed = SPEED_UNKNOWN;
12158 		cmd->base.duplex = DUPLEX_UNKNOWN;
12159 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12160 	}
12161 	cmd->base.phy_address = tp->phy_addr;
12162 	cmd->base.autoneg = tp->link_config.autoneg;
12163 	return 0;
12164 }
12165 
12166 static int tg3_set_link_ksettings(struct net_device *dev,
12167 				  const struct ethtool_link_ksettings *cmd)
12168 {
12169 	struct tg3 *tp = netdev_priv(dev);
12170 	u32 speed = cmd->base.speed;
12171 	u32 advertising;
12172 
12173 	if (tg3_flag(tp, USE_PHYLIB)) {
12174 		struct phy_device *phydev;
12175 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12176 			return -EAGAIN;
12177 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12178 		return phy_ethtool_ksettings_set(phydev, cmd);
12179 	}
12180 
12181 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12182 	    cmd->base.autoneg != AUTONEG_DISABLE)
12183 		return -EINVAL;
12184 
12185 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12186 	    cmd->base.duplex != DUPLEX_FULL &&
12187 	    cmd->base.duplex != DUPLEX_HALF)
12188 		return -EINVAL;
12189 
12190 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12191 						cmd->link_modes.advertising);
12192 
12193 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12194 		u32 mask = ADVERTISED_Autoneg |
12195 			   ADVERTISED_Pause |
12196 			   ADVERTISED_Asym_Pause;
12197 
12198 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12199 			mask |= ADVERTISED_1000baseT_Half |
12200 				ADVERTISED_1000baseT_Full;
12201 
12202 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12203 			mask |= ADVERTISED_100baseT_Half |
12204 				ADVERTISED_100baseT_Full |
12205 				ADVERTISED_10baseT_Half |
12206 				ADVERTISED_10baseT_Full |
12207 				ADVERTISED_TP;
12208 		else
12209 			mask |= ADVERTISED_FIBRE;
12210 
12211 		if (advertising & ~mask)
12212 			return -EINVAL;
12213 
12214 		mask &= (ADVERTISED_1000baseT_Half |
12215 			 ADVERTISED_1000baseT_Full |
12216 			 ADVERTISED_100baseT_Half |
12217 			 ADVERTISED_100baseT_Full |
12218 			 ADVERTISED_10baseT_Half |
12219 			 ADVERTISED_10baseT_Full);
12220 
12221 		advertising &= mask;
12222 	} else {
12223 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12224 			if (speed != SPEED_1000)
12225 				return -EINVAL;
12226 
12227 			if (cmd->base.duplex != DUPLEX_FULL)
12228 				return -EINVAL;
12229 		} else {
12230 			if (speed != SPEED_100 &&
12231 			    speed != SPEED_10)
12232 				return -EINVAL;
12233 		}
12234 	}
12235 
12236 	tg3_full_lock(tp, 0);
12237 
12238 	tp->link_config.autoneg = cmd->base.autoneg;
12239 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12240 		tp->link_config.advertising = (advertising |
12241 					      ADVERTISED_Autoneg);
12242 		tp->link_config.speed = SPEED_UNKNOWN;
12243 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12244 	} else {
12245 		tp->link_config.advertising = 0;
12246 		tp->link_config.speed = speed;
12247 		tp->link_config.duplex = cmd->base.duplex;
12248 	}
12249 
12250 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12251 
12252 	tg3_warn_mgmt_link_flap(tp);
12253 
12254 	if (netif_running(dev))
12255 		tg3_setup_phy(tp, true);
12256 
12257 	tg3_full_unlock(tp);
12258 
12259 	return 0;
12260 }
12261 
12262 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12263 {
12264 	struct tg3 *tp = netdev_priv(dev);
12265 
12266 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12267 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12268 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12269 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12270 }
12271 
12272 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12273 {
12274 	struct tg3 *tp = netdev_priv(dev);
12275 
12276 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12277 		wol->supported = WAKE_MAGIC;
12278 	else
12279 		wol->supported = 0;
12280 	wol->wolopts = 0;
12281 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12282 		wol->wolopts = WAKE_MAGIC;
12283 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12284 }
12285 
12286 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12287 {
12288 	struct tg3 *tp = netdev_priv(dev);
12289 	struct device *dp = &tp->pdev->dev;
12290 
12291 	if (wol->wolopts & ~WAKE_MAGIC)
12292 		return -EINVAL;
12293 	if ((wol->wolopts & WAKE_MAGIC) &&
12294 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12295 		return -EINVAL;
12296 
12297 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12298 
12299 	if (device_may_wakeup(dp))
12300 		tg3_flag_set(tp, WOL_ENABLE);
12301 	else
12302 		tg3_flag_clear(tp, WOL_ENABLE);
12303 
12304 	return 0;
12305 }
12306 
12307 static u32 tg3_get_msglevel(struct net_device *dev)
12308 {
12309 	struct tg3 *tp = netdev_priv(dev);
12310 	return tp->msg_enable;
12311 }
12312 
12313 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12314 {
12315 	struct tg3 *tp = netdev_priv(dev);
12316 	tp->msg_enable = value;
12317 }
12318 
12319 static int tg3_nway_reset(struct net_device *dev)
12320 {
12321 	struct tg3 *tp = netdev_priv(dev);
12322 	int r;
12323 
12324 	if (!netif_running(dev))
12325 		return -EAGAIN;
12326 
12327 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12328 		return -EINVAL;
12329 
12330 	tg3_warn_mgmt_link_flap(tp);
12331 
12332 	if (tg3_flag(tp, USE_PHYLIB)) {
12333 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12334 			return -EAGAIN;
12335 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12336 	} else {
12337 		u32 bmcr;
12338 
12339 		spin_lock_bh(&tp->lock);
12340 		r = -EINVAL;
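		/* The first read is a dummy whose result is discarded;
		 * only the checked second read below is used.
		 */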
12341 		tg3_readphy(tp, MII_BMCR, &bmcr);
12342 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12343 		    ((bmcr & BMCR_ANENABLE) ||
12344 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12345 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12346 						   BMCR_ANENABLE);
12347 			r = 0;
12348 		}
12349 		spin_unlock_bh(&tp->lock);
12350 	}
12351 
12352 	return r;
12353 }
12354 
12355 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12356 {
12357 	struct tg3 *tp = netdev_priv(dev);
12358 
12359 	ering->rx_max_pending = tp->rx_std_ring_mask;
12360 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12361 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12362 	else
12363 		ering->rx_jumbo_max_pending = 0;
12364 
12365 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12366 
12367 	ering->rx_pending = tp->rx_pending;
12368 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12369 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12370 	else
12371 		ering->rx_jumbo_pending = 0;
12372 
12373 	ering->tx_pending = tp->napi[0].tx_pending;
12374 }
12375 
12376 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12377 {
12378 	struct tg3 *tp = netdev_priv(dev);
12379 	int i, irq_sync = 0, err = 0;
12380 
12381 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12382 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12383 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12384 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12385 	    (tg3_flag(tp, TSO_BUG) &&
12386 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12387 		return -EINVAL;
12388 
12389 	if (netif_running(dev)) {
12390 		tg3_phy_stop(tp);
12391 		tg3_netif_stop(tp);
12392 		irq_sync = 1;
12393 	}
12394 
12395 	tg3_full_lock(tp, irq_sync);
12396 
12397 	tp->rx_pending = ering->rx_pending;
12398 
12399 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12400 	    tp->rx_pending > 63)
12401 		tp->rx_pending = 63;
12402 
12403 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12404 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12405 
12406 	for (i = 0; i < tp->irq_max; i++)
12407 		tp->napi[i].tx_pending = ering->tx_pending;
12408 
12409 	if (netif_running(dev)) {
12410 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12411 		err = tg3_restart_hw(tp, false);
12412 		if (!err)
12413 			tg3_netif_start(tp);
12414 	}
12415 
12416 	tg3_full_unlock(tp);
12417 
12418 	if (irq_sync && !err)
12419 		tg3_phy_start(tp);
12420 
12421 	return err;
12422 }
12423 
12424 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12425 {
12426 	struct tg3 *tp = netdev_priv(dev);
12427 
12428 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12429 
12430 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12431 		epause->rx_pause = 1;
12432 	else
12433 		epause->rx_pause = 0;
12434 
12435 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12436 		epause->tx_pause = 1;
12437 	else
12438 		epause->tx_pause = 0;
12439 }
12440 
12441 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12442 {
12443 	struct tg3 *tp = netdev_priv(dev);
12444 	int err = 0;
12445 
12446 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12447 		tg3_warn_mgmt_link_flap(tp);
12448 
12449 	if (tg3_flag(tp, USE_PHYLIB)) {
12450 		u32 newadv;
12451 		struct phy_device *phydev;
12452 
12453 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12454 
12455 		if (!(phydev->supported & SUPPORTED_Pause) ||
12456 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12457 		     (epause->rx_pause != epause->tx_pause)))
12458 			return -EINVAL;
12459 
12460 		tp->link_config.flowctrl = 0;
12461 		if (epause->rx_pause) {
12462 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12463 
12464 			if (epause->tx_pause) {
12465 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12466 				newadv = ADVERTISED_Pause;
12467 			} else
12468 				newadv = ADVERTISED_Pause |
12469 					 ADVERTISED_Asym_Pause;
12470 		} else if (epause->tx_pause) {
12471 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12472 			newadv = ADVERTISED_Asym_Pause;
12473 		} else
12474 			newadv = 0;
12475 
12476 		if (epause->autoneg)
12477 			tg3_flag_set(tp, PAUSE_AUTONEG);
12478 		else
12479 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12480 
12481 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12482 			u32 oldadv = phydev->advertising &
12483 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12484 			if (oldadv != newadv) {
12485 				phydev->advertising &=
12486 					~(ADVERTISED_Pause |
12487 					  ADVERTISED_Asym_Pause);
12488 				phydev->advertising |= newadv;
12489 				if (phydev->autoneg) {
12490 					/*
12491 					 * Always renegotiate the link to
12492 					 * inform our link partner of our
12493 					 * flow control settings, even if the
12494 					 * flow control is forced.  Let
12495 					 * tg3_adjust_link() do the final
12496 					 * flow control setup.
12497 					 */
12498 					return phy_start_aneg(phydev);
12499 				}
12500 			}
12501 
12502 			if (!epause->autoneg)
12503 				tg3_setup_flow_control(tp, 0, 0);
12504 		} else {
12505 			tp->link_config.advertising &=
12506 					~(ADVERTISED_Pause |
12507 					  ADVERTISED_Asym_Pause);
12508 			tp->link_config.advertising |= newadv;
12509 		}
12510 	} else {
12511 		int irq_sync = 0;
12512 
12513 		if (netif_running(dev)) {
12514 			tg3_netif_stop(tp);
12515 			irq_sync = 1;
12516 		}
12517 
12518 		tg3_full_lock(tp, irq_sync);
12519 
12520 		if (epause->autoneg)
12521 			tg3_flag_set(tp, PAUSE_AUTONEG);
12522 		else
12523 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12524 		if (epause->rx_pause)
12525 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12526 		else
12527 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12528 		if (epause->tx_pause)
12529 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12530 		else
12531 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12532 
12533 		if (netif_running(dev)) {
12534 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12535 			err = tg3_restart_hw(tp, false);
12536 			if (!err)
12537 				tg3_netif_start(tp);
12538 		}
12539 
12540 		tg3_full_unlock(tp);
12541 	}
12542 
12543 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12544 
12545 	return err;
12546 }
12547 
12548 static int tg3_get_sset_count(struct net_device *dev, int sset)
12549 {
12550 	switch (sset) {
12551 	case ETH_SS_TEST:
12552 		return TG3_NUM_TEST;
12553 	case ETH_SS_STATS:
12554 		return TG3_NUM_STATS;
12555 	default:
12556 		return -EOPNOTSUPP;
12557 	}
12558 }
12559 
12560 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12561 			 u32 *rules __always_unused)
12562 {
12563 	struct tg3 *tp = netdev_priv(dev);
12564 
12565 	if (!tg3_flag(tp, SUPPORT_MSIX))
12566 		return -EOPNOTSUPP;
12567 
12568 	switch (info->cmd) {
12569 	case ETHTOOL_GRXRINGS:
12570 		if (netif_running(tp->dev))
12571 			info->data = tp->rxq_cnt;
12572 		else {
12573 			info->data = num_online_cpus();
12574 			if (info->data > TG3_RSS_MAX_NUM_QS)
12575 				info->data = TG3_RSS_MAX_NUM_QS;
12576 		}
12577 
12578 		return 0;
12579 
12580 	default:
12581 		return -EOPNOTSUPP;
12582 	}
12583 }
12584 
12585 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12586 {
12587 	u32 size = 0;
12588 	struct tg3 *tp = netdev_priv(dev);
12589 
12590 	if (tg3_flag(tp, SUPPORT_MSIX))
12591 		size = TG3_RSS_INDIR_TBL_SIZE;
12592 
12593 	return size;
12594 }
12595 
12596 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12597 {
12598 	struct tg3 *tp = netdev_priv(dev);
12599 	int i;
12600 
12601 	if (hfunc)
12602 		*hfunc = ETH_RSS_HASH_TOP;
12603 	if (!indir)
12604 		return 0;
12605 
12606 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12607 		indir[i] = tp->rss_ind_tbl[i];
12608 
12609 	return 0;
12610 }
12611 
12612 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12613 			const u8 hfunc)
12614 {
12615 	struct tg3 *tp = netdev_priv(dev);
12616 	size_t i;
12617 
12618 	/* We require at least one supported parameter to be changed and no
12619 	 * change in any of the unsupported parameters
12620 	 */
12621 	if (key ||
12622 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12623 		return -EOPNOTSUPP;
12624 
12625 	if (!indir)
12626 		return 0;
12627 
12628 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12629 		tp->rss_ind_tbl[i] = indir[i];
12630 
12631 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12632 		return 0;
12633 
12634 	/* It is legal to write the indirection
12635 	 * table while the device is running.
12636 	 */
12637 	tg3_full_lock(tp, 0);
12638 	tg3_rss_write_indir_tbl(tp);
12639 	tg3_full_unlock(tp);
12640 
12641 	return 0;
12642 }
12643 
12644 static void tg3_get_channels(struct net_device *dev,
12645 			     struct ethtool_channels *channel)
12646 {
12647 	struct tg3 *tp = netdev_priv(dev);
12648 	u32 deflt_qs = netif_get_num_default_rss_queues();
12649 
12650 	channel->max_rx = tp->rxq_max;
12651 	channel->max_tx = tp->txq_max;
12652 
12653 	if (netif_running(dev)) {
12654 		channel->rx_count = tp->rxq_cnt;
12655 		channel->tx_count = tp->txq_cnt;
12656 	} else {
12657 		if (tp->rxq_req)
12658 			channel->rx_count = tp->rxq_req;
12659 		else
12660 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12661 
12662 		if (tp->txq_req)
12663 			channel->tx_count = tp->txq_req;
12664 		else
12665 			channel->tx_count = min(deflt_qs, tp->txq_max);
12666 	}
12667 }
12668 
12669 static int tg3_set_channels(struct net_device *dev,
12670 			    struct ethtool_channels *channel)
12671 {
12672 	struct tg3 *tp = netdev_priv(dev);
12673 
12674 	if (!tg3_flag(tp, SUPPORT_MSIX))
12675 		return -EOPNOTSUPP;
12676 
12677 	if (channel->rx_count > tp->rxq_max ||
12678 	    channel->tx_count > tp->txq_max)
12679 		return -EINVAL;
12680 
12681 	tp->rxq_req = channel->rx_count;
12682 	tp->txq_req = channel->tx_count;
12683 
12684 	if (!netif_running(dev))
12685 		return 0;
12686 
12687 	tg3_stop(tp);
12688 
12689 	tg3_carrier_off(tp);
12690 
12691 	tg3_start(tp, true, false, false);
12692 
12693 	return 0;
12694 }
12695 
12696 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12697 {
12698 	switch (stringset) {
12699 	case ETH_SS_STATS:
12700 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12701 		break;
12702 	case ETH_SS_TEST:
12703 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12704 		break;
12705 	default:
12706 		WARN_ON(1);	/* we need a WARN() */
12707 		break;
12708 	}
12709 }
12710 
12711 static int tg3_set_phys_id(struct net_device *dev,
12712 			    enum ethtool_phys_id_state state)
12713 {
12714 	struct tg3 *tp = netdev_priv(dev);
12715 
12716 	if (!netif_running(tp->dev))
12717 		return -EAGAIN;
12718 
12719 	switch (state) {
12720 	case ETHTOOL_ID_ACTIVE:
12721 		return 1;	/* cycle on/off once per second */
12722 
12723 	case ETHTOOL_ID_ON:
12724 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12725 		     LED_CTRL_1000MBPS_ON |
12726 		     LED_CTRL_100MBPS_ON |
12727 		     LED_CTRL_10MBPS_ON |
12728 		     LED_CTRL_TRAFFIC_OVERRIDE |
12729 		     LED_CTRL_TRAFFIC_BLINK |
12730 		     LED_CTRL_TRAFFIC_LED);
12731 		break;
12732 
12733 	case ETHTOOL_ID_OFF:
12734 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12735 		     LED_CTRL_TRAFFIC_OVERRIDE);
12736 		break;
12737 
12738 	case ETHTOOL_ID_INACTIVE:
12739 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12740 		break;
12741 	}
12742 
12743 	return 0;
12744 }
12745 
12746 static void tg3_get_ethtool_stats(struct net_device *dev,
12747 				   struct ethtool_stats *estats, u64 *tmp_stats)
12748 {
12749 	struct tg3 *tp = netdev_priv(dev);
12750 
12751 	if (tp->hw_stats)
12752 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12753 	else
12754 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12755 }
12756 
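/* Read the adapter's VPD block.  Images with the standard EEPROM magic
 * may point at an extended VPD area through the NVRAM directory;
 * otherwise fall back to the fixed VPD offset, or to the PCI VPD
 * capability for non-EEPROM-format images.
 */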
12757 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12758 {
12759 	int i;
12760 	__be32 *buf;
12761 	u32 offset = 0, len = 0;
12762 	u32 magic, val;
12763 
12764 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12765 		return NULL;
12766 
12767 	if (magic == TG3_EEPROM_MAGIC) {
12768 		for (offset = TG3_NVM_DIR_START;
12769 		     offset < TG3_NVM_DIR_END;
12770 		     offset += TG3_NVM_DIRENT_SIZE) {
12771 			if (tg3_nvram_read(tp, offset, &val))
12772 				return NULL;
12773 
12774 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12775 			    TG3_NVM_DIRTYPE_EXTVPD)
12776 				break;
12777 		}
12778 
12779 		if (offset != TG3_NVM_DIR_END) {
12780 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12781 			if (tg3_nvram_read(tp, offset + 4, &offset))
12782 				return NULL;
12783 
12784 			offset = tg3_nvram_logical_addr(tp, offset);
12785 		}
12786 	}
12787 
12788 	if (!offset || !len) {
12789 		offset = TG3_NVM_VPD_OFF;
12790 		len = TG3_NVM_VPD_LEN;
12791 	}
12792 
12793 	buf = kmalloc(len, GFP_KERNEL);
12794 	if (buf == NULL)
12795 		return NULL;
12796 
12797 	if (magic == TG3_EEPROM_MAGIC) {
12798 		for (i = 0; i < len; i += 4) {
12799 			/* The data is in little-endian format in NVRAM.
12800 			 * Use the big-endian read routines to preserve
12801 			 * the byte order as it exists in NVRAM.
12802 			 */
12803 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12804 				goto error;
12805 		}
12806 	} else {
12807 		u8 *ptr;
12808 		ssize_t cnt;
12809 		unsigned int pos = 0;
12810 
12811 		ptr = (u8 *)&buf[0];
12812 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12813 			cnt = pci_read_vpd(tp->pdev, pos,
12814 					   len - pos, ptr);
12815 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12816 				cnt = 0;
12817 			else if (cnt < 0)
12818 				goto error;
12819 		}
12820 		if (pos != len)
12821 			goto error;
12822 	}
12823 
12824 	*vpdlen = len;
12825 
12826 	return buf;
12827 
12828 error:
12829 	kfree(buf);
12830 	return NULL;
12831 }
12832 
12833 #define NVRAM_TEST_SIZE 0x100
12834 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12835 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12836 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12837 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12838 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12839 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12840 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12841 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12842 
12843 static int tg3_test_nvram(struct tg3 *tp)
12844 {
12845 	u32 csum, magic, len;
12846 	__be32 *buf;
12847 	int i, j, k, err = 0, size;
12848 
12849 	if (tg3_flag(tp, NO_NVRAM))
12850 		return 0;
12851 
12852 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12853 		return -EIO;
12854 
12855 	if (magic == TG3_EEPROM_MAGIC)
12856 		size = NVRAM_TEST_SIZE;
12857 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12858 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12859 		    TG3_EEPROM_SB_FORMAT_1) {
12860 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12861 			case TG3_EEPROM_SB_REVISION_0:
12862 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12863 				break;
12864 			case TG3_EEPROM_SB_REVISION_2:
12865 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12866 				break;
12867 			case TG3_EEPROM_SB_REVISION_3:
12868 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12869 				break;
12870 			case TG3_EEPROM_SB_REVISION_4:
12871 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12872 				break;
12873 			case TG3_EEPROM_SB_REVISION_5:
12874 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12875 				break;
12876 			case TG3_EEPROM_SB_REVISION_6:
12877 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12878 				break;
12879 			default:
12880 				return -EIO;
12881 			}
12882 		} else
12883 			return 0;
12884 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12885 		size = NVRAM_SELFBOOT_HW_SIZE;
12886 	else
12887 		return -EIO;
12888 
12889 	buf = kmalloc(size, GFP_KERNEL);
12890 	if (buf == NULL)
12891 		return -ENOMEM;
12892 
12893 	err = -EIO;
12894 	for (i = 0, j = 0; i < size; i += 4, j++) {
12895 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12896 		if (err)
12897 			break;
12898 	}
12899 	if (i < size)
12900 		goto out;
12901 
12902 	/* Selfboot format */
12903 	magic = be32_to_cpu(buf[0]);
12904 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12905 	    TG3_EEPROM_MAGIC_FW) {
12906 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12907 
12908 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12909 		    TG3_EEPROM_SB_REVISION_2) {
12910 			/* For rev 2, the csum doesn't include the MBA. */
12911 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12912 				csum8 += buf8[i];
12913 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12914 				csum8 += buf8[i];
12915 		} else {
12916 			for (i = 0; i < size; i++)
12917 				csum8 += buf8[i];
12918 		}
12919 
12920 		if (csum8 == 0) {
12921 			err = 0;
12922 			goto out;
12923 		}
12924 
12925 		err = -EIO;
12926 		goto out;
12927 	}
12928 
12929 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12930 	    TG3_EEPROM_MAGIC_HW) {
12931 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12932 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12933 		u8 *buf8 = (u8 *) buf;
12934 
12935 		/* Separate the parity bits and the data bytes.  */
12936 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12937 			if ((i == 0) || (i == 8)) {
12938 				int l;
12939 				u8 msk;
12940 
12941 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12942 					parity[k++] = buf8[i] & msk;
12943 				i++;
12944 			} else if (i == 16) {
12945 				int l;
12946 				u8 msk;
12947 
12948 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12949 					parity[k++] = buf8[i] & msk;
12950 				i++;
12951 
12952 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12953 					parity[k++] = buf8[i] & msk;
12954 				i++;
12955 			}
12956 			data[j++] = buf8[i];
12957 		}
12958 
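		/* The 0x20-byte block thus yields 28 data bytes and 28
		 * parity bits (7 + 7 from bytes 0 and 8, 6 + 8 from bytes
		 * 16-17).  Each data byte must show odd parity once its
		 * stored parity bit is counted in.
		 */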
12959 		err = -EIO;
12960 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12961 			u8 hw8 = hweight8(data[i]);
12962 
12963 			if ((hw8 & 0x1) && parity[i])
12964 				goto out;
12965 			else if (!(hw8 & 0x1) && !parity[i])
12966 				goto out;
12967 		}
12968 		err = 0;
12969 		goto out;
12970 	}
12971 
12972 	err = -EIO;
12973 
12974 	/* Bootstrap checksum at offset 0x10 */
12975 	csum = calc_crc((unsigned char *) buf, 0x10);
12976 	if (csum != le32_to_cpu(buf[0x10/4]))
12977 		goto out;
12978 
12979 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12980 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12981 	if (csum != le32_to_cpu(buf[0xfc/4]))
12982 		goto out;
12983 
12984 	kfree(buf);
12985 
12986 	buf = tg3_vpd_readblock(tp, &len);
12987 	if (!buf)
12988 		return -ENOMEM;
12989 
12990 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12991 	if (i > 0) {
12992 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12993 		if (j < 0)
12994 			goto out;
12995 
12996 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12997 			goto out;
12998 
12999 		i += PCI_VPD_LRDT_TAG_SIZE;
13000 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13001 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13002 		if (j > 0) {
13003 			u8 csum8 = 0;
13004 
13005 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13006 
13007 			for (i = 0; i <= j; i++)
13008 				csum8 += ((u8 *)buf)[i];
13009 
13010 			if (csum8)
13011 				goto out;
13012 		}
13013 	}
13014 
13015 	err = 0;
13016 
13017 out:
13018 	kfree(buf);
13019 	return err;
13020 }
13021 
13022 #define TG3_SERDES_TIMEOUT_SEC	2
13023 #define TG3_COPPER_TIMEOUT_SEC	6
13024 
13025 static int tg3_test_link(struct tg3 *tp)
13026 {
13027 	int i, max;
13028 
13029 	if (!netif_running(tp->dev))
13030 		return -ENODEV;
13031 
13032 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13033 		max = TG3_SERDES_TIMEOUT_SEC;
13034 	else
13035 		max = TG3_COPPER_TIMEOUT_SEC;
13036 
13037 	for (i = 0; i < max; i++) {
13038 		if (tp->link_up)
13039 			return 0;
13040 
13041 		if (msleep_interruptible(1000))
13042 			break;
13043 	}
13044 
13045 	return -EIO;
13046 }
13047 
13048 /* Only test the commonly used registers */
13049 static int tg3_test_registers(struct tg3 *tp)
13050 {
13051 	int i, is_5705, is_5750;
13052 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13053 	static struct {
13054 		u16 offset;
13055 		u16 flags;
13056 #define TG3_FL_5705	0x1
13057 #define TG3_FL_NOT_5705	0x2
13058 #define TG3_FL_NOT_5788	0x4
13059 #define TG3_FL_NOT_5750	0x8
13060 		u32 read_mask;
13061 		u32 write_mask;
13062 	} reg_tbl[] = {
13063 		/* MAC Control Registers */
13064 		{ MAC_MODE, TG3_FL_NOT_5705,
13065 			0x00000000, 0x00ef6f8c },
13066 		{ MAC_MODE, TG3_FL_5705,
13067 			0x00000000, 0x01ef6b8c },
13068 		{ MAC_STATUS, TG3_FL_NOT_5705,
13069 			0x03800107, 0x00000000 },
13070 		{ MAC_STATUS, TG3_FL_5705,
13071 			0x03800100, 0x00000000 },
13072 		{ MAC_ADDR_0_HIGH, 0x0000,
13073 			0x00000000, 0x0000ffff },
13074 		{ MAC_ADDR_0_LOW, 0x0000,
13075 			0x00000000, 0xffffffff },
13076 		{ MAC_RX_MTU_SIZE, 0x0000,
13077 			0x00000000, 0x0000ffff },
13078 		{ MAC_TX_MODE, 0x0000,
13079 			0x00000000, 0x00000070 },
13080 		{ MAC_TX_LENGTHS, 0x0000,
13081 			0x00000000, 0x00003fff },
13082 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13083 			0x00000000, 0x000007fc },
13084 		{ MAC_RX_MODE, TG3_FL_5705,
13085 			0x00000000, 0x000007dc },
13086 		{ MAC_HASH_REG_0, 0x0000,
13087 			0x00000000, 0xffffffff },
13088 		{ MAC_HASH_REG_1, 0x0000,
13089 			0x00000000, 0xffffffff },
13090 		{ MAC_HASH_REG_2, 0x0000,
13091 			0x00000000, 0xffffffff },
13092 		{ MAC_HASH_REG_3, 0x0000,
13093 			0x00000000, 0xffffffff },
13094 
13095 		/* Receive Data and Receive BD Initiator Control Registers. */
13096 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13097 			0x00000000, 0xffffffff },
13098 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13099 			0x00000000, 0xffffffff },
13100 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13101 			0x00000000, 0x00000003 },
13102 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13103 			0x00000000, 0xffffffff },
13104 		{ RCVDBDI_STD_BD+0, 0x0000,
13105 			0x00000000, 0xffffffff },
13106 		{ RCVDBDI_STD_BD+4, 0x0000,
13107 			0x00000000, 0xffffffff },
13108 		{ RCVDBDI_STD_BD+8, 0x0000,
13109 			0x00000000, 0xffff0002 },
13110 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13111 			0x00000000, 0xffffffff },
13112 
13113 		/* Receive BD Initiator Control Registers. */
13114 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13115 			0x00000000, 0xffffffff },
13116 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13117 			0x00000000, 0x000003ff },
13118 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13119 			0x00000000, 0xffffffff },
13120 
13121 		/* Host Coalescing Control Registers. */
13122 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13123 			0x00000000, 0x00000004 },
13124 		{ HOSTCC_MODE, TG3_FL_5705,
13125 			0x00000000, 0x000000f6 },
13126 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13127 			0x00000000, 0xffffffff },
13128 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13129 			0x00000000, 0x000003ff },
13130 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13131 			0x00000000, 0xffffffff },
13132 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13133 			0x00000000, 0x000003ff },
13134 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13135 			0x00000000, 0xffffffff },
13136 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13137 			0x00000000, 0x000000ff },
13138 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13139 			0x00000000, 0xffffffff },
13140 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13141 			0x00000000, 0x000000ff },
13142 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13143 			0x00000000, 0xffffffff },
13144 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13145 			0x00000000, 0xffffffff },
13146 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13147 			0x00000000, 0xffffffff },
13148 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13149 			0x00000000, 0x000000ff },
13150 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13151 			0x00000000, 0xffffffff },
13152 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13153 			0x00000000, 0x000000ff },
13154 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13155 			0x00000000, 0xffffffff },
13156 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13157 			0x00000000, 0xffffffff },
13158 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13159 			0x00000000, 0xffffffff },
13160 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13161 			0x00000000, 0xffffffff },
13162 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13163 			0x00000000, 0xffffffff },
13164 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13165 			0xffffffff, 0x00000000 },
13166 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13167 			0xffffffff, 0x00000000 },
13168 
13169 		/* Buffer Manager Control Registers. */
13170 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13171 			0x00000000, 0x007fff80 },
13172 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13173 			0x00000000, 0x007fffff },
13174 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13175 			0x00000000, 0x0000003f },
13176 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13177 			0x00000000, 0x000001ff },
13178 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13179 			0x00000000, 0x000001ff },
13180 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13181 			0xffffffff, 0x00000000 },
13182 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13183 			0xffffffff, 0x00000000 },
13184 
13185 		/* Mailbox Registers */
13186 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13187 			0x00000000, 0x000001ff },
13188 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13189 			0x00000000, 0x000001ff },
13190 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13191 			0x00000000, 0x000007ff },
13192 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13193 			0x00000000, 0x000001ff },
13194 
13195 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13196 	};
13197 
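	/* Table semantics: read_mask selects bits that must keep their
	 * original value across any write (read-only), and write_mask
	 * selects bits that must accept both all-zeros and all-ones;
	 * bits in neither mask are not checked.
	 */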
13198 	is_5705 = is_5750 = 0;
13199 	if (tg3_flag(tp, 5705_PLUS)) {
13200 		is_5705 = 1;
13201 		if (tg3_flag(tp, 5750_PLUS))
13202 			is_5750 = 1;
13203 	}
13204 
13205 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13206 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13207 			continue;
13208 
13209 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13210 			continue;
13211 
13212 		if (tg3_flag(tp, IS_5788) &&
13213 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13214 			continue;
13215 
13216 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13217 			continue;
13218 
13219 		offset = (u32) reg_tbl[i].offset;
13220 		read_mask = reg_tbl[i].read_mask;
13221 		write_mask = reg_tbl[i].write_mask;
13222 
13223 		/* Save the original register content */
13224 		save_val = tr32(offset);
13225 
13226 		/* Determine the read-only value. */
13227 		read_val = save_val & read_mask;
13228 
13229 		/* Write zero to the register, then make sure the read-only bits
13230 		 * are not changed and the read/write bits are all zeros.
13231 		 */
13232 		tw32(offset, 0);
13233 
13234 		val = tr32(offset);
13235 
13236 		/* Test the read-only and read/write bits. */
13237 		if (((val & read_mask) != read_val) || (val & write_mask))
13238 			goto out;
13239 
		/* Write ones to all the bits defined by read_mask and
		 * write_mask, then make sure the read-only bits are not
		 * changed and the read/write bits are all ones.
		 */
13244 		tw32(offset, read_mask | write_mask);
13245 
13246 		val = tr32(offset);
13247 
13248 		/* Test the read-only bits. */
13249 		if ((val & read_mask) != read_val)
13250 			goto out;
13251 
13252 		/* Test the read/write bits. */
13253 		if ((val & write_mask) != write_mask)
13254 			goto out;
13255 
13256 		tw32(offset, save_val);
13257 	}
13258 
13259 	return 0;
13260 
13261 out:
13262 	if (netif_msg_hw(tp))
13263 		netdev_err(tp->dev,
13264 			   "Register test failed at offset %x\n", offset);
13265 	tw32(offset, save_val);
13266 	return -EIO;
13267 }
13268 
13269 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13270 {
13271 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13272 	int i;
13273 	u32 j;
13274 
13275 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13276 		for (j = 0; j < len; j += 4) {
13277 			u32 val;
13278 
13279 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13280 			tg3_read_mem(tp, offset + j, &val);
13281 			if (val != test_pattern[i])
13282 				return -EIO;
13283 		}
13284 	}
13285 	return 0;
13286 }
13287 
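/* Walk a per-ASIC table of { offset, len } internal SRAM windows,
 * terminated by offset 0xffffffff, and run the pattern test above over
 * each window.
 */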
13288 static int tg3_test_memory(struct tg3 *tp)
13289 {
13290 	static struct mem_entry {
13291 		u32 offset;
13292 		u32 len;
13293 	} mem_tbl_570x[] = {
13294 		{ 0x00000000, 0x00b50},
13295 		{ 0x00002000, 0x1c000},
13296 		{ 0xffffffff, 0x00000}
13297 	}, mem_tbl_5705[] = {
13298 		{ 0x00000100, 0x0000c},
13299 		{ 0x00000200, 0x00008},
13300 		{ 0x00004000, 0x00800},
13301 		{ 0x00006000, 0x01000},
13302 		{ 0x00008000, 0x02000},
13303 		{ 0x00010000, 0x0e000},
13304 		{ 0xffffffff, 0x00000}
13305 	}, mem_tbl_5755[] = {
13306 		{ 0x00000200, 0x00008},
13307 		{ 0x00004000, 0x00800},
13308 		{ 0x00006000, 0x00800},
13309 		{ 0x00008000, 0x02000},
13310 		{ 0x00010000, 0x0c000},
13311 		{ 0xffffffff, 0x00000}
13312 	}, mem_tbl_5906[] = {
13313 		{ 0x00000200, 0x00008},
13314 		{ 0x00004000, 0x00400},
13315 		{ 0x00006000, 0x00400},
13316 		{ 0x00008000, 0x01000},
13317 		{ 0x00010000, 0x01000},
13318 		{ 0xffffffff, 0x00000}
13319 	}, mem_tbl_5717[] = {
13320 		{ 0x00000200, 0x00008},
13321 		{ 0x00010000, 0x0a000},
13322 		{ 0x00020000, 0x13c00},
13323 		{ 0xffffffff, 0x00000}
13324 	}, mem_tbl_57765[] = {
13325 		{ 0x00000200, 0x00008},
13326 		{ 0x00004000, 0x00800},
13327 		{ 0x00006000, 0x09800},
13328 		{ 0x00010000, 0x0a000},
13329 		{ 0xffffffff, 0x00000}
13330 	};
13331 	struct mem_entry *mem_tbl;
13332 	int err = 0;
13333 	int i;
13334 
13335 	if (tg3_flag(tp, 5717_PLUS))
13336 		mem_tbl = mem_tbl_5717;
13337 	else if (tg3_flag(tp, 57765_CLASS) ||
13338 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13339 		mem_tbl = mem_tbl_57765;
13340 	else if (tg3_flag(tp, 5755_PLUS))
13341 		mem_tbl = mem_tbl_5755;
13342 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13343 		mem_tbl = mem_tbl_5906;
13344 	else if (tg3_flag(tp, 5705_PLUS))
13345 		mem_tbl = mem_tbl_5705;
13346 	else
13347 		mem_tbl = mem_tbl_570x;
13348 
13349 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13350 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13351 		if (err)
13352 			break;
13353 	}
13354 
13355 	return err;
13356 }
13357 
13358 #define TG3_TSO_MSS		500
13359 
13360 #define TG3_TSO_IP_HDR_LEN	20
13361 #define TG3_TSO_TCP_HDR_LEN	20
13362 #define TG3_TSO_TCP_OPT_LEN	12
13363 
13364 static const u8 tg3_tso_header[] = {
13365 0x08, 0x00,
13366 0x45, 0x00, 0x00, 0x00,
13367 0x00, 0x00, 0x40, 0x00,
13368 0x40, 0x06, 0x00, 0x00,
13369 0x0a, 0x00, 0x00, 0x01,
13370 0x0a, 0x00, 0x00, 0x02,
13371 0x0d, 0x00, 0xe0, 0x00,
13372 0x00, 0x00, 0x01, 0x00,
13373 0x00, 0x00, 0x02, 0x00,
13374 0x80, 0x10, 0x10, 0x00,
13375 0x14, 0x09, 0x00, 0x00,
13376 0x01, 0x01, 0x08, 0x0a,
13377 0x11, 0x11, 0x11, 0x11,
13378 0x11, 0x11, 0x11, 0x11,
13379 };
13380 
13381 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13382 {
13383 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13384 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13385 	u32 budget;
13386 	struct sk_buff *skb;
13387 	u8 *tx_data, *rx_data;
13388 	dma_addr_t map;
13389 	int num_pkts, tx_len, rx_len, i, err;
13390 	struct tg3_rx_buffer_desc *desc;
13391 	struct tg3_napi *tnapi, *rnapi;
13392 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13393 
13394 	tnapi = &tp->napi[0];
13395 	rnapi = &tp->napi[0];
13396 	if (tp->irq_cnt > 1) {
13397 		if (tg3_flag(tp, ENABLE_RSS))
13398 			rnapi = &tp->napi[1];
13399 		if (tg3_flag(tp, ENABLE_TSS))
13400 			tnapi = &tp->napi[1];
13401 	}
13402 	coal_now = tnapi->coal_now | rnapi->coal_now;
13403 
13404 	err = -EIO;
13405 
13406 	tx_len = pktsz;
13407 	skb = netdev_alloc_skb(tp->dev, tx_len);
13408 	if (!skb)
13409 		return -ENOMEM;
13410 
13411 	tx_data = skb_put(skb, tx_len);
13412 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13413 	memset(tx_data + ETH_ALEN, 0x0, 8);
13414 
13415 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13416 
13417 	if (tso_loopback) {
13418 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13419 
13420 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13421 			      TG3_TSO_TCP_OPT_LEN;
13422 
13423 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13424 		       sizeof(tg3_tso_header));
13425 		mss = TG3_TSO_MSS;
13426 
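		/* The MAC will segment the payload into MSS-sized frames,
		 * so expect one looped-back packet per full or partial
		 * segment.
		 */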
13427 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13428 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13429 
13430 		/* Set the total length field in the IP header */
13431 		iph->tot_len = htons((u16)(mss + hdr_len));
13432 
13433 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13434 			      TXD_FLAG_CPU_POST_DMA);
13435 
13436 		if (tg3_flag(tp, HW_TSO_1) ||
13437 		    tg3_flag(tp, HW_TSO_2) ||
13438 		    tg3_flag(tp, HW_TSO_3)) {
13439 			struct tcphdr *th;
13440 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13441 			th = (struct tcphdr *)&tx_data[val];
13442 			th->check = 0;
13443 		} else
13444 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13445 
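		/* Fold the header length into the mss/base_flags fields;
		 * the encoding differs by hardware TSO generation and
		 * mirrors the one used on the regular transmit path.
		 */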
13446 		if (tg3_flag(tp, HW_TSO_3)) {
13447 			mss |= (hdr_len & 0xc) << 12;
13448 			if (hdr_len & 0x10)
13449 				base_flags |= 0x00000010;
13450 			base_flags |= (hdr_len & 0x3e0) << 5;
13451 		} else if (tg3_flag(tp, HW_TSO_2))
13452 			mss |= hdr_len << 9;
13453 		else if (tg3_flag(tp, HW_TSO_1) ||
13454 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13455 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13456 		} else {
13457 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13458 		}
13459 
13460 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13461 	} else {
13462 		num_pkts = 1;
13463 		data_off = ETH_HLEN;
13464 
13465 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13466 		    tx_len > VLAN_ETH_FRAME_LEN)
13467 			base_flags |= TXD_FLAG_JMB_PKT;
13468 	}
13469 
13470 	for (i = data_off; i < tx_len; i++)
13471 		tx_data[i] = (u8) (i & 0xff);
13472 
13473 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13474 	if (pci_dma_mapping_error(tp->pdev, map)) {
13475 		dev_kfree_skb(skb);
13476 		return -EIO;
13477 	}
13478 
13479 	val = tnapi->tx_prod;
13480 	tnapi->tx_buffers[val].skb = skb;
13481 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13482 
13483 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13484 	       rnapi->coal_now);
13485 
13486 	udelay(10);
13487 
13488 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13489 
13490 	budget = tg3_tx_avail(tnapi);
13491 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13492 			    base_flags | TXD_FLAG_END, mss, 0)) {
13493 		tnapi->tx_buffers[val].skb = NULL;
13494 		dev_kfree_skb(skb);
13495 		return -EIO;
13496 	}
13497 
13498 	tnapi->tx_prod++;
13499 
13500 	/* Sync BD data before updating mailbox */
13501 	wmb();
13502 
13503 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13504 	tr32_mailbox(tnapi->prodmbox);
13505 
13506 	udelay(10);
13507 
	/* Poll for up to 350 usec (35 x 10 usec iterations) to allow
	 * enough time on some 10/100 Mbps devices.
	 */
13509 	for (i = 0; i < 35; i++) {
13510 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13511 		       coal_now);
13512 
13513 		udelay(10);
13514 
13515 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13516 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13517 		if ((tx_idx == tnapi->tx_prod) &&
13518 		    (rx_idx == (rx_start_idx + num_pkts)))
13519 			break;
13520 	}
13521 
13522 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13523 	dev_kfree_skb(skb);
13524 
13525 	if (tx_idx != tnapi->tx_prod)
13526 		goto out;
13527 
13528 	if (rx_idx != rx_start_idx + num_pkts)
13529 		goto out;
13530 
13531 	val = data_off;
13532 	while (rx_idx != rx_start_idx) {
13533 		desc = &rnapi->rx_rcb[rx_start_idx++];
13534 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13535 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13536 
13537 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13538 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13539 			goto out;
13540 
13541 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13542 			 - ETH_FCS_LEN;
13543 
13544 		if (!tso_loopback) {
13545 			if (rx_len != tx_len)
13546 				goto out;
13547 
13548 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13549 				if (opaque_key != RXD_OPAQUE_RING_STD)
13550 					goto out;
13551 			} else {
13552 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13553 					goto out;
13554 			}
13555 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13556 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13557 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13558 			goto out;
13559 		}
13560 
13561 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13562 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13563 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13564 					     mapping);
13565 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13566 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13567 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13568 					     mapping);
13569 		} else
13570 			goto out;
13571 
13572 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13573 					    PCI_DMA_FROMDEVICE);
13574 
13575 		rx_data += TG3_RX_OFFSET(tp);
13576 		for (i = data_off; i < rx_len; i++, val++) {
13577 			if (*(rx_data + i) != (u8) (val & 0xff))
13578 				goto out;
13579 		}
13580 	}
13581 
13582 	err = 0;
13583 
13584 	/* tg3_free_rings will unmap and free the rx_data */
13585 out:
13586 	return err;
13587 }
13588 
13589 #define TG3_STD_LOOPBACK_FAILED		1
13590 #define TG3_JMB_LOOPBACK_FAILED		2
13591 #define TG3_TSO_LOOPBACK_FAILED		4
13592 #define TG3_LOOPBACK_FAILED \
13593 	(TG3_STD_LOOPBACK_FAILED | \
13594 	 TG3_JMB_LOOPBACK_FAILED | \
13595 	 TG3_TSO_LOOPBACK_FAILED)
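
/* Each data[TG3_*_LOOPB_TEST] result is an OR of the failure bits
 * above, one bit per packet flavor exercised.
 */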
13596 
13597 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13598 {
13599 	int err = -EIO;
13600 	u32 eee_cap;
13601 	u32 jmb_pkt_sz = 9000;
13602 
13603 	if (tp->dma_limit)
13604 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13605 
13606 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13607 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13608 
13609 	if (!netif_running(tp->dev)) {
13610 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13612 		if (do_extlpbk)
13613 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614 		goto done;
13615 	}
13616 
13617 	err = tg3_reset_hw(tp, true);
13618 	if (err) {
13619 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13620 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13621 		if (do_extlpbk)
13622 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13623 		goto done;
13624 	}
13625 
13626 	if (tg3_flag(tp, ENABLE_RSS)) {
13627 		int i;
13628 
13629 		/* Reroute all rx packets to the 1st queue */
13630 		for (i = MAC_RSS_INDIR_TBL_0;
13631 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13632 			tw32(i, 0x0);
13633 	}
13634 
	/* HW errata - MAC loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by the
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
13640 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13641 	    !tg3_flag(tp, CPMU_PRESENT)) {
13642 		tg3_mac_loopback(tp, true);
13643 
13644 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13645 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13646 
13647 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13648 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13649 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13650 
13651 		tg3_mac_loopback(tp, false);
13652 	}
13653 
13654 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13655 	    !tg3_flag(tp, USE_PHYLIB)) {
13656 		int i;
13657 
13658 		tg3_phy_lpbk_set(tp, 0, false);
13659 
13660 		/* Wait for link */
13661 		for (i = 0; i < 100; i++) {
13662 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13663 				break;
13664 			mdelay(1);
13665 		}
13666 
13667 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13668 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13669 		if (tg3_flag(tp, TSO_CAPABLE) &&
13670 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13671 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13672 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13673 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13674 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13675 
13676 		if (do_extlpbk) {
13677 			tg3_phy_lpbk_set(tp, 0, true);
13678 
13679 			/* All link indications report up, but the hardware
13680 			 * isn't really ready for about 20 msec.  Double it
13681 			 * to be sure.
13682 			 */
13683 			mdelay(40);
13684 
13685 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13686 				data[TG3_EXT_LOOPB_TEST] |=
13687 							TG3_STD_LOOPBACK_FAILED;
13688 			if (tg3_flag(tp, TSO_CAPABLE) &&
13689 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13690 				data[TG3_EXT_LOOPB_TEST] |=
13691 							TG3_TSO_LOOPBACK_FAILED;
13692 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13693 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13694 				data[TG3_EXT_LOOPB_TEST] |=
13695 							TG3_JMB_LOOPBACK_FAILED;
13696 		}
13697 
13698 		/* Re-enable gphy autopowerdown. */
13699 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13700 			tg3_phy_toggle_apd(tp, true);
13701 	}
13702 
13703 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13704 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13705 
13706 done:
13707 	tp->phy_flags |= eee_cap;
13708 
13709 	return err;
13710 }
13711 
13712 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13713 			  u64 *data)
13714 {
13715 	struct tg3 *tp = netdev_priv(dev);
13716 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13717 
13718 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13719 		if (tg3_power_up(tp)) {
13720 			etest->flags |= ETH_TEST_FL_FAILED;
13721 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13722 			return;
13723 		}
13724 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13725 	}
13726 
13727 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13728 
13729 	if (tg3_test_nvram(tp) != 0) {
13730 		etest->flags |= ETH_TEST_FL_FAILED;
13731 		data[TG3_NVRAM_TEST] = 1;
13732 	}
13733 	if (!doextlpbk && tg3_test_link(tp)) {
13734 		etest->flags |= ETH_TEST_FL_FAILED;
13735 		data[TG3_LINK_TEST] = 1;
13736 	}
13737 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13738 		int err, err2 = 0, irq_sync = 0;
13739 
13740 		if (netif_running(dev)) {
13741 			tg3_phy_stop(tp);
13742 			tg3_netif_stop(tp);
13743 			irq_sync = 1;
13744 		}
13745 
13746 		tg3_full_lock(tp, irq_sync);
13747 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13748 		err = tg3_nvram_lock(tp);
13749 		tg3_halt_cpu(tp, RX_CPU_BASE);
13750 		if (!tg3_flag(tp, 5705_PLUS))
13751 			tg3_halt_cpu(tp, TX_CPU_BASE);
13752 		if (!err)
13753 			tg3_nvram_unlock(tp);
13754 
13755 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13756 			tg3_phy_reset(tp);
13757 
13758 		if (tg3_test_registers(tp) != 0) {
13759 			etest->flags |= ETH_TEST_FL_FAILED;
13760 			data[TG3_REGISTER_TEST] = 1;
13761 		}
13762 
13763 		if (tg3_test_memory(tp) != 0) {
13764 			etest->flags |= ETH_TEST_FL_FAILED;
13765 			data[TG3_MEMORY_TEST] = 1;
13766 		}
13767 
13768 		if (doextlpbk)
13769 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13770 
13771 		if (tg3_test_loopback(tp, data, doextlpbk))
13772 			etest->flags |= ETH_TEST_FL_FAILED;
13773 
13774 		tg3_full_unlock(tp);
13775 
13776 		if (tg3_test_interrupt(tp) != 0) {
13777 			etest->flags |= ETH_TEST_FL_FAILED;
13778 			data[TG3_INTERRUPT_TEST] = 1;
13779 		}
13780 
13781 		tg3_full_lock(tp, 0);
13782 
13783 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13784 		if (netif_running(dev)) {
13785 			tg3_flag_set(tp, INIT_COMPLETE);
13786 			err2 = tg3_restart_hw(tp, true);
13787 			if (!err2)
13788 				tg3_netif_start(tp);
13789 		}
13790 
13791 		tg3_full_unlock(tp);
13792 
13793 		if (irq_sync && !err2)
13794 			tg3_phy_start(tp);
13795 	}
13796 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
13800 
13801 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13802 {
13803 	struct tg3 *tp = netdev_priv(dev);
13804 	struct hwtstamp_config stmpconf;
13805 
13806 	if (!tg3_flag(tp, PTP_CAPABLE))
13807 		return -EOPNOTSUPP;
13808 
13809 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13810 		return -EFAULT;
13811 
13812 	if (stmpconf.flags)
13813 		return -EINVAL;
13814 
13815 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13816 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13817 		return -ERANGE;
13818 
13819 	switch (stmpconf.rx_filter) {
13820 	case HWTSTAMP_FILTER_NONE:
13821 		tp->rxptpctl = 0;
13822 		break;
13823 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13824 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13825 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13826 		break;
13827 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13828 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13829 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13830 		break;
13831 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13832 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13833 			       TG3_RX_PTP_CTL_DELAY_REQ;
13834 		break;
13835 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13836 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13837 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13838 		break;
13839 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13840 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13841 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13842 		break;
13843 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13844 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13845 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13846 		break;
13847 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13848 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13849 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13850 		break;
13851 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13852 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13853 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13854 		break;
13855 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13856 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13857 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13858 		break;
13859 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13860 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13861 			       TG3_RX_PTP_CTL_DELAY_REQ;
13862 		break;
13863 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13864 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13865 			       TG3_RX_PTP_CTL_DELAY_REQ;
13866 		break;
13867 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13868 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13869 			       TG3_RX_PTP_CTL_DELAY_REQ;
13870 		break;
13871 	default:
13872 		return -ERANGE;
13873 	}
13874 
13875 	if (netif_running(dev) && tp->rxptpctl)
13876 		tw32(TG3_RX_PTP_CTL,
13877 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13878 
13879 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13880 		tg3_flag_set(tp, TX_TSTAMP_EN);
13881 	else
13882 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13883 
13884 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13885 		-EFAULT : 0;
13886 }
13887 
13888 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13889 {
13890 	struct tg3 *tp = netdev_priv(dev);
13891 	struct hwtstamp_config stmpconf;
13892 
13893 	if (!tg3_flag(tp, PTP_CAPABLE))
13894 		return -EOPNOTSUPP;
13895 
13896 	stmpconf.flags = 0;
13897 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13898 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13899 
13900 	switch (tp->rxptpctl) {
13901 	case 0:
13902 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13903 		break;
13904 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13905 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13906 		break;
13907 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13908 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13909 		break;
13910 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13911 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13912 		break;
13913 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13914 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13915 		break;
13916 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13917 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13918 		break;
13919 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13920 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13921 		break;
13922 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13923 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13924 		break;
13925 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13926 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13927 		break;
13928 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13929 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13930 		break;
13931 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13932 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13933 		break;
13934 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13935 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13936 		break;
13937 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13938 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13939 		break;
13940 	default:
13941 		WARN_ON_ONCE(1);
13942 		return -ERANGE;
13943 	}
13944 
13945 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13946 		-EFAULT : 0;
13947 }
13948 
13949 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13950 {
13951 	struct mii_ioctl_data *data = if_mii(ifr);
13952 	struct tg3 *tp = netdev_priv(dev);
13953 	int err;
13954 
13955 	if (tg3_flag(tp, USE_PHYLIB)) {
13956 		struct phy_device *phydev;
13957 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13958 			return -EAGAIN;
13959 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13960 		return phy_mii_ioctl(phydev, ifr, cmd);
13961 	}
13962 
13963 	switch (cmd) {
13964 	case SIOCGMIIPHY:
13965 		data->phy_id = tp->phy_addr;
13966 
		/* fall through */
13968 	case SIOCGMIIREG: {
13969 		u32 mii_regval;
13970 
13971 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13972 			break;			/* We have no PHY */
13973 
13974 		if (!netif_running(dev))
13975 			return -EAGAIN;
13976 
13977 		spin_lock_bh(&tp->lock);
13978 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13979 				    data->reg_num & 0x1f, &mii_regval);
13980 		spin_unlock_bh(&tp->lock);
13981 
13982 		data->val_out = mii_regval;
13983 
13984 		return err;
13985 	}
13986 
13987 	case SIOCSMIIREG:
13988 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13989 			break;			/* We have no PHY */
13990 
13991 		if (!netif_running(dev))
13992 			return -EAGAIN;
13993 
13994 		spin_lock_bh(&tp->lock);
13995 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13996 				     data->reg_num & 0x1f, data->val_in);
13997 		spin_unlock_bh(&tp->lock);
13998 
13999 		return err;
14000 
14001 	case SIOCSHWTSTAMP:
14002 		return tg3_hwtstamp_set(dev, ifr);
14003 
14004 	case SIOCGHWTSTAMP:
14005 		return tg3_hwtstamp_get(dev, ifr);
14006 
14007 	default:
14008 		/* do nothing */
14009 		break;
14010 	}
14011 	return -EOPNOTSUPP;
14012 }
14013 
14014 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14015 {
14016 	struct tg3 *tp = netdev_priv(dev);
14017 
14018 	memcpy(ec, &tp->coal, sizeof(*ec));
14019 	return 0;
14020 }
14021 
14022 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14023 {
14024 	struct tg3 *tp = netdev_priv(dev);
14025 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14026 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14027 
14028 	if (!tg3_flag(tp, 5705_PLUS)) {
14029 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14030 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14031 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14032 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14033 	}
14034 
14035 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14036 	    (!ec->rx_coalesce_usecs) ||
14037 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14038 	    (!ec->tx_coalesce_usecs) ||
14039 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14040 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14041 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14042 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14043 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14044 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14045 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14046 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14047 		return -EINVAL;
14048 
14049 	/* Only copy relevant parameters, ignore all others. */
14050 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14051 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14052 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14053 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14054 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14055 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14056 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14057 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14058 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14059 
14060 	if (netif_running(dev)) {
14061 		tg3_full_lock(tp, 0);
14062 		__tg3_set_coalesce(tp, &tp->coal);
14063 		tg3_full_unlock(tp);
14064 	}
14065 	return 0;
14066 }
14067 
14068 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14069 {
14070 	struct tg3 *tp = netdev_priv(dev);
14071 
14072 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14073 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14074 		return -EOPNOTSUPP;
14075 	}
14076 
14077 	if (edata->advertised != tp->eee.advertised) {
14078 		netdev_warn(tp->dev,
14079 			    "Direct manipulation of EEE advertisement is not supported\n");
14080 		return -EINVAL;
14081 	}
14082 
14083 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximum supported Tx LPI timer is %#x\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14087 		return -EINVAL;
14088 	}
14089 
14090 	tp->eee = *edata;
14091 
14092 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14093 	tg3_warn_mgmt_link_flap(tp);
14094 
14095 	if (netif_running(tp->dev)) {
14096 		tg3_full_lock(tp, 0);
14097 		tg3_setup_eee(tp);
14098 		tg3_phy_reset(tp);
14099 		tg3_full_unlock(tp);
14100 	}
14101 
14102 	return 0;
14103 }
14104 
14105 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14106 {
14107 	struct tg3 *tp = netdev_priv(dev);
14108 
14109 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14110 		netdev_warn(tp->dev,
14111 			    "Board does not support EEE!\n");
14112 		return -EOPNOTSUPP;
14113 	}
14114 
14115 	*edata = tp->eee;
14116 	return 0;
14117 }
14118 
14119 static const struct ethtool_ops tg3_ethtool_ops = {
14120 	.get_drvinfo		= tg3_get_drvinfo,
14121 	.get_regs_len		= tg3_get_regs_len,
14122 	.get_regs		= tg3_get_regs,
14123 	.get_wol		= tg3_get_wol,
14124 	.set_wol		= tg3_set_wol,
14125 	.get_msglevel		= tg3_get_msglevel,
14126 	.set_msglevel		= tg3_set_msglevel,
14127 	.nway_reset		= tg3_nway_reset,
14128 	.get_link		= ethtool_op_get_link,
14129 	.get_eeprom_len		= tg3_get_eeprom_len,
14130 	.get_eeprom		= tg3_get_eeprom,
14131 	.set_eeprom		= tg3_set_eeprom,
14132 	.get_ringparam		= tg3_get_ringparam,
14133 	.set_ringparam		= tg3_set_ringparam,
14134 	.get_pauseparam		= tg3_get_pauseparam,
14135 	.set_pauseparam		= tg3_set_pauseparam,
14136 	.self_test		= tg3_self_test,
14137 	.get_strings		= tg3_get_strings,
14138 	.set_phys_id		= tg3_set_phys_id,
14139 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14140 	.get_coalesce		= tg3_get_coalesce,
14141 	.set_coalesce		= tg3_set_coalesce,
14142 	.get_sset_count		= tg3_get_sset_count,
14143 	.get_rxnfc		= tg3_get_rxnfc,
14144 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14145 	.get_rxfh		= tg3_get_rxfh,
14146 	.set_rxfh		= tg3_set_rxfh,
14147 	.get_channels		= tg3_get_channels,
14148 	.set_channels		= tg3_set_channels,
14149 	.get_ts_info		= tg3_get_ts_info,
14150 	.get_eee		= tg3_get_eee,
14151 	.set_eee		= tg3_set_eee,
14152 	.get_link_ksettings	= tg3_get_link_ksettings,
14153 	.set_link_ksettings	= tg3_set_link_ksettings,
14154 };
14155 
14156 static void tg3_get_stats64(struct net_device *dev,
14157 			    struct rtnl_link_stats64 *stats)
14158 {
14159 	struct tg3 *tp = netdev_priv(dev);
14160 
14161 	spin_lock_bh(&tp->lock);
14162 	if (!tp->hw_stats) {
14163 		*stats = tp->net_stats_prev;
14164 		spin_unlock_bh(&tp->lock);
14165 		return;
14166 	}
14167 
14168 	tg3_get_nstats(tp, stats);
14169 	spin_unlock_bh(&tp->lock);
14170 }
14171 
14172 static void tg3_set_rx_mode(struct net_device *dev)
14173 {
14174 	struct tg3 *tp = netdev_priv(dev);
14175 
14176 	if (!netif_running(dev))
14177 		return;
14178 
14179 	tg3_full_lock(tp, 0);
14180 	__tg3_set_rx_mode(dev);
14181 	tg3_full_unlock(tp);
14182 }
14183 
14184 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14185 			       int new_mtu)
14186 {
14187 	dev->mtu = new_mtu;
14188 
14189 	if (new_mtu > ETH_DATA_LEN) {
14190 		if (tg3_flag(tp, 5780_CLASS)) {
14191 			netdev_update_features(dev);
14192 			tg3_flag_clear(tp, TSO_CAPABLE);
14193 		} else {
14194 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14195 		}
14196 	} else {
14197 		if (tg3_flag(tp, 5780_CLASS)) {
14198 			tg3_flag_set(tp, TSO_CAPABLE);
14199 			netdev_update_features(dev);
14200 		}
14201 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14202 	}
14203 }
14204 
14205 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14206 {
14207 	struct tg3 *tp = netdev_priv(dev);
14208 	int err;
14209 	bool reset_phy = false;
14210 
14211 	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
14215 		tg3_set_mtu(dev, tp, new_mtu);
14216 		return 0;
14217 	}
14218 
14219 	tg3_phy_stop(tp);
14220 
14221 	tg3_netif_stop(tp);
14222 
14223 	tg3_set_mtu(dev, tp, new_mtu);
14224 
14225 	tg3_full_lock(tp, 1);
14226 
14227 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14228 
14229 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14230 	 * breaks all requests to 256 bytes.
14231 	 */
14232 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
14233 		reset_phy = true;
14234 
14235 	err = tg3_restart_hw(tp, reset_phy);
14236 
14237 	if (!err)
14238 		tg3_netif_start(tp);
14239 
14240 	tg3_full_unlock(tp);
14241 
14242 	if (!err)
14243 		tg3_phy_start(tp);
14244 
14245 	return err;
14246 }
14247 
14248 static const struct net_device_ops tg3_netdev_ops = {
14249 	.ndo_open		= tg3_open,
14250 	.ndo_stop		= tg3_close,
14251 	.ndo_start_xmit		= tg3_start_xmit,
14252 	.ndo_get_stats64	= tg3_get_stats64,
14253 	.ndo_validate_addr	= eth_validate_addr,
14254 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14255 	.ndo_set_mac_address	= tg3_set_mac_addr,
14256 	.ndo_do_ioctl		= tg3_ioctl,
14257 	.ndo_tx_timeout		= tg3_tx_timeout,
14258 	.ndo_change_mtu		= tg3_change_mtu,
14259 	.ndo_fix_features	= tg3_fix_features,
14260 	.ndo_set_features	= tg3_set_features,
14261 #ifdef CONFIG_NET_POLL_CONTROLLER
14262 	.ndo_poll_controller	= tg3_poll_controller,
14263 #endif
14264 };
14265 
14266 static void tg3_get_eeprom_size(struct tg3 *tp)
14267 {
14268 	u32 cursize, val, magic;
14269 
14270 	tp->nvram_size = EEPROM_CHIP_SIZE;
14271 
14272 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14273 		return;
14274 
14275 	if ((magic != TG3_EEPROM_MAGIC) &&
14276 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14277 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14278 		return;
14279 
14280 	/*
14281 	 * Size the chip by reading offsets at increasing powers of two.
14282 	 * When we encounter our validation signature, we know the addressing
14283 	 * has wrapped around, and thus have our chip size.
14284 	 */
14285 	cursize = 0x10;
14286 
14287 	while (cursize < tp->nvram_size) {
14288 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14289 			return;
14290 
14291 		if (val == magic)
14292 			break;
14293 
14294 		cursize <<= 1;
14295 	}
14296 
14297 	tp->nvram_size = cursize;
14298 }
14299 
14300 static void tg3_get_nvram_size(struct tg3 *tp)
14301 {
14302 	u32 val;
14303 
14304 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14305 		return;
14306 
	/* A non-standard magic means a selfboot image; size it by
	 * probing, as for an EEPROM.
	 */
14308 	if (val != TG3_EEPROM_MAGIC) {
14309 		tg3_get_eeprom_size(tp);
14310 		return;
14311 	}
14312 
14313 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14314 		if (val != 0) {
			/* We want the 16-bit NVRAM size (in KB) stored at
			 * offset 0xf2.  tg3_nvram_read() byteswaps the data
			 * it returns according to the byteswapping settings
			 * used for all other register accesses, which
			 * guarantees the value we want lands in the lower
			 * 16 bits.  However, NVRAM itself is little-endian,
			 * so that value arrives opposite the CPU's
			 * endianness; the 16-bit swab below restores CPU
			 * order.  E.g. swab16(0x0002) = 0x0200 = 512, i.e.
			 * a 512 KB part.
			 */
14326 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14327 			return;
14328 		}
14329 	}
14330 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14331 }
14332 
14333 static void tg3_get_nvram_info(struct tg3 *tp)
14334 {
14335 	u32 nvcfg1;
14336 
14337 	nvcfg1 = tr32(NVRAM_CFG1);
14338 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14339 		tg3_flag_set(tp, FLASH);
14340 	} else {
14341 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14342 		tw32(NVRAM_CFG1, nvcfg1);
14343 	}
14344 
14345 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14346 	    tg3_flag(tp, 5780_CLASS)) {
14347 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14348 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14349 			tp->nvram_jedecnum = JEDEC_ATMEL;
14350 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14351 			tg3_flag_set(tp, NVRAM_BUFFERED);
14352 			break;
14353 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14354 			tp->nvram_jedecnum = JEDEC_ATMEL;
14355 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14356 			break;
14357 		case FLASH_VENDOR_ATMEL_EEPROM:
14358 			tp->nvram_jedecnum = JEDEC_ATMEL;
14359 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14360 			tg3_flag_set(tp, NVRAM_BUFFERED);
14361 			break;
14362 		case FLASH_VENDOR_ST:
14363 			tp->nvram_jedecnum = JEDEC_ST;
14364 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14365 			tg3_flag_set(tp, NVRAM_BUFFERED);
14366 			break;
14367 		case FLASH_VENDOR_SAIFUN:
14368 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14369 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14370 			break;
14371 		case FLASH_VENDOR_SST_SMALL:
14372 		case FLASH_VENDOR_SST_LARGE:
14373 			tp->nvram_jedecnum = JEDEC_SST;
14374 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14375 			break;
14376 		}
14377 	} else {
14378 		tp->nvram_jedecnum = JEDEC_ATMEL;
14379 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14380 		tg3_flag_set(tp, NVRAM_BUFFERED);
14381 	}
14382 }
14383 
14384 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14385 {
14386 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14387 	case FLASH_5752PAGE_SIZE_256:
14388 		tp->nvram_pagesize = 256;
14389 		break;
14390 	case FLASH_5752PAGE_SIZE_512:
14391 		tp->nvram_pagesize = 512;
14392 		break;
14393 	case FLASH_5752PAGE_SIZE_1K:
14394 		tp->nvram_pagesize = 1024;
14395 		break;
14396 	case FLASH_5752PAGE_SIZE_2K:
14397 		tp->nvram_pagesize = 2048;
14398 		break;
14399 	case FLASH_5752PAGE_SIZE_4K:
14400 		tp->nvram_pagesize = 4096;
14401 		break;
14402 	case FLASH_5752PAGE_SIZE_264:
14403 		tp->nvram_pagesize = 264;
14404 		break;
14405 	case FLASH_5752PAGE_SIZE_528:
14406 		tp->nvram_pagesize = 528;
14407 		break;
14408 	}
14409 }
14410 
14411 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14412 {
14413 	u32 nvcfg1;
14414 
14415 	nvcfg1 = tr32(NVRAM_CFG1);
14416 
14417 	/* NVRAM protection for TPM */
14418 	if (nvcfg1 & (1 << 27))
14419 		tg3_flag_set(tp, PROTECTED_NVRAM);
14420 
14421 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14422 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14423 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14424 		tp->nvram_jedecnum = JEDEC_ATMEL;
14425 		tg3_flag_set(tp, NVRAM_BUFFERED);
14426 		break;
14427 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14428 		tp->nvram_jedecnum = JEDEC_ATMEL;
14429 		tg3_flag_set(tp, NVRAM_BUFFERED);
14430 		tg3_flag_set(tp, FLASH);
14431 		break;
14432 	case FLASH_5752VENDOR_ST_M45PE10:
14433 	case FLASH_5752VENDOR_ST_M45PE20:
14434 	case FLASH_5752VENDOR_ST_M45PE40:
14435 		tp->nvram_jedecnum = JEDEC_ST;
14436 		tg3_flag_set(tp, NVRAM_BUFFERED);
14437 		tg3_flag_set(tp, FLASH);
14438 		break;
14439 	}
14440 
14441 	if (tg3_flag(tp, FLASH)) {
14442 		tg3_nvram_get_pagesize(tp, nvcfg1);
14443 	} else {
		/* For EEPROM, set the page size to the maximum EEPROM size. */
14445 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14446 
14447 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14448 		tw32(NVRAM_CFG1, nvcfg1);
14449 	}
14450 }
14451 
14452 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14453 {
14454 	u32 nvcfg1, protect = 0;
14455 
14456 	nvcfg1 = tr32(NVRAM_CFG1);
14457 
14458 	/* NVRAM protection for TPM */
14459 	if (nvcfg1 & (1 << 27)) {
14460 		tg3_flag_set(tp, PROTECTED_NVRAM);
14461 		protect = 1;
14462 	}
14463 
14464 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14465 	switch (nvcfg1) {
14466 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14467 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14468 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14469 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14470 		tp->nvram_jedecnum = JEDEC_ATMEL;
14471 		tg3_flag_set(tp, NVRAM_BUFFERED);
14472 		tg3_flag_set(tp, FLASH);
14473 		tp->nvram_pagesize = 264;
14474 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14475 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14476 			tp->nvram_size = (protect ? 0x3e200 :
14477 					  TG3_NVRAM_SIZE_512KB);
14478 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14479 			tp->nvram_size = (protect ? 0x1f200 :
14480 					  TG3_NVRAM_SIZE_256KB);
14481 		else
14482 			tp->nvram_size = (protect ? 0x1f200 :
14483 					  TG3_NVRAM_SIZE_128KB);
14484 		break;
14485 	case FLASH_5752VENDOR_ST_M45PE10:
14486 	case FLASH_5752VENDOR_ST_M45PE20:
14487 	case FLASH_5752VENDOR_ST_M45PE40:
14488 		tp->nvram_jedecnum = JEDEC_ST;
14489 		tg3_flag_set(tp, NVRAM_BUFFERED);
14490 		tg3_flag_set(tp, FLASH);
14491 		tp->nvram_pagesize = 256;
14492 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14493 			tp->nvram_size = (protect ?
14494 					  TG3_NVRAM_SIZE_64KB :
14495 					  TG3_NVRAM_SIZE_128KB);
14496 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14497 			tp->nvram_size = (protect ?
14498 					  TG3_NVRAM_SIZE_64KB :
14499 					  TG3_NVRAM_SIZE_256KB);
14500 		else
14501 			tp->nvram_size = (protect ?
14502 					  TG3_NVRAM_SIZE_128KB :
14503 					  TG3_NVRAM_SIZE_512KB);
14504 		break;
14505 	}
14506 }
14507 
14508 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14509 {
14510 	u32 nvcfg1;
14511 
14512 	nvcfg1 = tr32(NVRAM_CFG1);
14513 
14514 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14515 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14516 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14517 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14518 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14519 		tp->nvram_jedecnum = JEDEC_ATMEL;
14520 		tg3_flag_set(tp, NVRAM_BUFFERED);
14521 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14522 
14523 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14524 		tw32(NVRAM_CFG1, nvcfg1);
14525 		break;
14526 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14527 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14528 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14529 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14530 		tp->nvram_jedecnum = JEDEC_ATMEL;
14531 		tg3_flag_set(tp, NVRAM_BUFFERED);
14532 		tg3_flag_set(tp, FLASH);
14533 		tp->nvram_pagesize = 264;
14534 		break;
14535 	case FLASH_5752VENDOR_ST_M45PE10:
14536 	case FLASH_5752VENDOR_ST_M45PE20:
14537 	case FLASH_5752VENDOR_ST_M45PE40:
14538 		tp->nvram_jedecnum = JEDEC_ST;
14539 		tg3_flag_set(tp, NVRAM_BUFFERED);
14540 		tg3_flag_set(tp, FLASH);
14541 		tp->nvram_pagesize = 256;
14542 		break;
14543 	}
14544 }
14545 
14546 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14547 {
14548 	u32 nvcfg1, protect = 0;
14549 
14550 	nvcfg1 = tr32(NVRAM_CFG1);
14551 
14552 	/* NVRAM protection for TPM */
14553 	if (nvcfg1 & (1 << 27)) {
14554 		tg3_flag_set(tp, PROTECTED_NVRAM);
14555 		protect = 1;
14556 	}
14557 
14558 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14559 	switch (nvcfg1) {
14560 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14561 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14562 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14563 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14564 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14565 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14566 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14567 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14568 		tp->nvram_jedecnum = JEDEC_ATMEL;
14569 		tg3_flag_set(tp, NVRAM_BUFFERED);
14570 		tg3_flag_set(tp, FLASH);
14571 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14572 		tp->nvram_pagesize = 256;
14573 		break;
14574 	case FLASH_5761VENDOR_ST_A_M45PE20:
14575 	case FLASH_5761VENDOR_ST_A_M45PE40:
14576 	case FLASH_5761VENDOR_ST_A_M45PE80:
14577 	case FLASH_5761VENDOR_ST_A_M45PE16:
14578 	case FLASH_5761VENDOR_ST_M_M45PE20:
14579 	case FLASH_5761VENDOR_ST_M_M45PE40:
14580 	case FLASH_5761VENDOR_ST_M_M45PE80:
14581 	case FLASH_5761VENDOR_ST_M_M45PE16:
14582 		tp->nvram_jedecnum = JEDEC_ST;
14583 		tg3_flag_set(tp, NVRAM_BUFFERED);
14584 		tg3_flag_set(tp, FLASH);
14585 		tp->nvram_pagesize = 256;
14586 		break;
14587 	}
14588 
14589 	if (protect) {
14590 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14591 	} else {
14592 		switch (nvcfg1) {
14593 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14594 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14595 		case FLASH_5761VENDOR_ST_A_M45PE16:
14596 		case FLASH_5761VENDOR_ST_M_M45PE16:
14597 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14598 			break;
14599 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14600 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14601 		case FLASH_5761VENDOR_ST_A_M45PE80:
14602 		case FLASH_5761VENDOR_ST_M_M45PE80:
14603 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14604 			break;
14605 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14606 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14607 		case FLASH_5761VENDOR_ST_A_M45PE40:
14608 		case FLASH_5761VENDOR_ST_M_M45PE40:
14609 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14610 			break;
14611 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14612 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14613 		case FLASH_5761VENDOR_ST_A_M45PE20:
14614 		case FLASH_5761VENDOR_ST_M_M45PE20:
14615 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14616 			break;
14617 		}
14618 	}
14619 }
14620 
14621 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14622 {
14623 	tp->nvram_jedecnum = JEDEC_ATMEL;
14624 	tg3_flag_set(tp, NVRAM_BUFFERED);
14625 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14626 }
14627 
14628 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14629 {
14630 	u32 nvcfg1;
14631 
14632 	nvcfg1 = tr32(NVRAM_CFG1);
14633 
14634 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14635 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14636 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14637 		tp->nvram_jedecnum = JEDEC_ATMEL;
14638 		tg3_flag_set(tp, NVRAM_BUFFERED);
14639 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14640 
14641 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14642 		tw32(NVRAM_CFG1, nvcfg1);
14643 		return;
14644 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14645 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14646 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14647 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14648 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14649 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14650 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14651 		tp->nvram_jedecnum = JEDEC_ATMEL;
14652 		tg3_flag_set(tp, NVRAM_BUFFERED);
14653 		tg3_flag_set(tp, FLASH);
14654 
14655 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14656 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14657 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14658 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14659 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14660 			break;
14661 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14662 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14663 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14664 			break;
14665 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14666 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14667 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14668 			break;
14669 		}
14670 		break;
14671 	case FLASH_5752VENDOR_ST_M45PE10:
14672 	case FLASH_5752VENDOR_ST_M45PE20:
14673 	case FLASH_5752VENDOR_ST_M45PE40:
14674 		tp->nvram_jedecnum = JEDEC_ST;
14675 		tg3_flag_set(tp, NVRAM_BUFFERED);
14676 		tg3_flag_set(tp, FLASH);
14677 
14678 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14679 		case FLASH_5752VENDOR_ST_M45PE10:
14680 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14681 			break;
14682 		case FLASH_5752VENDOR_ST_M45PE20:
14683 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14684 			break;
14685 		case FLASH_5752VENDOR_ST_M45PE40:
14686 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14687 			break;
14688 		}
14689 		break;
14690 	default:
14691 		tg3_flag_set(tp, NO_NVRAM);
14692 		return;
14693 	}
14694 
14695 	tg3_nvram_get_pagesize(tp, nvcfg1);
14696 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14697 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14698 }
14699 
14700 
14701 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14702 {
14703 	u32 nvcfg1;
14704 
14705 	nvcfg1 = tr32(NVRAM_CFG1);
14706 
14707 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14708 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14709 	case FLASH_5717VENDOR_MICRO_EEPROM:
14710 		tp->nvram_jedecnum = JEDEC_ATMEL;
14711 		tg3_flag_set(tp, NVRAM_BUFFERED);
14712 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14713 
14714 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14715 		tw32(NVRAM_CFG1, nvcfg1);
14716 		return;
14717 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14718 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14719 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14720 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14721 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14722 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14723 	case FLASH_5717VENDOR_ATMEL_45USPT:
14724 		tp->nvram_jedecnum = JEDEC_ATMEL;
14725 		tg3_flag_set(tp, NVRAM_BUFFERED);
14726 		tg3_flag_set(tp, FLASH);
14727 
14728 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14729 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14730 			/* Detect size with tg3_nvram_get_size() */
14731 			break;
14732 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14733 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14734 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14735 			break;
14736 		default:
14737 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14738 			break;
14739 		}
14740 		break;
14741 	case FLASH_5717VENDOR_ST_M_M25PE10:
14742 	case FLASH_5717VENDOR_ST_A_M25PE10:
14743 	case FLASH_5717VENDOR_ST_M_M45PE10:
14744 	case FLASH_5717VENDOR_ST_A_M45PE10:
14745 	case FLASH_5717VENDOR_ST_M_M25PE20:
14746 	case FLASH_5717VENDOR_ST_A_M25PE20:
14747 	case FLASH_5717VENDOR_ST_M_M45PE20:
14748 	case FLASH_5717VENDOR_ST_A_M45PE20:
14749 	case FLASH_5717VENDOR_ST_25USPT:
14750 	case FLASH_5717VENDOR_ST_45USPT:
14751 		tp->nvram_jedecnum = JEDEC_ST;
14752 		tg3_flag_set(tp, NVRAM_BUFFERED);
14753 		tg3_flag_set(tp, FLASH);
14754 
14755 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14756 		case FLASH_5717VENDOR_ST_M_M25PE20:
14757 		case FLASH_5717VENDOR_ST_M_M45PE20:
14758 			/* Detect size with tg3_nvram_get_size() */
14759 			break;
14760 		case FLASH_5717VENDOR_ST_A_M25PE20:
14761 		case FLASH_5717VENDOR_ST_A_M45PE20:
14762 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14763 			break;
14764 		default:
14765 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14766 			break;
14767 		}
14768 		break;
14769 	default:
14770 		tg3_flag_set(tp, NO_NVRAM);
14771 		return;
14772 	}
14773 
14774 	tg3_nvram_get_pagesize(tp, nvcfg1);
14775 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14776 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14777 }
14778 
14779 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14780 {
14781 	u32 nvcfg1, nvmpinstrp;
14782 
14783 	nvcfg1 = tr32(NVRAM_CFG1);
14784 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14785 
14786 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14787 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14788 			tg3_flag_set(tp, NO_NVRAM);
14789 			return;
14790 		}
14791 
14792 		switch (nvmpinstrp) {
14793 		case FLASH_5762_EEPROM_HD:
14794 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14795 			break;
14796 		case FLASH_5762_EEPROM_LD:
14797 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14798 			break;
14799 		case FLASH_5720VENDOR_M_ST_M45PE20:
14800 			/* This pinstrap supports multiple sizes, so force it
14801 			 * to read the actual size from location 0xf0.
14802 			 */
14803 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14804 			break;
14805 		}
14806 	}
14807 
14808 	switch (nvmpinstrp) {
14809 	case FLASH_5720_EEPROM_HD:
14810 	case FLASH_5720_EEPROM_LD:
14811 		tp->nvram_jedecnum = JEDEC_ATMEL;
14812 		tg3_flag_set(tp, NVRAM_BUFFERED);
14813 
14814 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14815 		tw32(NVRAM_CFG1, nvcfg1);
14816 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14817 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14818 		else
14819 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14820 		return;
14821 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14822 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14823 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14824 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14825 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14826 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14827 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14828 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14829 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14830 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14831 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14832 	case FLASH_5720VENDOR_ATMEL_45USPT:
14833 		tp->nvram_jedecnum = JEDEC_ATMEL;
14834 		tg3_flag_set(tp, NVRAM_BUFFERED);
14835 		tg3_flag_set(tp, FLASH);
14836 
14837 		switch (nvmpinstrp) {
14838 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14839 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14840 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14841 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14842 			break;
14843 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14844 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14845 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14846 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14847 			break;
14848 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14849 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14850 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14851 			break;
14852 		default:
14853 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14854 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14855 			break;
14856 		}
14857 		break;
14858 	case FLASH_5720VENDOR_M_ST_M25PE10:
14859 	case FLASH_5720VENDOR_M_ST_M45PE10:
14860 	case FLASH_5720VENDOR_A_ST_M25PE10:
14861 	case FLASH_5720VENDOR_A_ST_M45PE10:
14862 	case FLASH_5720VENDOR_M_ST_M25PE20:
14863 	case FLASH_5720VENDOR_M_ST_M45PE20:
14864 	case FLASH_5720VENDOR_A_ST_M25PE20:
14865 	case FLASH_5720VENDOR_A_ST_M45PE20:
14866 	case FLASH_5720VENDOR_M_ST_M25PE40:
14867 	case FLASH_5720VENDOR_M_ST_M45PE40:
14868 	case FLASH_5720VENDOR_A_ST_M25PE40:
14869 	case FLASH_5720VENDOR_A_ST_M45PE40:
14870 	case FLASH_5720VENDOR_M_ST_M25PE80:
14871 	case FLASH_5720VENDOR_M_ST_M45PE80:
14872 	case FLASH_5720VENDOR_A_ST_M25PE80:
14873 	case FLASH_5720VENDOR_A_ST_M45PE80:
14874 	case FLASH_5720VENDOR_ST_25USPT:
14875 	case FLASH_5720VENDOR_ST_45USPT:
14876 		tp->nvram_jedecnum = JEDEC_ST;
14877 		tg3_flag_set(tp, NVRAM_BUFFERED);
14878 		tg3_flag_set(tp, FLASH);
14879 
14880 		switch (nvmpinstrp) {
14881 		case FLASH_5720VENDOR_M_ST_M25PE20:
14882 		case FLASH_5720VENDOR_M_ST_M45PE20:
14883 		case FLASH_5720VENDOR_A_ST_M25PE20:
14884 		case FLASH_5720VENDOR_A_ST_M45PE20:
14885 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14886 			break;
14887 		case FLASH_5720VENDOR_M_ST_M25PE40:
14888 		case FLASH_5720VENDOR_M_ST_M45PE40:
14889 		case FLASH_5720VENDOR_A_ST_M25PE40:
14890 		case FLASH_5720VENDOR_A_ST_M45PE40:
14891 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14892 			break;
14893 		case FLASH_5720VENDOR_M_ST_M25PE80:
14894 		case FLASH_5720VENDOR_M_ST_M45PE80:
14895 		case FLASH_5720VENDOR_A_ST_M25PE80:
14896 		case FLASH_5720VENDOR_A_ST_M45PE80:
14897 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14898 			break;
14899 		default:
14900 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14901 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14902 			break;
14903 		}
14904 		break;
14905 	default:
14906 		tg3_flag_set(tp, NO_NVRAM);
14907 		return;
14908 	}
14909 
14910 	tg3_nvram_get_pagesize(tp, nvcfg1);
14911 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14912 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14913 
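	/* On the 5762, additionally verify that the NVRAM holds a valid
	 * image: accept either the EEPROM magic or the firmware-image
	 * magic, and otherwise treat the part as absent.
	 */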
14914 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14915 		u32 val;
14916 
14917 		if (tg3_nvram_read(tp, 0, &val))
14918 			return;
14919 
14920 		if (val != TG3_EEPROM_MAGIC &&
14921 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14922 			tg3_flag_set(tp, NO_NVRAM);
14923 	}
14924 }
14925 
14926 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14927 static void tg3_nvram_init(struct tg3 *tp)
14928 {
14929 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* There is no NVRAM or EEPROM on the SSB Broadcom GigE core. */
14931 		tg3_flag_clear(tp, NVRAM);
14932 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14933 		tg3_flag_set(tp, NO_NVRAM);
14934 		return;
14935 	}
14936 
14937 	tw32_f(GRC_EEPROM_ADDR,
14938 	     (EEPROM_ADDR_FSM_RESET |
14939 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14940 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14941 
14942 	msleep(1);
14943 
	/* Enable serial EEPROM accesses. */
14945 	tw32_f(GRC_LOCAL_CTRL,
14946 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14947 	udelay(100);
14948 
14949 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14950 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14951 		tg3_flag_set(tp, NVRAM);
14952 
14953 		if (tg3_nvram_lock(tp)) {
14954 			netdev_warn(tp->dev,
14955 				    "Cannot get nvram lock, %s failed\n",
14956 				    __func__);
14957 			return;
14958 		}
14959 		tg3_enable_nvram_access(tp);
14960 
14961 		tp->nvram_size = 0;
14962 
14963 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14964 			tg3_get_5752_nvram_info(tp);
14965 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14966 			tg3_get_5755_nvram_info(tp);
14967 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14968 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14969 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14970 			tg3_get_5787_nvram_info(tp);
14971 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14972 			tg3_get_5761_nvram_info(tp);
14973 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14974 			tg3_get_5906_nvram_info(tp);
14975 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14976 			 tg3_flag(tp, 57765_CLASS))
14977 			tg3_get_57780_nvram_info(tp);
14978 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14979 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14980 			tg3_get_5717_nvram_info(tp);
14981 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14982 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14983 			tg3_get_5720_nvram_info(tp);
14984 		else
14985 			tg3_get_nvram_info(tp);
14986 
14987 		if (tp->nvram_size == 0)
14988 			tg3_get_nvram_size(tp);
14989 
14990 		tg3_disable_nvram_access(tp);
14991 		tg3_nvram_unlock(tp);
14992 
14993 	} else {
14994 		tg3_flag_clear(tp, NVRAM);
14995 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14996 
14997 		tg3_get_eeprom_size(tp);
14998 	}
14999 }
15000 
15001 struct subsys_tbl_ent {
15002 	u16 subsys_vendor, subsys_devid;
15003 	u32 phy_id;
15004 };
15005 
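/* Known board-to-PHY mappings, used when the EEPROM provides no usable
 * PHY ID.  A phy_id of 0 denotes a fiber/SerDes board; tg3_phy_probe()
 * translates that into TG3_PHYFLG_PHY_SERDES.
 */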
15006 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15007 	/* Broadcom boards. */
15008 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15009 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15010 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15011 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15012 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15013 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15014 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15015 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15016 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15017 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15018 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15019 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15020 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15021 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15022 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15023 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15024 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15025 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15026 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15027 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15028 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15029 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15030 
15031 	/* 3com boards. */
15032 	{ TG3PCI_SUBVENDOR_ID_3COM,
15033 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15034 	{ TG3PCI_SUBVENDOR_ID_3COM,
15035 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15036 	{ TG3PCI_SUBVENDOR_ID_3COM,
15037 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15038 	{ TG3PCI_SUBVENDOR_ID_3COM,
15039 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15040 	{ TG3PCI_SUBVENDOR_ID_3COM,
15041 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15042 
15043 	/* DELL boards. */
15044 	{ TG3PCI_SUBVENDOR_ID_DELL,
15045 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15046 	{ TG3PCI_SUBVENDOR_ID_DELL,
15047 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15048 	{ TG3PCI_SUBVENDOR_ID_DELL,
15049 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15050 	{ TG3PCI_SUBVENDOR_ID_DELL,
15051 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15052 
15053 	/* Compaq boards. */
15054 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15055 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15056 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15057 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15058 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15059 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15060 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15061 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15062 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15063 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15064 
15065 	/* IBM boards. */
15066 	{ TG3PCI_SUBVENDOR_ID_IBM,
15067 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15068 };
15069 
15070 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15071 {
15072 	int i;
15073 
15074 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15075 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15076 		     tp->pdev->subsystem_vendor) &&
15077 		    (subsys_id_to_phy_id[i].subsys_devid ==
15078 		     tp->pdev->subsystem_device))
15079 			return &subsys_id_to_phy_id[i];
15080 	}
15081 	return NULL;
15082 }
15083 
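/* Parse the bootcode-provided hardware configuration out of NIC SRAM
 * (or the VCPU shadow register on the 5906): PHY type, LED mode, and
 * the WOL/ASF/APE and PHY workaround flags.
 */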
15084 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15085 {
15086 	u32 val;
15087 
15088 	tp->phy_id = TG3_PHY_ID_INVALID;
15089 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15090 
	/* Assume an onboard device and WOL capability by default. */
15092 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15093 	tg3_flag_set(tp, WOL_CAP);
15094 
15095 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15096 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15097 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15098 			tg3_flag_set(tp, IS_NIC);
15099 		}
15100 		val = tr32(VCPU_CFGSHDW);
15101 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15102 			tg3_flag_set(tp, ASPM_WORKAROUND);
15103 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15104 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15105 			tg3_flag_set(tp, WOL_ENABLE);
15106 			device_set_wakeup_enable(&tp->pdev->dev, true);
15107 		}
15108 		goto done;
15109 	}
15110 
15111 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15112 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15113 		u32 nic_cfg, led_cfg;
15114 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15115 		u32 nic_phy_id, ver, eeprom_phy_id;
15116 		int eeprom_phy_serdes = 0;
15117 
15118 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15119 		tp->nic_sram_data_cfg = nic_cfg;
15120 
15121 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15122 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15123 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15124 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15125 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15126 		    (ver > 0) && (ver < 0x100))
15127 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15128 
15129 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15130 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15131 
15132 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15133 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15134 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15135 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15136 
15137 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15138 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15139 			eeprom_phy_serdes = 1;
15140 
15141 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15142 		if (nic_phy_id != 0) {
15143 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15144 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15145 
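			/* Fold the two halves of the SRAM PHY-ID word
			 * into the driver's internal 32-bit PHY ID layout
			 * (the same packing tg3_phy_probe() applies to
			 * the MII ID registers).
			 */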
15146 			eeprom_phy_id  = (id1 >> 16) << 10;
15147 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15148 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15149 		} else
15150 			eeprom_phy_id = 0;
15151 
15152 		tp->phy_id = eeprom_phy_id;
15153 		if (eeprom_phy_serdes) {
15154 			if (!tg3_flag(tp, 5705_PLUS))
15155 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15156 			else
15157 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15158 		}
15159 
15160 		if (tg3_flag(tp, 5750_PLUS))
15161 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15162 				    SHASTA_EXT_LED_MODE_MASK);
15163 		else
15164 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15165 
15166 		switch (led_cfg) {
15167 		default:
15168 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15169 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15170 			break;
15171 
15172 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15173 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15174 			break;
15175 
15176 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15177 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15178 
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as some older 5700/5701 bootcode leaves it at 0.
			 */
15182 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15183 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15184 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15185 
15186 			break;
15187 
15188 		case SHASTA_EXT_LED_SHARED:
15189 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15190 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15191 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15192 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15193 						 LED_CTRL_MODE_PHY_2);
15194 
15195 			if (tg3_flag(tp, 5717_PLUS) ||
15196 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15197 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15198 						LED_CTRL_BLINK_RATE_MASK;
15199 
15200 			break;
15201 
15202 		case SHASTA_EXT_LED_MAC:
15203 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15204 			break;
15205 
15206 		case SHASTA_EXT_LED_COMBO:
15207 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15208 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15209 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15210 						 LED_CTRL_MODE_PHY_2);
15211 			break;
15212 
15213 		}
15214 
15215 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15216 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15217 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15218 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15219 
15220 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15221 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15222 
15223 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15224 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15225 			if ((tp->pdev->subsystem_vendor ==
15226 			     PCI_VENDOR_ID_ARIMA) &&
15227 			    (tp->pdev->subsystem_device == 0x205a ||
15228 			     tp->pdev->subsystem_device == 0x2063))
15229 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15230 		} else {
15231 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15232 			tg3_flag_set(tp, IS_NIC);
15233 		}
15234 
15235 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15236 			tg3_flag_set(tp, ENABLE_ASF);
15237 			if (tg3_flag(tp, 5750_PLUS))
15238 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15239 		}
15240 
15241 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15242 		    tg3_flag(tp, 5750_PLUS))
15243 			tg3_flag_set(tp, ENABLE_APE);
15244 
15245 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15246 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15247 			tg3_flag_clear(tp, WOL_CAP);
15248 
15249 		if (tg3_flag(tp, WOL_CAP) &&
15250 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15251 			tg3_flag_set(tp, WOL_ENABLE);
15252 			device_set_wakeup_enable(&tp->pdev->dev, true);
15253 		}
15254 
15255 		if (cfg2 & (1 << 17))
15256 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15257 
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
15260 		if (cfg2 & (1 << 18))
15261 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15262 
15263 		if ((tg3_flag(tp, 57765_PLUS) ||
15264 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15265 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15266 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15267 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15268 
15269 		if (tg3_flag(tp, PCI_EXPRESS)) {
15270 			u32 cfg3;
15271 
15272 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15273 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15274 			    !tg3_flag(tp, 57765_PLUS) &&
15275 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15276 				tg3_flag_set(tp, ASPM_WORKAROUND);
15277 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15278 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15279 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15280 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15281 		}
15282 
15283 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15284 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15285 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15286 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15287 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15288 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15289 
15290 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15291 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15292 	}
15293 done:
15294 	if (tg3_flag(tp, WOL_CAP))
15295 		device_set_wakeup_enable(&tp->pdev->dev,
15296 					 tg3_flag(tp, WOL_ENABLE));
15297 	else
15298 		device_set_wakeup_capable(&tp->pdev->dev, false);
15299 }
15300 
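/* Read one 32-bit word from the OTP region via the APE registers:
 * take the NVRAM lock, issue a read command, then poll for up to
 * ~1 ms (100 x 10 us) for completion.
 */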
15301 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15302 {
15303 	int i, err;
15304 	u32 val2, off = offset * 8;
15305 
15306 	err = tg3_nvram_lock(tp);
15307 	if (err)
15308 		return err;
15309 
15310 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15311 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15312 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15313 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15314 	udelay(10);
15315 
15316 	for (i = 0; i < 100; i++) {
15317 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15318 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15319 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15320 			break;
15321 		}
15322 		udelay(10);
15323 	}
15324 
15325 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15326 
15327 	tg3_nvram_unlock(tp);
15328 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15329 		return 0;
15330 
15331 	return -EBUSY;
15332 }
15333 
15334 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15335 {
15336 	int i;
15337 	u32 val;
15338 
15339 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15340 	tw32(OTP_CTRL, cmd);
15341 
	/* Wait for up to 1 ms for the command to execute. */
15343 	for (i = 0; i < 100; i++) {
15344 		val = tr32(OTP_STATUS);
15345 		if (val & OTP_STATUS_CMD_DONE)
15346 			break;
15347 		udelay(10);
15348 	}
15349 
15350 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15351 }
15352 
15353 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15354  * configuration is a 32-bit value that straddles the alignment boundary.
15355  * We do two 32-bit reads and then shift and merge the results.
15356  */
15357 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15358 {
15359 	u32 bhalf_otp, thalf_otp;
15360 
15361 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15362 
15363 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15364 		return 0;
15365 
15366 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15367 
15368 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15369 		return 0;
15370 
15371 	thalf_otp = tr32(OTP_READ_DATA);
15372 
15373 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15374 
15375 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15376 		return 0;
15377 
15378 	bhalf_otp = tr32(OTP_READ_DATA);
15379 
15380 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15381 }
15382 
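/* Seed the default link configuration: advertise every speed the PHY
 * supports (subject to the 10/100-only, 1G-half, and SerDes flags)
 * and leave autoneg enabled with speed/duplex unknown until link-up.
 */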
15383 static void tg3_phy_init_link_config(struct tg3 *tp)
15384 {
15385 	u32 adv = ADVERTISED_Autoneg;
15386 
15387 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15388 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15389 			adv |= ADVERTISED_1000baseT_Half;
15390 		adv |= ADVERTISED_1000baseT_Full;
15391 	}
15392 
15393 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15394 		adv |= ADVERTISED_100baseT_Half |
15395 		       ADVERTISED_100baseT_Full |
15396 		       ADVERTISED_10baseT_Half |
15397 		       ADVERTISED_10baseT_Full |
15398 		       ADVERTISED_TP;
15399 	else
15400 		adv |= ADVERTISED_FIBRE;
15401 
15402 	tp->link_config.advertising = adv;
15403 	tp->link_config.speed = SPEED_UNKNOWN;
15404 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15405 	tp->link_config.autoneg = AUTONEG_ENABLE;
15406 	tp->link_config.active_speed = SPEED_UNKNOWN;
15407 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15408 
15409 	tp->old_link = -1;
15410 }
15411 
15412 static int tg3_phy_probe(struct tg3 *tp)
15413 {
15414 	u32 hw_phy_id_1, hw_phy_id_2;
15415 	u32 hw_phy_id, hw_phy_id_masked;
15416 	int err;
15417 
	/* Flow control autonegotiation is the default behavior. */
15419 	tg3_flag_set(tp, PAUSE_AUTONEG);
15420 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15421 
15422 	if (tg3_flag(tp, ENABLE_APE)) {
15423 		switch (tp->pci_fn) {
15424 		case 0:
15425 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15426 			break;
15427 		case 1:
15428 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15429 			break;
15430 		case 2:
15431 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15432 			break;
15433 		case 3:
15434 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15435 			break;
15436 		}
15437 	}
15438 
15439 	if (!tg3_flag(tp, ENABLE_ASF) &&
15440 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15441 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15442 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15443 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15444 
15445 	if (tg3_flag(tp, USE_PHYLIB))
15446 		return tg3_phy_init(tp);
15447 
15448 	/* Reading the PHY ID register can conflict with ASF
15449 	 * firmware access to the PHY hardware.
15450 	 */
15451 	err = 0;
15452 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15453 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15454 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the EEPROM area or, failing that,
		 * to the hard-coded subsystem-ID table.
		 */
15460 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15461 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15462 
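		/* Pack MII_PHYSID1/2 into the driver's internal 32-bit
		 * PHY ID format; masking with TG3_PHY_ID_MASK then drops
		 * the revision bits before comparing against known IDs.
		 */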
15463 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15464 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15465 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15466 
15467 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15468 	}
15469 
15470 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15471 		tp->phy_id = hw_phy_id;
15472 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15473 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15474 		else
15475 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15476 	} else {
15477 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15478 			/* Do nothing, phy ID already set up in
15479 			 * tg3_get_eeprom_hw_cfg().
15480 			 */
15481 		} else {
15482 			struct subsys_tbl_ent *p;
15483 
			/* No EEPROM signature?  Try the hard-coded
			 * subsystem device table.
			 */
15487 			p = tg3_lookup_by_subsys(tp);
15488 			if (p) {
15489 				tp->phy_id = p->phy_id;
15490 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
15498 				return -ENODEV;
15499 			}
15500 
15501 			if (!tp->phy_id ||
15502 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15503 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15504 		}
15505 	}
15506 
15507 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15508 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15509 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15510 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15511 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15512 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15513 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15514 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15515 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15516 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15517 
15518 		tp->eee.supported = SUPPORTED_100baseT_Full |
15519 				    SUPPORTED_1000baseT_Full;
15520 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15521 				     ADVERTISED_1000baseT_Full;
15522 		tp->eee.eee_enabled = 1;
15523 		tp->eee.tx_lpi_enabled = 1;
15524 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15525 	}
15526 
15527 	tg3_phy_init_link_config(tp);
15528 
15529 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15530 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15531 	    !tg3_flag(tp, ENABLE_APE) &&
15532 	    !tg3_flag(tp, ENABLE_ASF)) {
15533 		u32 bmsr, dummy;
15534 
15535 		tg3_readphy(tp, MII_BMSR, &bmsr);
15536 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15537 		    (bmsr & BMSR_LSTATUS))
15538 			goto skip_phy_reset;
15539 
15540 		err = tg3_phy_reset(tp);
15541 		if (err)
15542 			return err;
15543 
15544 		tg3_phy_set_wirespeed(tp);
15545 
15546 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15547 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15548 					    tp->link_config.flowctrl);
15549 
15550 			tg3_writephy(tp, MII_BMCR,
15551 				     BMCR_ANENABLE | BMCR_ANRESTART);
15552 		}
15553 	}
15554 
15555 skip_phy_reset:
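	/* Note: the 5401 DSP init below is issued twice on purpose;
	 * only a failure of the first pass aborts the probe.
	 */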
15556 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15557 		err = tg3_init_5401phy_dsp(tp);
15558 		if (err)
15559 			return err;
15560 
15561 		err = tg3_init_5401phy_dsp(tp);
15562 	}
15563 
15564 	return err;
15565 }
15566 
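/* Extract the board part number from the PCI VPD read-only section.
 * On Dell boards (MFR_ID "1028", Dell's PCI vendor ID) the vendor
 * keyword also yields a bootcode version string.  When no VPD is
 * present, fall back to names keyed off the PCI device ID.
 */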
15567 static void tg3_read_vpd(struct tg3 *tp)
15568 {
15569 	u8 *vpd_data;
15570 	unsigned int block_end, rosize, len;
15571 	u32 vpdlen;
15572 	int j, i = 0;
15573 
15574 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15575 	if (!vpd_data)
15576 		goto out_no_vpd;
15577 
15578 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15579 	if (i < 0)
15580 		goto out_not_found;
15581 
15582 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15583 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15584 	i += PCI_VPD_LRDT_TAG_SIZE;
15585 
15586 	if (block_end > vpdlen)
15587 		goto out_not_found;
15588 
15589 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15590 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15591 	if (j > 0) {
15592 		len = pci_vpd_info_field_size(&vpd_data[j]);
15593 
15594 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15595 		if (j + len > block_end || len != 4 ||
15596 		    memcmp(&vpd_data[j], "1028", 4))
15597 			goto partno;
15598 
15599 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15600 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15601 		if (j < 0)
15602 			goto partno;
15603 
15604 		len = pci_vpd_info_field_size(&vpd_data[j]);
15605 
15606 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15607 		if (j + len > block_end)
15608 			goto partno;
15609 
15610 		if (len >= sizeof(tp->fw_ver))
15611 			len = sizeof(tp->fw_ver) - 1;
15612 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15613 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15614 			 &vpd_data[j]);
15615 	}
15616 
15617 partno:
15618 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15619 				      PCI_VPD_RO_KEYWORD_PARTNO);
15620 	if (i < 0)
15621 		goto out_not_found;
15622 
15623 	len = pci_vpd_info_field_size(&vpd_data[i]);
15624 
15625 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15626 	if (len > TG3_BPN_SIZE ||
15627 	    (len + i) > vpdlen)
15628 		goto out_not_found;
15629 
15630 	memcpy(tp->board_part_number, &vpd_data[i], len);
15631 
15632 out_not_found:
15633 	kfree(vpd_data);
15634 	if (tp->board_part_number[0])
15635 		return;
15636 
15637 out_no_vpd:
15638 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15639 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15640 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15641 			strcpy(tp->board_part_number, "BCM5717");
15642 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15643 			strcpy(tp->board_part_number, "BCM5718");
15644 		else
15645 			goto nomatch;
15646 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15647 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15648 			strcpy(tp->board_part_number, "BCM57780");
15649 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15650 			strcpy(tp->board_part_number, "BCM57760");
15651 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15652 			strcpy(tp->board_part_number, "BCM57790");
15653 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15654 			strcpy(tp->board_part_number, "BCM57788");
15655 		else
15656 			goto nomatch;
15657 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15658 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15659 			strcpy(tp->board_part_number, "BCM57761");
15660 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15661 			strcpy(tp->board_part_number, "BCM57765");
15662 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15663 			strcpy(tp->board_part_number, "BCM57781");
15664 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15665 			strcpy(tp->board_part_number, "BCM57785");
15666 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15667 			strcpy(tp->board_part_number, "BCM57791");
15668 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15669 			strcpy(tp->board_part_number, "BCM57795");
15670 		else
15671 			goto nomatch;
15672 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15673 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15674 			strcpy(tp->board_part_number, "BCM57762");
15675 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15676 			strcpy(tp->board_part_number, "BCM57766");
15677 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15678 			strcpy(tp->board_part_number, "BCM57782");
15679 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15680 			strcpy(tp->board_part_number, "BCM57786");
15681 		else
15682 			goto nomatch;
15683 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15684 		strcpy(tp->board_part_number, "BCM95906");
15685 	} else {
15686 nomatch:
15687 		strcpy(tp->board_part_number, "none");
15688 	}
15689 }
15690 
15691 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15692 {
15693 	u32 val;
15694 
15695 	if (tg3_nvram_read(tp, offset, &val) ||
15696 	    (val & 0xfc000000) != 0x0c000000 ||
15697 	    tg3_nvram_read(tp, offset + 4, &val) ||
15698 	    val != 0)
15699 		return 0;
15700 
15701 	return 1;
15702 }
15703 
15704 static void tg3_read_bc_ver(struct tg3 *tp)
15705 {
15706 	u32 val, offset, start, ver_offset;
15707 	int i, dst_off;
15708 	bool newver = false;
15709 
15710 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15711 	    tg3_nvram_read(tp, 0x4, &start))
15712 		return;
15713 
15714 	offset = tg3_nvram_logical_addr(tp, offset);
15715 
15716 	if (tg3_nvram_read(tp, offset, &val))
15717 		return;
15718 
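	/* A new-format bootcode image carries the same signature that
	 * tg3_fw_img_is_valid() checks: 0x0c in the top bits of the
	 * first word and a zero second word.  Older images instead
	 * store a packed major/minor at TG3_NVM_PTREV_BCVER.
	 */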
15719 	if ((val & 0xfc000000) == 0x0c000000) {
15720 		if (tg3_nvram_read(tp, offset + 4, &val))
15721 			return;
15722 
15723 		if (val == 0)
15724 			newver = true;
15725 	}
15726 
15727 	dst_off = strlen(tp->fw_ver);
15728 
15729 	if (newver) {
15730 		if (TG3_VER_SIZE - dst_off < 16 ||
15731 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15732 			return;
15733 
15734 		offset = offset + ver_offset - start;
15735 		for (i = 0; i < 16; i += 4) {
15736 			__be32 v;
15737 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15738 				return;
15739 
15740 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15741 		}
15742 	} else {
15743 		u32 major, minor;
15744 
15745 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15746 			return;
15747 
15748 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15749 			TG3_NVM_BCVER_MAJSFT;
15750 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15751 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15752 			 "v%d.%02d", major, minor);
15753 	}
15754 }
15755 
15756 static void tg3_read_hwsb_ver(struct tg3 *tp)
15757 {
15758 	u32 val, major, minor;
15759 
15760 	/* Use native endian representation */
15761 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15762 		return;
15763 
15764 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15765 		TG3_NVM_HWSB_CFG1_MAJSFT;
15766 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15767 		TG3_NVM_HWSB_CFG1_MINSFT;
15768 
15769 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15770 }
15771 
15772 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15773 {
15774 	u32 offset, major, minor, build;
15775 
15776 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15777 
15778 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15779 		return;
15780 
15781 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15782 	case TG3_EEPROM_SB_REVISION_0:
15783 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15784 		break;
15785 	case TG3_EEPROM_SB_REVISION_2:
15786 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15787 		break;
15788 	case TG3_EEPROM_SB_REVISION_3:
15789 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15790 		break;
15791 	case TG3_EEPROM_SB_REVISION_4:
15792 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15793 		break;
15794 	case TG3_EEPROM_SB_REVISION_5:
15795 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15796 		break;
15797 	case TG3_EEPROM_SB_REVISION_6:
15798 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15799 		break;
15800 	default:
15801 		return;
15802 	}
15803 
15804 	if (tg3_nvram_read(tp, offset, &val))
15805 		return;
15806 
15807 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15808 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15809 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15810 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15811 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15812 
15813 	if (minor > 99 || build > 26)
15814 		return;
15815 
15816 	offset = strlen(tp->fw_ver);
15817 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15818 		 " v%d.%02d", major, minor);
15819 
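	/* A nonzero build number is encoded as a trailing letter
	 * ('a' for build 1, 'b' for build 2, and so on up to 'z').
	 */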
15820 	if (build > 0) {
15821 		offset = strlen(tp->fw_ver);
15822 		if (offset < TG3_VER_SIZE - 1)
15823 			tp->fw_ver[offset] = 'a' + build - 1;
15824 	}
15825 }
15826 
15827 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15828 {
15829 	u32 val, offset, start;
15830 	int i, vlen;
15831 
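	/* Walk the NVRAM directory looking for the ASF-init entry; each
	 * slot is TG3_NVM_DIRENT_SIZE bytes with the entry type held in
	 * the top bits of its first word.
	 */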
15832 	for (offset = TG3_NVM_DIR_START;
15833 	     offset < TG3_NVM_DIR_END;
15834 	     offset += TG3_NVM_DIRENT_SIZE) {
15835 		if (tg3_nvram_read(tp, offset, &val))
15836 			return;
15837 
15838 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15839 			break;
15840 	}
15841 
15842 	if (offset == TG3_NVM_DIR_END)
15843 		return;
15844 
15845 	if (!tg3_flag(tp, 5705_PLUS))
15846 		start = 0x08000000;
15847 	else if (tg3_nvram_read(tp, offset - 4, &start))
15848 		return;
15849 
15850 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15851 	    !tg3_fw_img_is_valid(tp, offset) ||
15852 	    tg3_nvram_read(tp, offset + 8, &val))
15853 		return;
15854 
15855 	offset += val - start;
15856 
15857 	vlen = strlen(tp->fw_ver);
15858 
15859 	tp->fw_ver[vlen++] = ',';
15860 	tp->fw_ver[vlen++] = ' ';
15861 
15862 	for (i = 0; i < 4; i++) {
15863 		__be32 v;
15864 		if (tg3_nvram_read_be32(tp, offset, &v))
15865 			return;
15866 
15867 		offset += sizeof(v);
15868 
15869 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15870 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15871 			break;
15872 		}
15873 
15874 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15875 		vlen += sizeof(v);
15876 	}
15877 }
15878 
15879 static void tg3_probe_ncsi(struct tg3 *tp)
15880 {
15881 	u32 apedata;
15882 
15883 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15884 	if (apedata != APE_SEG_SIG_MAGIC)
15885 		return;
15886 
15887 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15888 	if (!(apedata & APE_FW_STATUS_READY))
15889 		return;
15890 
15891 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15892 		tg3_flag_set(tp, APE_HAS_NCSI);
15893 }
15894 
15895 static void tg3_read_dash_ver(struct tg3 *tp)
15896 {
15897 	int vlen;
15898 	u32 apedata;
15899 	char *fwtype;
15900 
15901 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15902 
15903 	if (tg3_flag(tp, APE_HAS_NCSI))
15904 		fwtype = "NCSI";
15905 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15906 		fwtype = "SMASH";
15907 	else
15908 		fwtype = "DASH";
15909 
15910 	vlen = strlen(tp->fw_ver);
15911 
15912 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15913 		 fwtype,
15914 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15915 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15916 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15917 		 (apedata & APE_FW_VERSION_BLDMSK));
15918 }
15919 
15920 static void tg3_read_otp_ver(struct tg3 *tp)
15921 {
15922 	u32 val, val2;
15923 
15924 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15925 		return;
15926 
15927 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15928 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15929 	    TG3_OTP_MAGIC0_VALID(val)) {
15930 		u64 val64 = (u64) val << 32 | val2;
15931 		u32 ver = 0;
15932 		int i, vlen;
15933 
15934 		for (i = 0; i < 7; i++) {
15935 			if ((val64 & 0xff) == 0)
15936 				break;
15937 			ver = val64 & 0xff;
15938 			val64 >>= 8;
15939 		}
15940 		vlen = strlen(tp->fw_ver);
15941 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15942 	}
15943 }
15944 
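/* Assemble tp->fw_ver: any VPD-derived prefix, then the bootcode or
 * self-boot version from NVRAM, then the management-firmware version
 * (DASH/SMASH/NCSI or ASF) where applicable.
 */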
15945 static void tg3_read_fw_ver(struct tg3 *tp)
15946 {
15947 	u32 val;
15948 	bool vpd_vers = false;
15949 
15950 	if (tp->fw_ver[0] != 0)
15951 		vpd_vers = true;
15952 
15953 	if (tg3_flag(tp, NO_NVRAM)) {
15954 		strcat(tp->fw_ver, "sb");
15955 		tg3_read_otp_ver(tp);
15956 		return;
15957 	}
15958 
15959 	if (tg3_nvram_read(tp, 0, &val))
15960 		return;
15961 
15962 	if (val == TG3_EEPROM_MAGIC)
15963 		tg3_read_bc_ver(tp);
15964 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15965 		tg3_read_sb_ver(tp, val);
15966 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15967 		tg3_read_hwsb_ver(tp);
15968 
15969 	if (tg3_flag(tp, ENABLE_ASF)) {
15970 		if (tg3_flag(tp, ENABLE_APE)) {
15971 			tg3_probe_ncsi(tp);
15972 			if (!vpd_vers)
15973 				tg3_read_dash_ver(tp);
15974 		} else if (!vpd_vers) {
15975 			tg3_read_mgmtfw_ver(tp);
15976 		}
15977 	}
15978 
15979 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15980 }
15981 
15982 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15983 {
15984 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15985 		return TG3_RX_RET_MAX_SIZE_5717;
15986 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15987 		return TG3_RX_RET_MAX_SIZE_5700;
15988 	else
15989 		return TG3_RX_RET_MAX_SIZE_5705;
15990 }
15991 
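/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling in
 * tg3_get_invariants() below.
 */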
15992 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15993 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15994 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15995 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15996 	{ },
15997 };
15998 
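/* The 5704 and 5714 are dual-port devices; locate the sibling PCI
 * function in the same slot so the two ports can coordinate via
 * tp->pdev_peer.
 */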
15999 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16000 {
16001 	struct pci_dev *peer;
16002 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16003 
16004 	for (func = 0; func < 8; func++) {
16005 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16006 		if (peer && peer != tp->pdev)
16007 			break;
16008 		pci_dev_put(peer);
16009 	}
	/* The 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
16013 	if (!peer) {
16014 		peer = tp->pdev;
16015 		return peer;
16016 	}
16017 
	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
16022 	pci_dev_put(peer);
16023 
16024 	return peer;
16025 }
16026 
16027 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16028 {
16029 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16030 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16031 		u32 reg;
16032 
16033 		/* All devices that use the alternate
16034 		 * ASIC REV location have a CPMU.
16035 		 */
16036 		tg3_flag_set(tp, CPMU_PRESENT);
16037 
16038 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16039 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16040 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16041 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16042 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16043 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16044 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16045 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16046 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16047 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16048 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16049 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16050 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16051 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16052 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16053 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16054 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16055 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16056 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16057 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16058 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16059 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16060 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16061 		else
16062 			reg = TG3PCI_PRODID_ASICREV;
16063 
16064 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16065 	}
16066 
16067 	/* Wrong chip ID in 5752 A0. This code can be removed later
16068 	 * as A0 is not in production.
16069 	 */
16070 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16071 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16072 
16073 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16074 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16075 
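	/* Derive the cumulative family flags; each newer-generation flag
	 * implies the older ones below it (5717_PLUS -> 57765_PLUS ->
	 * 5755_PLUS -> 5750_PLUS -> 5705_PLUS).
	 */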
16076 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16077 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16078 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16079 		tg3_flag_set(tp, 5717_PLUS);
16080 
16081 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16082 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16083 		tg3_flag_set(tp, 57765_CLASS);
16084 
16085 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16086 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16087 		tg3_flag_set(tp, 57765_PLUS);
16088 
16089 	/* Intentionally exclude ASIC_REV_5906 */
16090 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16091 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16092 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16093 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16094 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16095 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16096 	    tg3_flag(tp, 57765_PLUS))
16097 		tg3_flag_set(tp, 5755_PLUS);
16098 
16099 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16100 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16101 		tg3_flag_set(tp, 5780_CLASS);
16102 
16103 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16104 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16105 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16106 	    tg3_flag(tp, 5755_PLUS) ||
16107 	    tg3_flag(tp, 5780_CLASS))
16108 		tg3_flag_set(tp, 5750_PLUS);
16109 
16110 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16111 	    tg3_flag(tp, 5750_PLUS))
16112 		tg3_flag_set(tp, 5705_PLUS);
16113 }
16114 
16115 static bool tg3_10_100_only_device(struct tg3 *tp,
16116 				   const struct pci_device_id *ent)
16117 {
16118 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16119 
16120 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16121 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16122 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16123 		return true;
16124 
16125 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16126 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16127 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16128 				return true;
16129 		} else {
16130 			return true;
16131 		}
16132 	}
16133 
16134 	return false;
16135 }
16136 
16137 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16138 {
16139 	u32 misc_ctrl_reg;
16140 	u32 pci_state_reg, grc_misc_cfg;
16141 	u32 val;
16142 	u16 pci_cmd;
16143 	int err;
16144 
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
16152 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16153 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16154 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16155 
16156 	/* Important! -- Make sure register accesses are byteswapped
16157 	 * correctly.  Also, for those chips that require it, make
16158 	 * sure that indirect register accesses are enabled before
16159 	 * the first operation.
16160 	 */
16161 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16162 			      &misc_ctrl_reg);
16163 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16164 			       MISC_HOST_CTRL_CHIPREV);
16165 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16166 			       tp->misc_host_ctrl);
16167 
16168 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16169 
16170 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16171 	 * we need to disable memory and use config. cycles
16172 	 * only to access all registers. The 5702/03 chips
16173 	 * can mistakenly decode the special cycles from the
16174 	 * ICH chipsets as memory write cycles, causing corruption
16175 	 * of register and memory space. Only certain ICH bridges
16176 	 * will drive special cycles with non-zero data during the
16177 	 * address phase which can fall within the 5703's address
16178 	 * range. This is not an ICH bug as the PCI spec allows
16179 	 * non-zero address during special cycles. However, only
16180 	 * these ICH bridges are known to drive non-zero addresses
16181 	 * during special cycles.
16182 	 *
16183 	 * Since special cycles do not cross PCI bridges, we only
16184 	 * enable this workaround if the 5703 is on the secondary
16185 	 * bus of these ICH bridges.
16186 	 */
16187 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16188 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16189 		static struct tg3_dev_id {
16190 			u32	vendor;
16191 			u32	device;
16192 			u32	rev;
16193 		} ich_chipsets[] = {
16194 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16195 			  PCI_ANY_ID },
16196 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16197 			  PCI_ANY_ID },
16198 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16199 			  0xa },
16200 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16201 			  PCI_ANY_ID },
16202 			{ },
16203 		};
16204 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16205 		struct pci_dev *bridge = NULL;
16206 
16207 		while (pci_id->vendor != 0) {
16208 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16209 						bridge);
16210 			if (!bridge) {
16211 				pci_id++;
16212 				continue;
16213 			}
16214 			if (pci_id->rev != PCI_ANY_ID) {
16215 				if (bridge->revision > pci_id->rev)
16216 					continue;
16217 			}
16218 			if (bridge->subordinate &&
16219 			    (bridge->subordinate->number ==
16220 			     tp->pdev->bus->number)) {
16221 				tg3_flag_set(tp, ICH_WORKAROUND);
16222 				pci_dev_put(bridge);
16223 				break;
16224 			}
16225 		}
16226 	}
16227 
16228 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16229 		static struct tg3_dev_id {
16230 			u32	vendor;
16231 			u32	device;
16232 		} bridge_chipsets[] = {
16233 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16234 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16235 			{ },
16236 		};
16237 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16238 		struct pci_dev *bridge = NULL;
16239 
16240 		while (pci_id->vendor != 0) {
16241 			bridge = pci_get_device(pci_id->vendor,
16242 						pci_id->device,
16243 						bridge);
16244 			if (!bridge) {
16245 				pci_id++;
16246 				continue;
16247 			}
16248 			if (bridge->subordinate &&
16249 			    (bridge->subordinate->number <=
16250 			     tp->pdev->bus->number) &&
16251 			    (bridge->subordinate->busn_res.end >=
16252 			     tp->pdev->bus->number)) {
16253 				tg3_flag_set(tp, 5701_DMA_BUG);
16254 				pci_dev_put(bridge);
16255 				break;
16256 			}
16257 		}
16258 	}
16259 
	/* The EPB bridge inside the 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40 bits.  This bridge may have additional
	 * 57xx devices behind it, in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the
	 * 40-bit DMA workaround.
	 */
16266 	if (tg3_flag(tp, 5780_CLASS)) {
16267 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16268 		tp->msi_cap = tp->pdev->msi_cap;
16269 	} else {
16270 		struct pci_dev *bridge = NULL;
16271 
16272 		do {
16273 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16274 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16275 						bridge);
16276 			if (bridge && bridge->subordinate &&
16277 			    (bridge->subordinate->number <=
16278 			     tp->pdev->bus->number) &&
16279 			    (bridge->subordinate->busn_res.end >=
16280 			     tp->pdev->bus->number)) {
16281 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16282 				pci_dev_put(bridge);
16283 				break;
16284 			}
16285 		} while (bridge);
16286 	}
16287 
16288 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16289 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16290 		tp->pdev_peer = tg3_find_peer(tp);
16291 
16292 	/* Determine TSO capabilities */
16293 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16294 		; /* Do nothing. HW bug. */
16295 	else if (tg3_flag(tp, 57765_PLUS))
16296 		tg3_flag_set(tp, HW_TSO_3);
16297 	else if (tg3_flag(tp, 5755_PLUS) ||
16298 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16299 		tg3_flag_set(tp, HW_TSO_2);
16300 	else if (tg3_flag(tp, 5750_PLUS)) {
16301 		tg3_flag_set(tp, HW_TSO_1);
16302 		tg3_flag_set(tp, TSO_BUG);
16303 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16304 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16305 			tg3_flag_clear(tp, TSO_BUG);
16306 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16307 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16308 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16309 		tg3_flag_set(tp, FW_TSO);
16310 		tg3_flag_set(tp, TSO_BUG);
16311 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16312 			tp->fw_needed = FIRMWARE_TG3TSO5;
16313 		else
16314 			tp->fw_needed = FIRMWARE_TG3TSO;
16315 	}
16316 
16317 	/* Selectively allow TSO based on operating conditions */
16318 	if (tg3_flag(tp, HW_TSO_1) ||
16319 	    tg3_flag(tp, HW_TSO_2) ||
16320 	    tg3_flag(tp, HW_TSO_3) ||
16321 	    tg3_flag(tp, FW_TSO)) {
16322 		/* For firmware TSO, assume ASF is disabled.
16323 		 * We'll disable TSO later if we discover ASF
16324 		 * is enabled in tg3_get_eeprom_hw_cfg().
16325 		 */
16326 		tg3_flag_set(tp, TSO_CAPABLE);
16327 	} else {
16328 		tg3_flag_clear(tp, TSO_CAPABLE);
16329 		tg3_flag_clear(tp, TSO_BUG);
16330 		tp->fw_needed = NULL;
16331 	}
16332 
16333 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16334 		tp->fw_needed = FIRMWARE_TG3;
16335 
16336 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16337 		tp->fw_needed = FIRMWARE_TG357766;
16338 
16339 	tp->irq_max = 1;
16340 
16341 	if (tg3_flag(tp, 5750_PLUS)) {
16342 		tg3_flag_set(tp, SUPPORT_MSI);
16343 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16344 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16345 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16346 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16347 		     tp->pdev_peer == tp->pdev))
16348 			tg3_flag_clear(tp, SUPPORT_MSI);
16349 
16350 		if (tg3_flag(tp, 5755_PLUS) ||
16351 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16352 			tg3_flag_set(tp, 1SHOT_MSI);
16353 		}
16354 
16355 		if (tg3_flag(tp, 57765_PLUS)) {
16356 			tg3_flag_set(tp, SUPPORT_MSIX);
16357 			tp->irq_max = TG3_IRQ_MAX_VECS;
16358 		}
16359 	}
16360 
16361 	tp->txq_max = 1;
16362 	tp->rxq_max = 1;
16363 	if (tp->irq_max > 1) {
16364 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16365 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16366 
16367 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16368 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16369 			tp->txq_max = tp->irq_max - 1;
16370 	}
16371 
16372 	if (tg3_flag(tp, 5755_PLUS) ||
16373 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16374 		tg3_flag_set(tp, SHORT_DMA_BUG);
16375 
16376 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16377 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16378 
16379 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16380 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16381 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16382 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16383 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16384 
16385 	if (tg3_flag(tp, 57765_PLUS) &&
16386 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16387 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16388 
16389 	if (!tg3_flag(tp, 5705_PLUS) ||
16390 	    tg3_flag(tp, 5780_CLASS) ||
16391 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16392 		tg3_flag_set(tp, JUMBO_CAPABLE);
16393 
16394 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16395 			      &pci_state_reg);
16396 
16397 	if (pci_is_pcie(tp->pdev)) {
16398 		u16 lnkctl;
16399 
16400 		tg3_flag_set(tp, PCI_EXPRESS);
16401 
16402 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16403 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16404 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16405 				tg3_flag_clear(tp, HW_TSO_2);
16406 				tg3_flag_clear(tp, TSO_CAPABLE);
16407 			}
16408 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16409 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16410 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16411 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16412 				tg3_flag_set(tp, CLKREQ_BUG);
16413 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16414 			tg3_flag_set(tp, L1PLLPD_EN);
16415 		}
16416 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16417 		/* BCM5785 devices are effectively PCIe devices, and should
16418 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16419 		 * section.
16420 		 */
16421 		tg3_flag_set(tp, PCI_EXPRESS);
16422 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16423 		   tg3_flag(tp, 5780_CLASS)) {
16424 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16425 		if (!tp->pcix_cap) {
16426 			dev_err(&tp->pdev->dev,
16427 				"Cannot find PCI-X capability, aborting\n");
16428 			return -EIO;
16429 		}
16430 
16431 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16432 			tg3_flag_set(tp, PCIX_MODE);
16433 	}
16434 
	/* On AMD 762 and VIA K8T800 chipsets, write reordering of
	 * mailbox register writes by the host controller can cause
	 * serious problems.  We read back from
16438 	 * every mailbox register write to force the writes to be
16439 	 * posted to the chip in order.
16440 	 */
16441 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16442 	    !tg3_flag(tp, PCI_EXPRESS))
16443 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16444 
16445 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16446 			     &tp->pci_cacheline_sz);
16447 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16448 			     &tp->pci_lat_timer);
16449 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16450 	    tp->pci_lat_timer < 64) {
16451 		tp->pci_lat_timer = 64;
16452 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16453 				      tp->pci_lat_timer);
16454 	}
16455 
	/* Important! -- The PCI-X hw workaround situation must be
	 * decided before the first MMIO register access.
16458 	 */
16459 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16460 		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
16462 		 */
16463 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16464 
16465 		/* If we are in PCI-X mode, enable register write workaround.
16466 		 *
16467 		 * The workaround is to use indirect register accesses
16468 		 * for all chip writes not to mailbox registers.
16469 		 */
16470 		if (tg3_flag(tp, PCIX_MODE)) {
16471 			u32 pm_reg;
16472 
16473 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16474 
			/* The chip can have its power management PCI config
16476 			 * space registers clobbered due to this bug.
16477 			 * So explicitly force the chip into D0 here.
16478 			 */
16479 			pci_read_config_dword(tp->pdev,
16480 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16481 					      &pm_reg);
16482 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16483 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16484 			pci_write_config_dword(tp->pdev,
16485 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16486 					       pm_reg);
16487 
16488 			/* Also, force SERR#/PERR# in PCI command. */
16489 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16490 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16491 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16492 		}
16493 	}
16494 
16495 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16496 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16497 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16498 		tg3_flag_set(tp, PCI_32BIT);
16499 
16500 	/* Chip-specific fixup from Broadcom driver */
16501 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16502 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16503 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16504 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16505 	}
16506 
16507 	/* Default fast path register access methods */
16508 	tp->read32 = tg3_read32;
16509 	tp->write32 = tg3_write32;
16510 	tp->read32_mbox = tg3_read32;
16511 	tp->write32_mbox = tg3_write32;
16512 	tp->write32_tx_mbox = tg3_write32;
16513 	tp->write32_rx_mbox = tg3_write32;
16514 
16515 	/* Various workaround register access methods */
16516 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16517 		tp->write32 = tg3_write_indirect_reg32;
16518 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16519 		 (tg3_flag(tp, PCI_EXPRESS) &&
16520 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16521 		/*
		 * Back-to-back register writes can cause problems on these
		 * chips; the workaround is to read back all register writes
		 * except those to mailbox registers.
16525 		 *
16526 		 * See tg3_write_indirect_reg32().
16527 		 */
16528 		tp->write32 = tg3_write_flush_reg32;
16529 	}
16530 
16531 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16532 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16533 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16534 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16535 	}
16536 
16537 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16538 		tp->read32 = tg3_read_indirect_reg32;
16539 		tp->write32 = tg3_write_indirect_reg32;
16540 		tp->read32_mbox = tg3_read_indirect_mbox;
16541 		tp->write32_mbox = tg3_write_indirect_mbox;
16542 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16543 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16544 
16545 		iounmap(tp->regs);
16546 		tp->regs = NULL;
16547 
16548 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16549 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16550 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16551 	}
16552 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16553 		tp->read32_mbox = tg3_read32_mbox_5906;
16554 		tp->write32_mbox = tg3_write32_mbox_5906;
16555 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16556 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16557 	}
16558 
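	/* When direct MMIO register writes are unreliable, SRAM must be
	 * accessed indirectly through PCI config space cycles.
	 */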
16559 	if (tp->write32 == tg3_write_indirect_reg32 ||
16560 	    (tg3_flag(tp, PCIX_MODE) &&
16561 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16562 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16563 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16564 
16565 	/* The memory arbiter has to be enabled in order for SRAM accesses
16566 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16567 	 * sure it is enabled, but other entities such as system netboot
16568 	 * code might disable it.
16569 	 */
16570 	val = tr32(MEMARB_MODE);
16571 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16572 
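	/* Derive the PCI function number from the devfn by default; the
	 * chips below report the true function through the PCI-X status
	 * or CPMU status registers instead.
	 */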
16573 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16574 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16575 	    tg3_flag(tp, 5780_CLASS)) {
16576 		if (tg3_flag(tp, PCIX_MODE)) {
16577 			pci_read_config_dword(tp->pdev,
16578 					      tp->pcix_cap + PCI_X_STATUS,
16579 					      &val);
16580 			tp->pci_fn = val & 0x7;
16581 		}
16582 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16583 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16584 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16585 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16586 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16587 			val = tr32(TG3_CPMU_STATUS);
16588 
16589 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16590 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16591 		else
16592 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16593 				     TG3_CPMU_STATUS_FSHFT_5719;
16594 	}
16595 
16596 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16597 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16598 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16599 	}
16600 
16601 	/* Get eeprom hw config before calling tg3_set_power_state().
16602 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16603 	 * determined before calling tg3_set_power_state() so that
16604 	 * we know whether or not to switch out of Vaux power.
16605 	 * When the flag is set, it means that GPIO1 is used for eeprom
16606 	 * write protect and also implies that it is a LOM where GPIOs
16607 	 * are not used to switch power.
16608 	 */
16609 	tg3_get_eeprom_hw_cfg(tp);
16610 
16611 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16612 		tg3_flag_clear(tp, TSO_CAPABLE);
16613 		tg3_flag_clear(tp, TSO_BUG);
16614 		tp->fw_needed = NULL;
16615 	}
16616 
16617 	if (tg3_flag(tp, ENABLE_APE)) {
16618 		/* Allow reads and writes to the
16619 		 * APE register and memory space.
16620 		 */
16621 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16622 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16623 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16624 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16625 				       pci_state_reg);
16626 
16627 		tg3_ape_lock_init(tp);
16628 	}
16629 
16630 	/* Set up tp->grc_local_ctrl before calling
16631 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16632 	 * will bring 5700's external PHY out of reset.
16633 	 * It is also used as eeprom write protect on LOMs.
16634 	 */
16635 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16636 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16637 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16638 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16639 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16640 	/* Unused GPIO3 must be driven as output on 5752 because there
16641 	 * are no pull-up resistors on unused GPIO pins.
16642 	 */
16643 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16644 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16645 
16646 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16647 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16648 	    tg3_flag(tp, 57765_CLASS))
16649 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16650 
16651 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16652 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16653 		/* Turn off the debug UART. */
16654 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16655 		if (tg3_flag(tp, IS_NIC))
16656 			/* Keep VMain power. */
16657 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16658 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16659 	}
16660 
16661 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16662 		tp->grc_local_ctrl |=
16663 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16664 
16665 	/* Switch out of Vaux if it is a NIC */
16666 	tg3_pwrsrc_switch_to_vmain(tp);
16667 
16668 	/* Derive initial jumbo mode from MTU assigned in
16669 	 * ether_setup() via the alloc_etherdev() call
16670 	 */
16671 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16672 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16673 
16674 	/* Determine WakeOnLan speed to use. */
16675 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16676 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16677 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16678 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16679 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16680 	} else {
16681 		tg3_flag_set(tp, WOL_SPEED_100MB);
16682 	}
16683 
16684 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16685 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16686 
16687 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16688 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16689 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16690 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16691 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16692 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16693 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16694 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16695 
16696 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16697 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16698 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16699 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16700 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16701 
16702 	if (tg3_flag(tp, 5705_PLUS) &&
16703 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16704 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16705 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16706 	    !tg3_flag(tp, 57765_PLUS)) {
16707 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16708 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16709 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16710 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16711 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16712 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16713 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16714 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16715 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16716 		} else
16717 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16718 	}
16719 
16720 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16721 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16722 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16723 		if (tp->phy_otp == 0)
16724 			tp->phy_otp = TG3_OTP_DEFAULT;
16725 	}
16726 
16727 	if (tg3_flag(tp, CPMU_PRESENT))
16728 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16729 	else
16730 		tp->mi_mode = MAC_MI_MODE_BASE;
16731 
16732 	tp->coalesce_mode = 0;
16733 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16734 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16735 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16736 
16737 	/* Set these bits to enable statistics workaround. */
16738 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16739 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16740 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16741 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16742 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16743 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16744 	}
16745 
16746 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16747 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16748 		tg3_flag_set(tp, USE_PHYLIB);
16749 
16750 	err = tg3_mdio_init(tp);
16751 	if (err)
16752 		return err;
16753 
16754 	/* Initialize data/descriptor byte/word swapping. */
16755 	val = tr32(GRC_MODE);
16756 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16757 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16758 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16759 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16760 			GRC_MODE_B2HRX_ENABLE |
16761 			GRC_MODE_HTX2B_ENABLE |
16762 			GRC_MODE_HOST_STACKUP);
16763 	else
16764 		val &= GRC_MODE_HOST_STACKUP;
16765 
16766 	tw32(GRC_MODE, val | tp->grc_mode);
16767 
16768 	tg3_switch_clocks(tp);
16769 
16770 	/* Clear this out for sanity. */
16771 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16772 
16773 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16774 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16775 
16776 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16777 			      &pci_state_reg);
16778 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16779 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16780 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16781 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16782 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16783 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16784 			void __iomem *sram_base;
16785 
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * readback is bad, force-enable the PCI-X workaround.
16789 			 */
16790 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16791 
16792 			writel(0x00000000, sram_base);
16793 			writel(0x00000000, sram_base + 4);
16794 			writel(0xffffffff, sram_base + 4);
16795 			if (readl(sram_base) != 0x00000000)
16796 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16797 		}
16798 	}
16799 
16800 	udelay(50);
16801 	tg3_nvram_init(tp);
16802 
16803 	/* If the device has an NVRAM, no need to load patch firmware */
16804 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16805 	    !tg3_flag(tp, NO_NVRAM))
16806 		tp->fw_needed = NULL;
16807 
16808 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16809 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16810 
16811 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16812 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16813 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16814 		tg3_flag_set(tp, IS_5788);
16815 
16816 	if (!tg3_flag(tp, IS_5788) &&
16817 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16818 		tg3_flag_set(tp, TAGGED_STATUS);
16819 	if (tg3_flag(tp, TAGGED_STATUS)) {
16820 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16821 				      HOSTCC_MODE_CLRTICK_TXBD);
16822 
16823 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16824 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16825 				       tp->misc_host_ctrl);
16826 	}
16827 
16828 	/* Preserve the APE MAC_MODE bits */
16829 	if (tg3_flag(tp, ENABLE_APE))
16830 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16831 	else
16832 		tp->mac_mode = 0;
16833 
16834 	if (tg3_10_100_only_device(tp, ent))
16835 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16836 
16837 	err = tg3_phy_probe(tp);
16838 	if (err) {
16839 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16840 		/* ... but do not return immediately ... */
16841 		tg3_mdio_fini(tp);
16842 	}
16843 
16844 	tg3_read_vpd(tp);
16845 	tg3_read_fw_ver(tp);
16846 
16847 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16848 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16849 	} else {
16850 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16851 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16852 		else
16853 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16854 	}
16855 
16856 	/* 5700 {AX,BX} chips have a broken status block link
16857 	 * change bit implementation, so we must use the
16858 	 * status register in those cases.
16859 	 */
16860 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16861 		tg3_flag_set(tp, USE_LINKCHG_REG);
16862 	else
16863 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16864 
	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * on subsystem IDs.
16868 	 */
16869 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16870 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16871 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16872 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16873 		tg3_flag_set(tp, USE_LINKCHG_REG);
16874 	}
16875 
16876 	/* For all SERDES we poll the MAC status register. */
16877 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16878 		tg3_flag_set(tp, POLL_SERDES);
16879 	else
16880 		tg3_flag_clear(tp, POLL_SERDES);
16881 
16882 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16883 		tg3_flag_set(tp, POLL_CPMU_LINK);
16884 
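	/* Reserve enough headroom that the IP header lands on a word
	 * boundary after the 14-byte Ethernet header.
	 */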
16885 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16886 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16887 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16888 	    tg3_flag(tp, PCIX_MODE)) {
16889 		tp->rx_offset = NET_SKB_PAD;
16890 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
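		/* Copy every packet when the CPU cannot do efficient
		 * unaligned accesses.
		 */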
16891 		tp->rx_copy_thresh = ~(u16)0;
16892 #endif
16893 	}
16894 
16895 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16896 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16897 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16898 
16899 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16900 
16901 	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
16903 	 */
16904 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16905 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16906 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16907 		tp->rx_std_max_post = 8;
16908 
16909 	if (tg3_flag(tp, ASPM_WORKAROUND))
16910 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16911 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16912 
16913 	return err;
16914 }
16915 
16916 #ifdef CONFIG_SPARC
16917 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16918 {
16919 	struct net_device *dev = tp->dev;
16920 	struct pci_dev *pdev = tp->pdev;
16921 	struct device_node *dp = pci_device_to_OF_node(pdev);
16922 	const unsigned char *addr;
16923 	int len;
16924 
16925 	addr = of_get_property(dp, "local-mac-address", &len);
16926 	if (addr && len == ETH_ALEN) {
16927 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16928 		return 0;
16929 	}
16930 	return -ENODEV;
16931 }
16932 
16933 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16934 {
16935 	struct net_device *dev = tp->dev;
16936 
16937 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16938 	return 0;
16939 }
16940 #endif
16941 
16942 static int tg3_get_device_address(struct tg3 *tp)
16943 {
16944 	struct net_device *dev = tp->dev;
16945 	u32 hi, lo, mac_offset;
16946 	int addr_ok = 0;
16947 	int err;
16948 
16949 #ifdef CONFIG_SPARC
16950 	if (!tg3_get_macaddr_sparc(tp))
16951 		return 0;
16952 #endif
16953 
16954 	if (tg3_flag(tp, IS_SSB_CORE)) {
16955 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16956 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16957 			return 0;
16958 	}
16959 
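	/* 0x7c is the default NVRAM offset of the MAC address; it is
	 * adjusted below for dual-MAC, multi-function, and 5906 devices.
	 */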
16960 	mac_offset = 0x7c;
16961 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16962 	    tg3_flag(tp, 5780_CLASS)) {
16963 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16964 			mac_offset = 0xcc;
16965 		if (tg3_nvram_lock(tp))
16966 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16967 		else
16968 			tg3_nvram_unlock(tp);
16969 	} else if (tg3_flag(tp, 5717_PLUS)) {
16970 		if (tp->pci_fn & 1)
16971 			mac_offset = 0xcc;
16972 		if (tp->pci_fn > 1)
16973 			mac_offset += 0x18c;
16974 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16975 		mac_offset = 0x10;
16976 
	/* First try to get it from the MAC address mailbox. */
16978 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
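	/* The upper 16 bits hold the ASCII signature "HK" (0x484b) when
	 * the bootcode has stored a valid MAC address here.
	 */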
16979 	if ((hi >> 16) == 0x484b) {
16980 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16981 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16982 
16983 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16984 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16985 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16986 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16987 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16988 
16989 		/* Some old bootcode may report a 0 MAC address in SRAM */
16990 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16991 	}
16992 	if (!addr_ok) {
16993 		/* Next, try NVRAM. */
16994 		if (!tg3_flag(tp, NO_NVRAM) &&
16995 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16996 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16997 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16998 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16999 		}
17000 		/* Finally just fetch it out of the MAC control regs. */
17001 		else {
17002 			hi = tr32(MAC_ADDR_0_HIGH);
17003 			lo = tr32(MAC_ADDR_0_LOW);
17004 
17005 			dev->dev_addr[5] = lo & 0xff;
17006 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17007 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17008 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17009 			dev->dev_addr[1] = hi & 0xff;
17010 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17011 		}
17012 	}
17013 
17014 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17015 #ifdef CONFIG_SPARC
17016 		if (!tg3_get_default_macaddr_sparc(tp))
17017 			return 0;
17018 #endif
17019 		return -EINVAL;
17020 	}
17021 	return 0;
17022 }
17023 
17024 #define BOUNDARY_SINGLE_CACHELINE	1
17025 #define BOUNDARY_MULTI_CACHELINE	2
17026 
17027 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17028 {
17029 	int cacheline_size;
17030 	u8 byte;
17031 	int goal;
17032 
17033 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
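	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero usually
	 * means it was never configured, so assume the 1024-byte worst
	 * case.
	 */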
17034 	if (byte == 0)
17035 		cacheline_size = 1024;
17036 	else
17037 		cacheline_size = (int) byte * 4;
17038 
17039 	/* On 5703 and later chips, the boundary bits have no
17040 	 * effect.
17041 	 */
17042 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17043 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17044 	    !tg3_flag(tp, PCI_EXPRESS))
17045 		goto out;
17046 
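	/* Pick a boundary goal based on the host architecture.  PCI
	 * controllers on some RISC systems dislike bursts that cross a
	 * cache line, so those get a single- or multi-cacheline goal.
	 */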
17047 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17048 	goal = BOUNDARY_MULTI_CACHELINE;
17049 #else
17050 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17051 	goal = BOUNDARY_SINGLE_CACHELINE;
17052 #else
17053 	goal = 0;
17054 #endif
17055 #endif
17056 
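	/* 57765_PLUS chips expose only a single cache-alignment disable
	 * bit, so set it as needed and skip the boundary tables below.
	 */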
17057 	if (tg3_flag(tp, 57765_PLUS)) {
17058 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17059 		goto out;
17060 	}
17061 
17062 	if (!goal)
17063 		goto out;
17064 
17065 	/* PCI controllers on most RISC systems tend to disconnect
17066 	 * when a device tries to burst across a cache-line boundary.
17067 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17068 	 *
17069 	 * Unfortunately, for PCI-E there are only limited
17070 	 * write-side controls for this, and thus for reads
17071 	 * we will still get the disconnects.  We'll also waste
17072 	 * these PCI cycles for both read and write for chips
17073 	 * other than 5700 and 5701 which do not implement the
17074 	 * boundary bits.
17075 	 */
17076 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17077 		switch (cacheline_size) {
17078 		case 16:
17079 		case 32:
17080 		case 64:
17081 		case 128:
17082 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17083 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17084 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17085 			} else {
17086 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17087 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17088 			}
17089 			break;
17090 
17091 		case 256:
17092 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17093 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17094 			break;
17095 
17096 		default:
17097 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17098 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17099 			break;
17100 		}
17101 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17102 		switch (cacheline_size) {
17103 		case 16:
17104 		case 32:
17105 		case 64:
17106 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17107 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17108 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17109 				break;
17110 			}
17111 			/* fallthrough */
17112 		case 128:
17113 		default:
17114 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17115 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17116 			break;
17117 		}
17118 	} else {
17119 		switch (cacheline_size) {
17120 		case 16:
17121 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17122 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17123 					DMA_RWCTRL_WRITE_BNDRY_16);
17124 				break;
17125 			}
17126 			/* fallthrough */
17127 		case 32:
17128 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17129 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17130 					DMA_RWCTRL_WRITE_BNDRY_32);
17131 				break;
17132 			}
17133 			/* fallthrough */
17134 		case 64:
17135 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17136 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17137 					DMA_RWCTRL_WRITE_BNDRY_64);
17138 				break;
17139 			}
17140 			/* fallthrough */
17141 		case 128:
17142 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17143 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17144 					DMA_RWCTRL_WRITE_BNDRY_128);
17145 				break;
17146 			}
17147 			/* fallthrough */
17148 		case 256:
17149 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17150 				DMA_RWCTRL_WRITE_BNDRY_256);
17151 			break;
17152 		case 512:
17153 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17154 				DMA_RWCTRL_WRITE_BNDRY_512);
17155 			break;
17156 		case 1024:
17157 		default:
17158 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17159 				DMA_RWCTRL_WRITE_BNDRY_1024);
17160 			break;
17161 		}
17162 	}
17163 
17164 out:
17165 	return val;
17166 }
17167 
17168 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17169 			   int size, bool to_device)
17170 {
17171 	struct tg3_internal_buffer_desc test_desc;
17172 	u32 sram_dma_descs;
17173 	int i, ret;
17174 
17175 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17176 
17177 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17178 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17179 	tw32(RDMAC_STATUS, 0);
17180 	tw32(WDMAC_STATUS, 0);
17181 
17182 	tw32(BUFMGR_MODE, 0);
17183 	tw32(FTQ_RESET, 0);
17184 
17185 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17186 	test_desc.addr_lo = buf_dma & 0xffffffff;
17187 	test_desc.nic_mbuf = 0x00002100;
17188 	test_desc.len = size;
17189 
17190 	/*
	 * HP ZX1 systems were seeing test failures for 5701 cards running
	 * at 33MHz the *second* time the tg3 driver was loaded after an
	 * initial scan.
17194 	 *
17195 	 * Broadcom tells me:
17196 	 *   ...the DMA engine is connected to the GRC block and a DMA
17197 	 *   reset may affect the GRC block in some unpredictable way...
17198 	 *   The behavior of resets to individual blocks has not been tested.
17199 	 *
17200 	 * Broadcom noted the GRC reset will also reset all sub-components.
17201 	 */
17202 	if (to_device) {
17203 		test_desc.cqid_sqid = (13 << 8) | 2;
17204 
17205 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17206 		udelay(40);
17207 	} else {
17208 		test_desc.cqid_sqid = (16 << 8) | 7;
17209 
17210 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17211 		udelay(40);
17212 	}
17213 	test_desc.flags = 0x00000005;
17214 
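	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window registers.
	 */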
17215 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17216 		u32 val;
17217 
17218 		val = *(((u32 *)&test_desc) + i);
17219 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17220 				       sram_dma_descs + (i * sizeof(u32)));
17221 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17222 	}
17223 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17224 
17225 	if (to_device)
17226 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17227 	else
17228 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17229 
17230 	ret = -ENODEV;
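	/* Poll for up to 4 ms (40 iterations of 100 usec) for the
	 * descriptor to show up on the completion FIFO.
	 */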
17231 	for (i = 0; i < 40; i++) {
17232 		u32 val;
17233 
17234 		if (to_device)
17235 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17236 		else
17237 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17238 		if ((val & 0xffff) == sram_dma_descs) {
17239 			ret = 0;
17240 			break;
17241 		}
17242 
17243 		udelay(100);
17244 	}
17245 
17246 	return ret;
17247 }
17248 
17249 #define TEST_BUFFER_SIZE	0x2000
17250 
17251 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17252 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17253 	{ },
17254 };
17255 
17256 static int tg3_test_dma(struct tg3 *tp)
17257 {
17258 	dma_addr_t buf_dma;
17259 	u32 *buf, saved_dma_rwctrl;
17260 	int ret = 0;
17261 
17262 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17263 				 &buf_dma, GFP_KERNEL);
17264 	if (!buf) {
17265 		ret = -ENOMEM;
17266 		goto out_nofree;
17267 	}
17268 
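	/* Seed the DMA control word with the PCI read/write command
	 * encodings; the boundary bits are computed next.
	 */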
17269 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17270 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17271 
17272 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17273 
17274 	if (tg3_flag(tp, 57765_PLUS))
17275 		goto out;
17276 
17277 	if (tg3_flag(tp, PCI_EXPRESS)) {
17278 		/* DMA read watermark not used on PCIE */
17279 		tp->dma_rwctrl |= 0x00180000;
17280 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17281 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17282 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17283 			tp->dma_rwctrl |= 0x003f0000;
17284 		else
17285 			tp->dma_rwctrl |= 0x003f000f;
17286 	} else {
17287 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17288 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17289 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17290 			u32 read_water = 0x7;
17291 
17292 			/* If the 5704 is behind the EPB bridge, we can
17293 			 * do the less restrictive ONE_DMA workaround for
17294 			 * better performance.
17295 			 */
17296 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17297 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17298 				tp->dma_rwctrl |= 0x8000;
17299 			else if (ccval == 0x6 || ccval == 0x7)
17300 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17301 
17302 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17303 				read_water = 4;
17304 			/* Set bit 23 to enable PCIX hw bug fix */
17305 			tp->dma_rwctrl |=
17306 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17307 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17308 				(1 << 23);
17309 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17310 			/* 5780 always in PCIX mode */
17311 			tp->dma_rwctrl |= 0x00144000;
17312 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17313 			/* 5714 always in PCIX mode */
17314 			tp->dma_rwctrl |= 0x00148000;
17315 		} else {
17316 			tp->dma_rwctrl |= 0x001b000f;
17317 		}
17318 	}
17319 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17320 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17321 
17322 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17323 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17324 		tp->dma_rwctrl &= 0xfffffff0;
17325 
17326 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17327 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17328 		/* Remove this if it causes problems for some boards. */
17329 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17330 
17331 		/* On 5700/5701 chips, we need to set this bit.
17332 		 * Otherwise the chip will issue cacheline transactions
17333 		 * to streamable DMA memory with not all the byte
17334 		 * enables turned on.  This is an error on several
17335 		 * RISC PCI controllers, in particular sparc64.
17336 		 *
17337 		 * On 5703/5704 chips, this bit has been reassigned
17338 		 * a different meaning.  In particular, it is used
17339 		 * on those chips to enable a PCI-X workaround.
17340 		 */
17341 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17342 	}
17343 
17344 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17345 
17346 
17347 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17348 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17349 		goto out;
17350 
	/* It is best to perform the DMA test with the maximum write
	 * burst size to expose the 5700/5701 write DMA bug.
17353 	 */
17354 	saved_dma_rwctrl = tp->dma_rwctrl;
17355 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17356 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17357 
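	/* Write a test pattern to the chip, read it back, and verify it.
	 * On corruption, retry once with the most restrictive 16-byte
	 * write boundary before giving up.
	 */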
17358 	while (1) {
17359 		u32 *p = buf, i;
17360 
17361 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17362 			p[i] = i;
17363 
17364 		/* Send the buffer to the chip. */
17365 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17366 		if (ret) {
17367 			dev_err(&tp->pdev->dev,
17368 				"%s: Buffer write failed. err = %d\n",
17369 				__func__, ret);
17370 			break;
17371 		}
17372 
17373 		/* Now read it back. */
17374 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17375 		if (ret) {
17376 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17377 				"err = %d\n", __func__, ret);
17378 			break;
17379 		}
17380 
17381 		/* Verify it. */
17382 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17383 			if (p[i] == i)
17384 				continue;
17385 
17386 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17387 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17388 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17389 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17390 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391 				break;
17392 			} else {
17393 				dev_err(&tp->pdev->dev,
17394 					"%s: Buffer corrupted on read back! "
17395 					"(%d != %d)\n", __func__, p[i], i);
17396 				ret = -ENODEV;
17397 				goto out;
17398 			}
17399 		}
17400 
17401 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17402 			/* Success. */
17403 			ret = 0;
17404 			break;
17405 		}
17406 	}
17407 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17408 	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting the DMA boundary;
		 * now look for chipsets that are known to expose the
17411 		 * DMA bug without failing the test.
17412 		 */
17413 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17414 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17415 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17416 		} else {
17417 			/* Safe to use the calculated DMA boundary. */
17418 			tp->dma_rwctrl = saved_dma_rwctrl;
17419 		}
17420 
17421 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17422 	}
17423 
17424 out:
17425 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17426 out_nofree:
17427 	return ret;
17428 }
17429 
17430 static void tg3_init_bufmgr_config(struct tg3 *tp)
17431 {
17432 	if (tg3_flag(tp, 57765_PLUS)) {
17433 		tp->bufmgr_config.mbuf_read_dma_low_water =
17434 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17435 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17436 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17437 		tp->bufmgr_config.mbuf_high_water =
17438 			DEFAULT_MB_HIGH_WATER_57765;
17439 
17440 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17441 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17442 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17443 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17444 		tp->bufmgr_config.mbuf_high_water_jumbo =
17445 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17446 	} else if (tg3_flag(tp, 5705_PLUS)) {
17447 		tp->bufmgr_config.mbuf_read_dma_low_water =
17448 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17449 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17450 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17451 		tp->bufmgr_config.mbuf_high_water =
17452 			DEFAULT_MB_HIGH_WATER_5705;
17453 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17454 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17455 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17456 			tp->bufmgr_config.mbuf_high_water =
17457 				DEFAULT_MB_HIGH_WATER_5906;
17458 		}
17459 
17460 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17461 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17462 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17463 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17464 		tp->bufmgr_config.mbuf_high_water_jumbo =
17465 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17466 	} else {
17467 		tp->bufmgr_config.mbuf_read_dma_low_water =
17468 			DEFAULT_MB_RDMA_LOW_WATER;
17469 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17470 			DEFAULT_MB_MACRX_LOW_WATER;
17471 		tp->bufmgr_config.mbuf_high_water =
17472 			DEFAULT_MB_HIGH_WATER;
17473 
17474 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17475 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17476 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17477 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17478 		tp->bufmgr_config.mbuf_high_water_jumbo =
17479 			DEFAULT_MB_HIGH_WATER_JUMBO;
17480 	}
17481 
17482 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17483 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17484 }
17485 
17486 static char *tg3_phy_string(struct tg3 *tp)
17487 {
17488 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17489 	case TG3_PHY_ID_BCM5400:	return "5400";
17490 	case TG3_PHY_ID_BCM5401:	return "5401";
17491 	case TG3_PHY_ID_BCM5411:	return "5411";
17492 	case TG3_PHY_ID_BCM5701:	return "5701";
17493 	case TG3_PHY_ID_BCM5703:	return "5703";
17494 	case TG3_PHY_ID_BCM5704:	return "5704";
17495 	case TG3_PHY_ID_BCM5705:	return "5705";
17496 	case TG3_PHY_ID_BCM5750:	return "5750";
17497 	case TG3_PHY_ID_BCM5752:	return "5752";
17498 	case TG3_PHY_ID_BCM5714:	return "5714";
17499 	case TG3_PHY_ID_BCM5780:	return "5780";
17500 	case TG3_PHY_ID_BCM5755:	return "5755";
17501 	case TG3_PHY_ID_BCM5787:	return "5787";
17502 	case TG3_PHY_ID_BCM5784:	return "5784";
17503 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17504 	case TG3_PHY_ID_BCM5906:	return "5906";
17505 	case TG3_PHY_ID_BCM5761:	return "5761";
17506 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17507 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17508 	case TG3_PHY_ID_BCM57765:	return "57765";
17509 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17510 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17511 	case TG3_PHY_ID_BCM5762:	return "5762C";
17512 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17513 	case 0:			return "serdes";
17514 	default:		return "unknown";
17515 	}
17516 }
17517 
17518 static char *tg3_bus_string(struct tg3 *tp, char *str)
17519 {
17520 	if (tg3_flag(tp, PCI_EXPRESS)) {
17521 		strcpy(str, "PCI Express");
17522 		return str;
17523 	} else if (tg3_flag(tp, PCIX_MODE)) {
17524 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17525 
17526 		strcpy(str, "PCIX:");
17527 
17528 		if ((clock_ctrl == 7) ||
17529 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17530 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17531 			strcat(str, "133MHz");
17532 		else if (clock_ctrl == 0)
17533 			strcat(str, "33MHz");
17534 		else if (clock_ctrl == 2)
17535 			strcat(str, "50MHz");
17536 		else if (clock_ctrl == 4)
17537 			strcat(str, "66MHz");
17538 		else if (clock_ctrl == 6)
17539 			strcat(str, "100MHz");
17540 	} else {
17541 		strcpy(str, "PCI:");
17542 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17543 			strcat(str, "66MHz");
17544 		else
17545 			strcat(str, "33MHz");
17546 	}
17547 	if (tg3_flag(tp, PCI_32BIT))
17548 		strcat(str, ":32-bit");
17549 	else
17550 		strcat(str, ":64-bit");
17551 	return str;
17552 }
17553 
17554 static void tg3_init_coal(struct tg3 *tp)
17555 {
17556 	struct ethtool_coalesce *ec = &tp->coal;
17557 
17558 	memset(ec, 0, sizeof(*ec));
17559 	ec->cmd = ETHTOOL_GCOALESCE;
17560 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17561 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17562 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17563 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17564 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17565 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17566 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17567 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17568 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17569 
17570 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17571 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17572 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17573 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17574 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17575 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17576 	}
17577 
17578 	if (tg3_flag(tp, 5705_PLUS)) {
17579 		ec->rx_coalesce_usecs_irq = 0;
17580 		ec->tx_coalesce_usecs_irq = 0;
17581 		ec->stats_block_coalesce_usecs = 0;
17582 	}
17583 }
17584 
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
17587 {
17588 	struct net_device *dev;
17589 	struct tg3 *tp;
17590 	int i, err;
17591 	u32 sndmbx, rcvmbx, intmbx;
17592 	char str[40];
17593 	u64 dma_mask, persist_dma_mask;
17594 	netdev_features_t features = 0;
17595 
17596 	printk_once(KERN_INFO "%s\n", version);
17597 
17598 	err = pci_enable_device(pdev);
17599 	if (err) {
17600 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17601 		return err;
17602 	}
17603 
17604 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17605 	if (err) {
17606 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17607 		goto err_out_disable_pdev;
17608 	}
17609 
17610 	pci_set_master(pdev);
17611 
17612 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17613 	if (!dev) {
17614 		err = -ENOMEM;
17615 		goto err_out_free_res;
17616 	}
17617 
17618 	SET_NETDEV_DEV(dev, &pdev->dev);
17619 
17620 	tp = netdev_priv(dev);
17621 	tp->pdev = pdev;
17622 	tp->dev = dev;
17623 	tp->rx_mode = TG3_DEF_RX_MODE;
17624 	tp->tx_mode = TG3_DEF_TX_MODE;
17625 	tp->irq_sync = 1;
17626 	tp->pcierr_recovery = false;
17627 
17628 	if (tg3_debug > 0)
17629 		tp->msg_enable = tg3_debug;
17630 	else
17631 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17632 
17633 	if (pdev_is_ssb_gige_core(pdev)) {
17634 		tg3_flag_set(tp, IS_SSB_CORE);
17635 		if (ssb_gige_must_flush_posted_writes(pdev))
17636 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17637 		if (ssb_gige_one_dma_at_once(pdev))
17638 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17639 		if (ssb_gige_have_roboswitch(pdev)) {
17640 			tg3_flag_set(tp, USE_PHYLIB);
17641 			tg3_flag_set(tp, ROBOSWITCH);
17642 		}
17643 		if (ssb_gige_is_rgmii(pdev))
17644 			tg3_flag_set(tp, RGMII_MODE);
17645 	}
17646 
17647 	/* The word/byte swap controls here control register access byte
17648 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17649 	 * setting below.
17650 	 */
17651 	tp->misc_host_ctrl =
17652 		MISC_HOST_CTRL_MASK_PCI_INT |
17653 		MISC_HOST_CTRL_WORD_SWAP |
17654 		MISC_HOST_CTRL_INDIR_ACCESS |
17655 		MISC_HOST_CTRL_PCISTATE_RW;
17656 
17657 	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, i.e. anything which isn't packet data.
17659 	 *
17660 	 * The StrongARM chips on the board (one for tx, one for rx)
17661 	 * are running in big-endian mode.
17662 	 */
17663 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17664 			GRC_MODE_WSWAP_NONFRM_DATA);
17665 #ifdef __BIG_ENDIAN
17666 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17667 #endif
17668 	spin_lock_init(&tp->lock);
17669 	spin_lock_init(&tp->indirect_lock);
17670 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17671 
17672 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17673 	if (!tp->regs) {
17674 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17675 		err = -ENOMEM;
17676 		goto err_out_free_dev;
17677 	}
17678 
17679 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17680 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17681 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17682 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17683 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17684 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17685 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17686 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17687 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17688 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17689 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17690 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17691 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17692 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17693 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17694 		tg3_flag_set(tp, ENABLE_APE);
17695 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17696 		if (!tp->aperegs) {
17697 			dev_err(&pdev->dev,
17698 				"Cannot map APE registers, aborting\n");
17699 			err = -ENOMEM;
17700 			goto err_out_iounmap;
17701 		}
17702 	}
17703 
17704 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17705 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17706 
17707 	dev->ethtool_ops = &tg3_ethtool_ops;
17708 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17709 	dev->netdev_ops = &tg3_netdev_ops;
17710 	dev->irq = pdev->irq;
17711 
17712 	err = tg3_get_invariants(tp, ent);
17713 	if (err) {
17714 		dev_err(&pdev->dev,
17715 			"Problem fetching invariants of chip, aborting\n");
17716 		goto err_out_apeunmap;
17717 	}
17718 
17719 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17720 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17721 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17722 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17723 	 * do DMA address check in tg3_start_xmit().
17724 	 */
17725 	if (tg3_flag(tp, IS_5788))
17726 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17727 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17728 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17729 #ifdef CONFIG_HIGHMEM
17730 		dma_mask = DMA_BIT_MASK(64);
17731 #endif
17732 	} else
17733 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17734 
17735 	/* Configure DMA attributes. */
17736 	if (dma_mask > DMA_BIT_MASK(32)) {
17737 		err = pci_set_dma_mask(pdev, dma_mask);
17738 		if (!err) {
17739 			features |= NETIF_F_HIGHDMA;
17740 			err = pci_set_consistent_dma_mask(pdev,
17741 							  persist_dma_mask);
17742 			if (err < 0) {
17743 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17744 					"DMA for consistent allocations\n");
17745 				goto err_out_apeunmap;
17746 			}
17747 		}
17748 	}
17749 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17750 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17751 		if (err) {
17752 			dev_err(&pdev->dev,
17753 				"No usable DMA configuration, aborting\n");
17754 			goto err_out_apeunmap;
17755 		}
17756 	}
17757 
17758 	tg3_init_bufmgr_config(tp);
17759 
17760 	/* 5700 B0 chips do not support checksumming correctly due
17761 	 * to hardware bugs.
17762 	 */
17763 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17764 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17765 
17766 		if (tg3_flag(tp, 5755_PLUS))
17767 			features |= NETIF_F_IPV6_CSUM;
17768 	}
17769 
17770 	/* TSO is on by default on chips that support hardware TSO.
17771 	 * Firmware TSO on older chips gives lower performance, so it
17772 	 * is off by default, but can be enabled using ethtool.
17773 	 */
17774 	if ((tg3_flag(tp, HW_TSO_1) ||
17775 	     tg3_flag(tp, HW_TSO_2) ||
17776 	     tg3_flag(tp, HW_TSO_3)) &&
17777 	    (features & NETIF_F_IP_CSUM))
17778 		features |= NETIF_F_TSO;
17779 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17780 		if (features & NETIF_F_IPV6_CSUM)
17781 			features |= NETIF_F_TSO6;
17782 		if (tg3_flag(tp, HW_TSO_3) ||
17783 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17784 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17785 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17786 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17787 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17788 			features |= NETIF_F_TSO_ECN;
17789 	}
17790 
17791 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17792 			 NETIF_F_HW_VLAN_CTAG_RX;
17793 	dev->vlan_features |= features;
17794 
17795 	/*
17796 	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17798 	 * loopback for the remaining devices.
17799 	 */
17800 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17801 	    !tg3_flag(tp, CPMU_PRESENT))
17802 		/* Add the loopback capability */
17803 		features |= NETIF_F_LOOPBACK;
17804 
17805 	dev->hw_features |= features;
17806 	dev->priv_flags |= IFF_UNICAST_FLT;
17807 
17808 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17809 	dev->min_mtu = TG3_MIN_MTU;
17810 	dev->max_mtu = TG3_MAX_MTU(tp);
17811 
17812 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17813 	    !tg3_flag(tp, TSO_CAPABLE) &&
17814 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17815 		tg3_flag_set(tp, MAX_RXPEND_64);
17816 		tp->rx_pending = 63;
17817 	}
17818 
17819 	err = tg3_get_device_address(tp);
17820 	if (err) {
17821 		dev_err(&pdev->dev,
17822 			"Could not obtain valid ethernet address, aborting\n");
17823 		goto err_out_apeunmap;
17824 	}
17825 
17826 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17827 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17828 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17829 	for (i = 0; i < tp->irq_max; i++) {
17830 		struct tg3_napi *tnapi = &tp->napi[i];
17831 
17832 		tnapi->tp = tp;
17833 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17834 
17835 		tnapi->int_mbox = intmbx;
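		/* The first five interrupt mailboxes are spaced 8 bytes
		 * apart in register space; the rest are packed at 4-byte
		 * intervals.
		 */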
17836 		if (i <= 4)
17837 			intmbx += 0x8;
17838 		else
17839 			intmbx += 0x4;
17840 
17841 		tnapi->consmbox = rcvmbx;
17842 		tnapi->prodmbox = sndmbx;
17843 
17844 		if (i)
17845 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17846 		else
17847 			tnapi->coal_now = HOSTCC_MODE_NOW;
17848 
17849 		if (!tg3_flag(tp, SUPPORT_MSIX))
17850 			break;
17851 
17852 		/*
17853 		 * If we support MSIX, we'll be using RSS.  If we're using
17854 		 * RSS, the first vector only handles link interrupts and the
17855 		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for single-vector mode.
17858 		 */
17859 		if (!i)
17860 			continue;
17861 
17862 		rcvmbx += 0x8;
17863 
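		/* The producer mailboxes are 64-bit registers; the
		 * irregular stride keeps sndmbx pointing at the low
		 * 32-bit half of each successive mailbox.
		 */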
17864 		if (sndmbx & 0x4)
17865 			sndmbx -= 0x4;
17866 		else
17867 			sndmbx += 0xc;
17868 	}
17869 
17870 	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test below will enable the WDMAC,
	 * and we would otherwise see (spurious) pending DMA on the PCI
	 * bus at that point.
17874 	 */
17875 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17876 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17877 		tg3_full_lock(tp, 0);
17878 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17879 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17880 		tg3_full_unlock(tp);
17881 	}
17882 
17883 	err = tg3_test_dma(tp);
17884 	if (err) {
17885 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17886 		goto err_out_apeunmap;
17887 	}
17888 
17889 	tg3_init_coal(tp);
17890 
17891 	pci_set_drvdata(pdev, dev);
17892 
17893 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17894 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17895 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17896 		tg3_flag_set(tp, PTP_CAPABLE);
17897 
17898 	tg3_timer_init(tp);
17899 
17900 	tg3_carrier_off(tp);
17901 
17902 	err = register_netdev(dev);
17903 	if (err) {
17904 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17905 		goto err_out_apeunmap;
17906 	}
17907 
17908 	if (tg3_flag(tp, PTP_CAPABLE)) {
17909 		tg3_ptp_init(tp);
17910 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17911 						   &tp->pdev->dev);
17912 		if (IS_ERR(tp->ptp_clock))
17913 			tp->ptp_clock = NULL;
17914 	}
17915 
17916 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17917 		    tp->board_part_number,
17918 		    tg3_chip_rev_id(tp),
17919 		    tg3_bus_string(tp, str),
17920 		    dev->dev_addr);
17921 
17922 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17923 		char *ethtype;
17924 
17925 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17926 			ethtype = "10/100Base-TX";
17927 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17928 			ethtype = "1000Base-SX";
17929 		else
17930 			ethtype = "10/100/1000Base-T";
17931 
17932 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17933 			    "(WireSpeed[%d], EEE[%d])\n",
17934 			    tg3_phy_string(tp), ethtype,
17935 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17936 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17937 	}
17938 
17939 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17940 		    (dev->features & NETIF_F_RXCSUM) != 0,
17941 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17942 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17943 		    tg3_flag(tp, ENABLE_ASF) != 0,
17944 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17945 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17946 		    tp->dma_rwctrl,
17947 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17948 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17949 
17950 	pci_save_state(pdev);
17951 
17952 	return 0;
17953 
17954 err_out_apeunmap:
17955 	if (tp->aperegs) {
17956 		iounmap(tp->aperegs);
17957 		tp->aperegs = NULL;
17958 	}
17959 
17960 err_out_iounmap:
17961 	if (tp->regs) {
17962 		iounmap(tp->regs);
17963 		tp->regs = NULL;
17964 	}
17965 
17966 err_out_free_dev:
17967 	free_netdev(dev);
17968 
17969 err_out_free_res:
17970 	pci_release_regions(pdev);
17971 
17972 err_out_disable_pdev:
17973 	if (pci_is_enabled(pdev))
17974 		pci_disable_device(pdev);
17975 	return err;
17976 }
17977 
17978 static void tg3_remove_one(struct pci_dev *pdev)
17979 {
17980 	struct net_device *dev = pci_get_drvdata(pdev);
17981 
17982 	if (dev) {
17983 		struct tg3 *tp = netdev_priv(dev);
17984 
17985 		tg3_ptp_fini(tp);
17986 
17987 		release_firmware(tp->fw);
17988 
17989 		tg3_reset_task_cancel(tp);
17990 
17991 		if (tg3_flag(tp, USE_PHYLIB)) {
17992 			tg3_phy_fini(tp);
17993 			tg3_mdio_fini(tp);
17994 		}
17995 
17996 		unregister_netdev(dev);
17997 		if (tp->aperegs) {
17998 			iounmap(tp->aperegs);
17999 			tp->aperegs = NULL;
18000 		}
18001 		if (tp->regs) {
18002 			iounmap(tp->regs);
18003 			tp->regs = NULL;
18004 		}
18005 		free_netdev(dev);
18006 		pci_release_regions(pdev);
18007 		pci_disable_device(pdev);
18008 	}
18009 }
18010 
18011 #ifdef CONFIG_PM_SLEEP
18012 static int tg3_suspend(struct device *device)
18013 {
18014 	struct pci_dev *pdev = to_pci_dev(device);
18015 	struct net_device *dev = pci_get_drvdata(pdev);
18016 	struct tg3 *tp = netdev_priv(dev);
18017 	int err = 0;
18018 
18019 	rtnl_lock();
18020 
18021 	if (!netif_running(dev))
18022 		goto unlock;
18023 
18024 	tg3_reset_task_cancel(tp);
18025 	tg3_phy_stop(tp);
18026 	tg3_netif_stop(tp);
18027 
18028 	tg3_timer_stop(tp);
18029 
18030 	tg3_full_lock(tp, 1);
18031 	tg3_disable_ints(tp);
18032 	tg3_full_unlock(tp);
18033 
18034 	netif_device_detach(dev);
18035 
18036 	tg3_full_lock(tp, 0);
18037 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18038 	tg3_flag_clear(tp, INIT_COMPLETE);
18039 	tg3_full_unlock(tp);
18040 
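	/* If low-power preparation fails, undo the halt above: restart
	 * the hardware and timer so the interface stays usable, but
	 * still report the original error to the PM core.
	 */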
18041 	err = tg3_power_down_prepare(tp);
18042 	if (err) {
18043 		int err2;
18044 
18045 		tg3_full_lock(tp, 0);
18046 
18047 		tg3_flag_set(tp, INIT_COMPLETE);
18048 		err2 = tg3_restart_hw(tp, true);
18049 		if (err2)
18050 			goto out;
18051 
18052 		tg3_timer_start(tp);
18053 
18054 		netif_device_attach(dev);
18055 		tg3_netif_start(tp);
18056 
18057 out:
18058 		tg3_full_unlock(tp);
18059 
18060 		if (!err2)
18061 			tg3_phy_start(tp);
18062 	}
18063 
18064 unlock:
18065 	rtnl_unlock();
18066 	return err;
18067 }
18068 
18069 static int tg3_resume(struct device *device)
18070 {
18071 	struct pci_dev *pdev = to_pci_dev(device);
18072 	struct net_device *dev = pci_get_drvdata(pdev);
18073 	struct tg3 *tp = netdev_priv(dev);
18074 	int err = 0;
18075 
18076 	rtnl_lock();
18077 
18078 	if (!netif_running(dev))
18079 		goto unlock;
18080 
18081 	netif_device_attach(dev);
18082 
18083 	tg3_full_lock(tp, 0);
18084 
18085 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18086 
18087 	tg3_flag_set(tp, INIT_COMPLETE);
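	/* Don't reset the PHY if the link was deliberately kept up
	 * across the power-down.
	 */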
18088 	err = tg3_restart_hw(tp,
18089 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18090 	if (err)
18091 		goto out;
18092 
18093 	tg3_timer_start(tp);
18094 
18095 	tg3_netif_start(tp);
18096 
18097 out:
18098 	tg3_full_unlock(tp);
18099 
18100 	if (!err)
18101 		tg3_phy_start(tp);
18102 
18103 unlock:
18104 	rtnl_unlock();
18105 	return err;
18106 }
18107 #endif /* CONFIG_PM_SLEEP */
18108 
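/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that routes all the
 * system-sleep transitions through tg3_suspend()/tg3_resume(); without
 * CONFIG_PM_SLEEP it expands to an empty ops structure.
 */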
18109 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18110 
18111 static void tg3_shutdown(struct pci_dev *pdev)
18112 {
18113 	struct net_device *dev = pci_get_drvdata(pdev);
18114 	struct tg3 *tp = netdev_priv(dev);
18115 
18116 	rtnl_lock();
18117 	netif_device_detach(dev);
18118 
18119 	if (netif_running(dev))
18120 		dev_close(dev);
18121 
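	/* Only power the chip all the way down (D3hot, with WoL armed
	 * if enabled) when the machine is actually powering off; a
	 * plain reboot or kexec must not leave the device in D3.
	 */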
18122 	if (system_state == SYSTEM_POWER_OFF)
18123 		tg3_power_down(tp);
18124 
18125 	rtnl_unlock();
18126 }
18127 
18128 /**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
18132  *
18133  * This function is called after a PCI bus error affecting
18134  * this device has been detected.
18135  */
18136 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18137 					      pci_channel_state_t state)
18138 {
18139 	struct net_device *netdev = pci_get_drvdata(pdev);
18140 	struct tg3 *tp = netdev_priv(netdev);
18141 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18142 
18143 	netdev_info(netdev, "PCI I/O error detected\n");
18144 
18145 	rtnl_lock();
18146 
	/* The netdev may not be set up yet, or the interface may be down */
18148 	if (!netdev || !netif_running(netdev))
18149 		goto done;
18150 
	/* A frozen channel is recoverable; flag that recovery is in
	 * progress.  A permanent failure is handled in the done: path.
	 */
18152 	if (state == pci_channel_io_frozen)
18153 		tp->pcierr_recovery = true;
18154 
18155 	tg3_phy_stop(tp);
18156 
18157 	tg3_netif_stop(tp);
18158 
18159 	tg3_timer_stop(tp);
18160 
	/* Make sure the reset task cannot run while we tear down */
18162 	tg3_reset_task_cancel(tp);
18163 
18164 	netif_device_detach(netdev);
18165 
18166 	/* Clean up software state, even if MMIO is blocked */
18167 	tg3_full_lock(tp, 0);
18168 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18169 	tg3_full_unlock(tp);
18170 
18171 done:
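	/* On a permanent failure, close the device and ask the PCI core
	 * to disconnect it; otherwise disable it and wait for slot_reset.
	 */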
18172 	if (state == pci_channel_io_perm_failure) {
18173 		if (netdev) {
18174 			tg3_napi_enable(tp);
18175 			dev_close(netdev);
18176 		}
18177 		err = PCI_ERS_RESULT_DISCONNECT;
18178 	} else {
18179 		pci_disable_device(pdev);
18180 	}
18181 
18182 	rtnl_unlock();
18183 
18184 	return err;
18185 }
18186 
18187 /**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
18189  * @pdev: Pointer to PCI device
18190  *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
18195  */
18196 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18197 {
18198 	struct net_device *netdev = pci_get_drvdata(pdev);
18199 	struct tg3 *tp = netdev_priv(netdev);
18200 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18201 	int err;
18202 
18203 	rtnl_lock();
18204 
18205 	if (pci_enable_device(pdev)) {
18206 		dev_err(&pdev->dev,
18207 			"Cannot re-enable PCI device after reset.\n");
18208 		goto done;
18209 	}
18210 
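	/* Re-enable bus mastering and put back the config space captured
	 * at probe time, then re-save it so a later reset starts from
	 * this known-good state.
	 */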
18211 	pci_set_master(pdev);
18212 	pci_restore_state(pdev);
18213 	pci_save_state(pdev);
18214 
18215 	if (!netdev || !netif_running(netdev)) {
18216 		rc = PCI_ERS_RESULT_RECOVERED;
18217 		goto done;
18218 	}
18219 
18220 	err = tg3_power_up(tp);
18221 	if (err)
18222 		goto done;
18223 
18224 	rc = PCI_ERS_RESULT_RECOVERED;
18225 
18226 done:
18227 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18228 		tg3_napi_enable(tp);
18229 		dev_close(netdev);
18230 	}
18231 	rtnl_unlock();
18232 
18233 	return rc;
18234 }
18235 
18236 /**
18237  * tg3_io_resume - called when traffic can start flowing again.
18238  * @pdev: Pointer to PCI device
18239  *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
18242  */
18243 static void tg3_io_resume(struct pci_dev *pdev)
18244 {
18245 	struct net_device *netdev = pci_get_drvdata(pdev);
18246 	struct tg3 *tp = netdev_priv(netdev);
18247 	int err;
18248 
18249 	rtnl_lock();
18250 
18251 	if (!netdev || !netif_running(netdev))
18252 		goto done;
18253 
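	/* Tell the APE management firmware the driver is initializing,
	 * then fully reinitialize the hardware.
	 */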
18254 	tg3_full_lock(tp, 0);
18255 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18256 	tg3_flag_set(tp, INIT_COMPLETE);
18257 	err = tg3_restart_hw(tp, true);
18258 	if (err) {
18259 		tg3_full_unlock(tp);
18260 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18261 		goto done;
18262 	}
18263 
18264 	netif_device_attach(netdev);
18265 
18266 	tg3_timer_start(tp);
18267 
18268 	tg3_netif_start(tp);
18269 
18270 	tg3_full_unlock(tp);
18271 
18272 	tg3_phy_start(tp);
18273 
18274 done:
18275 	tp->pcierr_recovery = false;
18276 	rtnl_unlock();
18277 }
18278 
18279 static const struct pci_error_handlers tg3_err_handler = {
18280 	.error_detected	= tg3_io_error_detected,
18281 	.slot_reset	= tg3_io_slot_reset,
18282 	.resume		= tg3_io_resume
18283 };
18284 
18285 static struct pci_driver tg3_driver = {
18286 	.name		= DRV_MODULE_NAME,
18287 	.id_table	= tg3_pci_tbl,
18288 	.probe		= tg3_init_one,
18289 	.remove		= tg3_remove_one,
18290 	.err_handler	= &tg3_err_handler,
18291 	.driver.pm	= &tg3_pm_ops,
18292 	.shutdown	= tg3_shutdown,
18293 };
18294 
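/* module_pci_driver() generates the module init/exit boilerplate that
 * calls pci_register_driver()/pci_unregister_driver() on tg3_driver.
 */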
18295 module_pci_driver(tg3_driver);
18296