1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/ip.h>
61 
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65 
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68 
69 #define BAR_0	0
70 #define BAR_2	2
71 
72 #include "tg3.h"
73 
74 /* Functions & macros to verify TG3_FLAGS types */
75 
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78 	return test_bit(flag, bits);
79 }
80 
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 	set_bit(flag, bits);
84 }
85 
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 	clear_bit(flag, bits);
89 }
90 
91 #define tg3_flag(tp, flag)				\
92 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag)				\
94 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag)			\
96 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
97 
98 #define DRV_MODULE_NAME		"tg3"
99 #define TG3_MAJ_NUM			3
100 #define TG3_MIN_NUM			137
101 #define DRV_MODULE_VERSION	\
102 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
103 #define DRV_MODULE_RELDATE	"May 11, 2014"
104 
105 #define RESET_KIND_SHUTDOWN	0
106 #define RESET_KIND_INIT		1
107 #define RESET_KIND_SUSPEND	2
108 
109 #define TG3_DEF_RX_MODE		0
110 #define TG3_DEF_TX_MODE		0
111 #define TG3_DEF_MSG_ENABLE	  \
112 	(NETIF_MSG_DRV		| \
113 	 NETIF_MSG_PROBE	| \
114 	 NETIF_MSG_LINK		| \
115 	 NETIF_MSG_TIMER	| \
116 	 NETIF_MSG_IFDOWN	| \
117 	 NETIF_MSG_IFUP		| \
118 	 NETIF_MSG_RX_ERR	| \
119 	 NETIF_MSG_TX_ERR)
120 
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
122 
123 /* length of time before we decide the hardware is borked,
124  * and dev->tx_timeout() should be called to fix the problem
125  */
126 
127 #define TG3_TX_TIMEOUT			(5 * HZ)
128 
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU			ETH_ZLEN
131 #define TG3_MAX_MTU(tp)	\
132 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133 
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135  * You can't change the ring sizes, but you can change where you place
136  * them in the NIC onboard memory.
137  */
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING		200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
146 
147 /* Do not place this n-ring entries value into the tp struct itself,
148  * we really want to expose these constants to GCC so that modulo et
149  * al.  operations are done with shifts and masks instead of with
150  * hw multiply/modulo instructions.  Another solution would be to
151  * replace things like '% foo' with '& (foo - 1)'.
152  */
153 
154 #define TG3_TX_RING_SIZE		512
155 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
156 
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
164 				 TG3_TX_RING_SIZE)
165 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
166 
167 #define TG3_DMA_BYTE_ENAB		64
168 
169 #define TG3_RX_STD_DMA_SZ		1536
170 #define TG3_RX_JMB_DMA_SZ		9046
171 
172 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
173 
174 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
176 
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
179 
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
182 
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184  * that are at least dword aligned when used in PCIX mode.  The driver
185  * works around this bug by double copying the packet.  This workaround
186  * is built into the normal double copy length check for efficiency.
187  *
188  * However, the double copy is only necessary on those architectures
189  * where unaligned memory accesses are inefficient.  For those architectures
190  * where unaligned memory accesses incur little penalty, we can reintegrate
191  * the 5701 in the normal rx path.  Doing so saves a device structure
192  * dereference by hardcoding the double copy threshold in place.
193  */
194 #define TG3_RX_COPY_THRESHOLD		256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
197 #else
198 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
199 #endif
200 
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
203 #else
204 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
205 #endif
206 
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K		2048
210 #define TG3_TX_BD_DMA_MAX_4K		4096
211 
212 #define TG3_RAW_IP_ALIGN 2
213 
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
216 
217 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
218 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
219 
220 #define FIRMWARE_TG3		"tigon/tg3.bin"
221 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224 
225 static char version[] =
226 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
227 
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
235 
236 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
239 
240 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
241 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
242 
243 static const struct pci_device_id tg3_pci_tbl[] = {
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
263 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 			TG3_DRV_DATA_FLAG_5705_10_100},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
266 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 			TG3_DRV_DATA_FLAG_5705_10_100},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
270 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 			TG3_DRV_DATA_FLAG_5705_10_100},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
284 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
292 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
293 			PCI_VENDOR_ID_LENOVO,
294 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
295 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
298 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
314 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
315 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
316 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
317 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
319 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
321 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
322 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
326 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
336 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
338 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
355 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
356 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
357 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
358 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
359 	{}
360 };
361 
362 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
363 
364 static const struct {
365 	const char string[ETH_GSTRING_LEN];
366 } ethtool_stats_keys[] = {
367 	{ "rx_octets" },
368 	{ "rx_fragments" },
369 	{ "rx_ucast_packets" },
370 	{ "rx_mcast_packets" },
371 	{ "rx_bcast_packets" },
372 	{ "rx_fcs_errors" },
373 	{ "rx_align_errors" },
374 	{ "rx_xon_pause_rcvd" },
375 	{ "rx_xoff_pause_rcvd" },
376 	{ "rx_mac_ctrl_rcvd" },
377 	{ "rx_xoff_entered" },
378 	{ "rx_frame_too_long_errors" },
379 	{ "rx_jabbers" },
380 	{ "rx_undersize_packets" },
381 	{ "rx_in_length_errors" },
382 	{ "rx_out_length_errors" },
383 	{ "rx_64_or_less_octet_packets" },
384 	{ "rx_65_to_127_octet_packets" },
385 	{ "rx_128_to_255_octet_packets" },
386 	{ "rx_256_to_511_octet_packets" },
387 	{ "rx_512_to_1023_octet_packets" },
388 	{ "rx_1024_to_1522_octet_packets" },
389 	{ "rx_1523_to_2047_octet_packets" },
390 	{ "rx_2048_to_4095_octet_packets" },
391 	{ "rx_4096_to_8191_octet_packets" },
392 	{ "rx_8192_to_9022_octet_packets" },
393 
394 	{ "tx_octets" },
395 	{ "tx_collisions" },
396 
397 	{ "tx_xon_sent" },
398 	{ "tx_xoff_sent" },
399 	{ "tx_flow_control" },
400 	{ "tx_mac_errors" },
401 	{ "tx_single_collisions" },
402 	{ "tx_mult_collisions" },
403 	{ "tx_deferred" },
404 	{ "tx_excessive_collisions" },
405 	{ "tx_late_collisions" },
406 	{ "tx_collide_2times" },
407 	{ "tx_collide_3times" },
408 	{ "tx_collide_4times" },
409 	{ "tx_collide_5times" },
410 	{ "tx_collide_6times" },
411 	{ "tx_collide_7times" },
412 	{ "tx_collide_8times" },
413 	{ "tx_collide_9times" },
414 	{ "tx_collide_10times" },
415 	{ "tx_collide_11times" },
416 	{ "tx_collide_12times" },
417 	{ "tx_collide_13times" },
418 	{ "tx_collide_14times" },
419 	{ "tx_collide_15times" },
420 	{ "tx_ucast_packets" },
421 	{ "tx_mcast_packets" },
422 	{ "tx_bcast_packets" },
423 	{ "tx_carrier_sense_errors" },
424 	{ "tx_discards" },
425 	{ "tx_errors" },
426 
427 	{ "dma_writeq_full" },
428 	{ "dma_write_prioq_full" },
429 	{ "rxbds_empty" },
430 	{ "rx_discards" },
431 	{ "rx_errors" },
432 	{ "rx_threshold_hit" },
433 
434 	{ "dma_readq_full" },
435 	{ "dma_read_prioq_full" },
436 	{ "tx_comp_queue_full" },
437 
438 	{ "ring_set_send_prod_index" },
439 	{ "ring_status_update" },
440 	{ "nic_irqs" },
441 	{ "nic_avoided_irqs" },
442 	{ "nic_tx_threshold_hit" },
443 
444 	{ "mbuf_lwm_thresh_hit" },
445 };
446 
447 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
448 #define TG3_NVRAM_TEST		0
449 #define TG3_LINK_TEST		1
450 #define TG3_REGISTER_TEST	2
451 #define TG3_MEMORY_TEST		3
452 #define TG3_MAC_LOOPB_TEST	4
453 #define TG3_PHY_LOOPB_TEST	5
454 #define TG3_EXT_LOOPB_TEST	6
455 #define TG3_INTERRUPT_TEST	7
456 
457 
458 static const struct {
459 	const char string[ETH_GSTRING_LEN];
460 } ethtool_test_keys[] = {
461 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
462 	[TG3_LINK_TEST]		= { "link test         (online) " },
463 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
464 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
465 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
466 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
467 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
468 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
469 };
470 
471 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
472 
473 
474 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
475 {
476 	writel(val, tp->regs + off);
477 }
478 
479 static u32 tg3_read32(struct tg3 *tp, u32 off)
480 {
481 	return readl(tp->regs + off);
482 }
483 
484 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
485 {
486 	writel(val, tp->aperegs + off);
487 }
488 
489 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
490 {
491 	return readl(tp->aperegs + off);
492 }
493 
494 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
495 {
496 	unsigned long flags;
497 
498 	spin_lock_irqsave(&tp->indirect_lock, flags);
499 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
500 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
501 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503 
504 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
505 {
506 	writel(val, tp->regs + off);
507 	readl(tp->regs + off);
508 }
509 
510 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
511 {
512 	unsigned long flags;
513 	u32 val;
514 
515 	spin_lock_irqsave(&tp->indirect_lock, flags);
516 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
517 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
518 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
519 	return val;
520 }
521 
522 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
523 {
524 	unsigned long flags;
525 
526 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
527 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
528 				       TG3_64BIT_REG_LOW, val);
529 		return;
530 	}
531 	if (off == TG3_RX_STD_PROD_IDX_REG) {
532 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
533 				       TG3_64BIT_REG_LOW, val);
534 		return;
535 	}
536 
537 	spin_lock_irqsave(&tp->indirect_lock, flags);
538 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
539 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
540 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
541 
542 	/* In indirect mode when disabling interrupts, we also need
543 	 * to clear the interrupt bit in the GRC local ctrl register.
544 	 */
545 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
546 	    (val == 0x1)) {
547 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
548 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
549 	}
550 }
551 
552 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
553 {
554 	unsigned long flags;
555 	u32 val;
556 
557 	spin_lock_irqsave(&tp->indirect_lock, flags);
558 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
559 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
560 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
561 	return val;
562 }
563 
564 /* usec_wait specifies the wait time in usec when writing to certain registers
565  * where it is unsafe to read back the register without some delay.
566  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
567  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
568  */
569 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
570 {
571 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
572 		/* Non-posted methods */
573 		tp->write32(tp, off, val);
574 	else {
575 		/* Posted method */
576 		tg3_write32(tp, off, val);
577 		if (usec_wait)
578 			udelay(usec_wait);
579 		tp->read32(tp, off);
580 	}
581 	/* Wait again after the read for the posted method to guarantee that
582 	 * the wait time is met.
583 	 */
584 	if (usec_wait)
585 		udelay(usec_wait);
586 }
587 
588 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
589 {
590 	tp->write32_mbox(tp, off, val);
591 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
592 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
593 	     !tg3_flag(tp, ICH_WORKAROUND)))
594 		tp->read32_mbox(tp, off);
595 }
596 
597 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
598 {
599 	void __iomem *mbox = tp->regs + off;
600 	writel(val, mbox);
601 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
602 		writel(val, mbox);
603 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
604 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
605 		readl(mbox);
606 }
607 
608 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
609 {
610 	return readl(tp->regs + off + GRCMBOX_BASE);
611 }
612 
613 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
614 {
615 	writel(val, tp->regs + off + GRCMBOX_BASE);
616 }
617 
618 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
619 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
620 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
621 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
622 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
623 
624 #define tw32(reg, val)			tp->write32(tp, reg, val)
625 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
626 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
627 #define tr32(reg)			tp->read32(tp, reg)
628 
629 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
630 {
631 	unsigned long flags;
632 
633 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
634 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
635 		return;
636 
637 	spin_lock_irqsave(&tp->indirect_lock, flags);
638 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
639 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
640 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
641 
642 		/* Always leave this as zero. */
643 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
644 	} else {
645 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
646 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
647 
648 		/* Always leave this as zero. */
649 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
650 	}
651 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
652 }
653 
654 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
655 {
656 	unsigned long flags;
657 
658 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
659 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
660 		*val = 0;
661 		return;
662 	}
663 
664 	spin_lock_irqsave(&tp->indirect_lock, flags);
665 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
666 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
667 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
668 
669 		/* Always leave this as zero. */
670 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
671 	} else {
672 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
673 		*val = tr32(TG3PCI_MEM_WIN_DATA);
674 
675 		/* Always leave this as zero. */
676 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
677 	}
678 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
679 }
680 
681 static void tg3_ape_lock_init(struct tg3 *tp)
682 {
683 	int i;
684 	u32 regbase, bit;
685 
686 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
687 		regbase = TG3_APE_LOCK_GRANT;
688 	else
689 		regbase = TG3_APE_PER_LOCK_GRANT;
690 
691 	/* Make sure the driver hasn't any stale locks. */
692 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
693 		switch (i) {
694 		case TG3_APE_LOCK_PHY0:
695 		case TG3_APE_LOCK_PHY1:
696 		case TG3_APE_LOCK_PHY2:
697 		case TG3_APE_LOCK_PHY3:
698 			bit = APE_LOCK_GRANT_DRIVER;
699 			break;
700 		default:
701 			if (!tp->pci_fn)
702 				bit = APE_LOCK_GRANT_DRIVER;
703 			else
704 				bit = 1 << tp->pci_fn;
705 		}
706 		tg3_ape_write32(tp, regbase + 4 * i, bit);
707 	}
708 
709 }
710 
711 static int tg3_ape_lock(struct tg3 *tp, int locknum)
712 {
713 	int i, off;
714 	int ret = 0;
715 	u32 status, req, gnt, bit;
716 
717 	if (!tg3_flag(tp, ENABLE_APE))
718 		return 0;
719 
720 	switch (locknum) {
721 	case TG3_APE_LOCK_GPIO:
722 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
723 			return 0;
724 		/* fall through */
725 	case TG3_APE_LOCK_GRC:
726 	case TG3_APE_LOCK_MEM:
727 		if (!tp->pci_fn)
728 			bit = APE_LOCK_REQ_DRIVER;
729 		else
730 			bit = 1 << tp->pci_fn;
731 		break;
732 	case TG3_APE_LOCK_PHY0:
733 	case TG3_APE_LOCK_PHY1:
734 	case TG3_APE_LOCK_PHY2:
735 	case TG3_APE_LOCK_PHY3:
736 		bit = APE_LOCK_REQ_DRIVER;
737 		break;
738 	default:
739 		return -EINVAL;
740 	}
741 
742 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
743 		req = TG3_APE_LOCK_REQ;
744 		gnt = TG3_APE_LOCK_GRANT;
745 	} else {
746 		req = TG3_APE_PER_LOCK_REQ;
747 		gnt = TG3_APE_PER_LOCK_GRANT;
748 	}
749 
750 	off = 4 * locknum;
751 
752 	tg3_ape_write32(tp, req + off, bit);
753 
754 	/* Wait for up to 1 millisecond to acquire lock. */
755 	for (i = 0; i < 100; i++) {
756 		status = tg3_ape_read32(tp, gnt + off);
757 		if (status == bit)
758 			break;
759 		if (pci_channel_offline(tp->pdev))
760 			break;
761 
762 		udelay(10);
763 	}
764 
765 	if (status != bit) {
766 		/* Revoke the lock request. */
767 		tg3_ape_write32(tp, gnt + off, bit);
768 		ret = -EBUSY;
769 	}
770 
771 	return ret;
772 }
773 
774 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
775 {
776 	u32 gnt, bit;
777 
778 	if (!tg3_flag(tp, ENABLE_APE))
779 		return;
780 
781 	switch (locknum) {
782 	case TG3_APE_LOCK_GPIO:
783 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
784 			return;
785 		/* fall through */
786 	case TG3_APE_LOCK_GRC:
787 	case TG3_APE_LOCK_MEM:
788 		if (!tp->pci_fn)
789 			bit = APE_LOCK_GRANT_DRIVER;
790 		else
791 			bit = 1 << tp->pci_fn;
792 		break;
793 	case TG3_APE_LOCK_PHY0:
794 	case TG3_APE_LOCK_PHY1:
795 	case TG3_APE_LOCK_PHY2:
796 	case TG3_APE_LOCK_PHY3:
797 		bit = APE_LOCK_GRANT_DRIVER;
798 		break;
799 	default:
800 		return;
801 	}
802 
803 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
804 		gnt = TG3_APE_LOCK_GRANT;
805 	else
806 		gnt = TG3_APE_PER_LOCK_GRANT;
807 
808 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
809 }
810 
811 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
812 {
813 	u32 apedata;
814 
815 	while (timeout_us) {
816 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
817 			return -EBUSY;
818 
819 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
820 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
821 			break;
822 
823 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
824 
825 		udelay(10);
826 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
827 	}
828 
829 	return timeout_us ? 0 : -EBUSY;
830 }
831 
832 #ifdef CONFIG_TIGON3_HWMON
833 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
834 {
835 	u32 i, apedata;
836 
837 	for (i = 0; i < timeout_us / 10; i++) {
838 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
839 
840 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
841 			break;
842 
843 		udelay(10);
844 	}
845 
846 	return i == timeout_us / 10;
847 }
848 
849 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
850 				   u32 len)
851 {
852 	int err;
853 	u32 i, bufoff, msgoff, maxlen, apedata;
854 
855 	if (!tg3_flag(tp, APE_HAS_NCSI))
856 		return 0;
857 
858 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
859 	if (apedata != APE_SEG_SIG_MAGIC)
860 		return -ENODEV;
861 
862 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
863 	if (!(apedata & APE_FW_STATUS_READY))
864 		return -EAGAIN;
865 
866 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
867 		 TG3_APE_SHMEM_BASE;
868 	msgoff = bufoff + 2 * sizeof(u32);
869 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
870 
871 	while (len) {
872 		u32 length;
873 
874 		/* Cap xfer sizes to scratchpad limits. */
875 		length = (len > maxlen) ? maxlen : len;
876 		len -= length;
877 
878 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
879 		if (!(apedata & APE_FW_STATUS_READY))
880 			return -EAGAIN;
881 
882 		/* Wait for up to 1 msec for APE to service previous event. */
883 		err = tg3_ape_event_lock(tp, 1000);
884 		if (err)
885 			return err;
886 
887 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
888 			  APE_EVENT_STATUS_SCRTCHPD_READ |
889 			  APE_EVENT_STATUS_EVENT_PENDING;
890 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
891 
892 		tg3_ape_write32(tp, bufoff, base_off);
893 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
894 
895 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
896 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
897 
898 		base_off += length;
899 
900 		if (tg3_ape_wait_for_event(tp, 30000))
901 			return -EAGAIN;
902 
903 		for (i = 0; length; i += 4, length -= 4) {
904 			u32 val = tg3_ape_read32(tp, msgoff + i);
905 			memcpy(data, &val, sizeof(u32));
906 			data++;
907 		}
908 	}
909 
910 	return 0;
911 }
912 #endif
913 
914 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
915 {
916 	int err;
917 	u32 apedata;
918 
919 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
920 	if (apedata != APE_SEG_SIG_MAGIC)
921 		return -EAGAIN;
922 
923 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
924 	if (!(apedata & APE_FW_STATUS_READY))
925 		return -EAGAIN;
926 
927 	/* Wait for up to 20 millisecond for APE to service previous event. */
928 	err = tg3_ape_event_lock(tp, 20000);
929 	if (err)
930 		return err;
931 
932 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
933 			event | APE_EVENT_STATUS_EVENT_PENDING);
934 
935 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
936 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
937 
938 	return 0;
939 }
940 
941 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
942 {
943 	u32 event;
944 	u32 apedata;
945 
946 	if (!tg3_flag(tp, ENABLE_APE))
947 		return;
948 
949 	switch (kind) {
950 	case RESET_KIND_INIT:
951 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
952 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
953 				APE_HOST_SEG_SIG_MAGIC);
954 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
955 				APE_HOST_SEG_LEN_MAGIC);
956 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
957 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
958 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
959 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
960 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
961 				APE_HOST_BEHAV_NO_PHYLOCK);
962 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
963 				    TG3_APE_HOST_DRVR_STATE_START);
964 
965 		event = APE_EVENT_STATUS_STATE_START;
966 		break;
967 	case RESET_KIND_SHUTDOWN:
968 		if (device_may_wakeup(&tp->pdev->dev) &&
969 		    tg3_flag(tp, WOL_ENABLE)) {
970 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
971 					    TG3_APE_HOST_WOL_SPEED_AUTO);
972 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
973 		} else
974 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
975 
976 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
977 
978 		event = APE_EVENT_STATUS_STATE_UNLOAD;
979 		break;
980 	default:
981 		return;
982 	}
983 
984 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
985 
986 	tg3_ape_send_event(tp, event);
987 }
988 
989 static void tg3_send_ape_heartbeat(struct tg3 *tp,
990 				   unsigned long interval)
991 {
992 	/* Check if hb interval has exceeded */
993 	if (!tg3_flag(tp, ENABLE_APE) ||
994 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
995 		return;
996 
997 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
998 	tp->ape_hb_jiffies = jiffies;
999 }
1000 
1001 static void tg3_disable_ints(struct tg3 *tp)
1002 {
1003 	int i;
1004 
1005 	tw32(TG3PCI_MISC_HOST_CTRL,
1006 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1007 	for (i = 0; i < tp->irq_max; i++)
1008 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1009 }
1010 
1011 static void tg3_enable_ints(struct tg3 *tp)
1012 {
1013 	int i;
1014 
1015 	tp->irq_sync = 0;
1016 	wmb();
1017 
1018 	tw32(TG3PCI_MISC_HOST_CTRL,
1019 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1020 
1021 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1022 	for (i = 0; i < tp->irq_cnt; i++) {
1023 		struct tg3_napi *tnapi = &tp->napi[i];
1024 
1025 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1026 		if (tg3_flag(tp, 1SHOT_MSI))
1027 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1028 
1029 		tp->coal_now |= tnapi->coal_now;
1030 	}
1031 
1032 	/* Force an initial interrupt */
1033 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1034 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1035 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1036 	else
1037 		tw32(HOSTCC_MODE, tp->coal_now);
1038 
1039 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1040 }
1041 
1042 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1043 {
1044 	struct tg3 *tp = tnapi->tp;
1045 	struct tg3_hw_status *sblk = tnapi->hw_status;
1046 	unsigned int work_exists = 0;
1047 
1048 	/* check for phy events */
1049 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1050 		if (sblk->status & SD_STATUS_LINK_CHG)
1051 			work_exists = 1;
1052 	}
1053 
1054 	/* check for TX work to do */
1055 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1056 		work_exists = 1;
1057 
1058 	/* check for RX work to do */
1059 	if (tnapi->rx_rcb_prod_idx &&
1060 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1061 		work_exists = 1;
1062 
1063 	return work_exists;
1064 }
1065 
1066 /* tg3_int_reenable
1067  *  similar to tg3_enable_ints, but it accurately determines whether there
1068  *  is new work pending and can return without flushing the PIO write
1069  *  which reenables interrupts
1070  */
1071 static void tg3_int_reenable(struct tg3_napi *tnapi)
1072 {
1073 	struct tg3 *tp = tnapi->tp;
1074 
1075 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1076 
1077 	/* When doing tagged status, this work check is unnecessary.
1078 	 * The last_tag we write above tells the chip which piece of
1079 	 * work we've completed.
1080 	 */
1081 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1082 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1083 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1084 }
1085 
1086 static void tg3_switch_clocks(struct tg3 *tp)
1087 {
1088 	u32 clock_ctrl;
1089 	u32 orig_clock_ctrl;
1090 
1091 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1092 		return;
1093 
1094 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1095 
1096 	orig_clock_ctrl = clock_ctrl;
1097 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1098 		       CLOCK_CTRL_CLKRUN_OENABLE |
1099 		       0x1f);
1100 	tp->pci_clock_ctrl = clock_ctrl;
1101 
1102 	if (tg3_flag(tp, 5705_PLUS)) {
1103 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1104 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1106 		}
1107 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1108 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 			    clock_ctrl |
1110 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1111 			    40);
1112 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1113 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1114 			    40);
1115 	}
1116 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1117 }
1118 
1119 #define PHY_BUSY_LOOPS	5000
1120 
1121 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1122 			 u32 *val)
1123 {
1124 	u32 frame_val;
1125 	unsigned int loops;
1126 	int ret;
1127 
1128 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1129 		tw32_f(MAC_MI_MODE,
1130 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1131 		udelay(80);
1132 	}
1133 
1134 	tg3_ape_lock(tp, tp->phy_ape_lock);
1135 
1136 	*val = 0x0;
1137 
1138 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1139 		      MI_COM_PHY_ADDR_MASK);
1140 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1141 		      MI_COM_REG_ADDR_MASK);
1142 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1143 
1144 	tw32_f(MAC_MI_COM, frame_val);
1145 
1146 	loops = PHY_BUSY_LOOPS;
1147 	while (loops != 0) {
1148 		udelay(10);
1149 		frame_val = tr32(MAC_MI_COM);
1150 
1151 		if ((frame_val & MI_COM_BUSY) == 0) {
1152 			udelay(5);
1153 			frame_val = tr32(MAC_MI_COM);
1154 			break;
1155 		}
1156 		loops -= 1;
1157 	}
1158 
1159 	ret = -EBUSY;
1160 	if (loops != 0) {
1161 		*val = frame_val & MI_COM_DATA_MASK;
1162 		ret = 0;
1163 	}
1164 
1165 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1166 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1167 		udelay(80);
1168 	}
1169 
1170 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1171 
1172 	return ret;
1173 }
1174 
1175 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1176 {
1177 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1178 }
1179 
1180 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1181 			  u32 val)
1182 {
1183 	u32 frame_val;
1184 	unsigned int loops;
1185 	int ret;
1186 
1187 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1188 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1189 		return 0;
1190 
1191 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1192 		tw32_f(MAC_MI_MODE,
1193 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1194 		udelay(80);
1195 	}
1196 
1197 	tg3_ape_lock(tp, tp->phy_ape_lock);
1198 
1199 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1200 		      MI_COM_PHY_ADDR_MASK);
1201 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1202 		      MI_COM_REG_ADDR_MASK);
1203 	frame_val |= (val & MI_COM_DATA_MASK);
1204 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1205 
1206 	tw32_f(MAC_MI_COM, frame_val);
1207 
1208 	loops = PHY_BUSY_LOOPS;
1209 	while (loops != 0) {
1210 		udelay(10);
1211 		frame_val = tr32(MAC_MI_COM);
1212 		if ((frame_val & MI_COM_BUSY) == 0) {
1213 			udelay(5);
1214 			frame_val = tr32(MAC_MI_COM);
1215 			break;
1216 		}
1217 		loops -= 1;
1218 	}
1219 
1220 	ret = -EBUSY;
1221 	if (loops != 0)
1222 		ret = 0;
1223 
1224 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1225 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1226 		udelay(80);
1227 	}
1228 
1229 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1230 
1231 	return ret;
1232 }
1233 
1234 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1235 {
1236 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1237 }
1238 
1239 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1240 {
1241 	int err;
1242 
1243 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1244 	if (err)
1245 		goto done;
1246 
1247 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1248 	if (err)
1249 		goto done;
1250 
1251 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1253 	if (err)
1254 		goto done;
1255 
1256 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1257 
1258 done:
1259 	return err;
1260 }
1261 
1262 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1263 {
1264 	int err;
1265 
1266 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1267 	if (err)
1268 		goto done;
1269 
1270 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1271 	if (err)
1272 		goto done;
1273 
1274 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1275 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1276 	if (err)
1277 		goto done;
1278 
1279 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1280 
1281 done:
1282 	return err;
1283 }
1284 
1285 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1286 {
1287 	int err;
1288 
1289 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1290 	if (!err)
1291 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1292 
1293 	return err;
1294 }
1295 
1296 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1297 {
1298 	int err;
1299 
1300 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1301 	if (!err)
1302 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1303 
1304 	return err;
1305 }
1306 
1307 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1308 {
1309 	int err;
1310 
1311 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1312 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1313 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1314 	if (!err)
1315 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1316 
1317 	return err;
1318 }
1319 
1320 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1321 {
1322 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1323 		set |= MII_TG3_AUXCTL_MISC_WREN;
1324 
1325 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1326 }
1327 
1328 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1329 {
1330 	u32 val;
1331 	int err;
1332 
1333 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1334 
1335 	if (err)
1336 		return err;
1337 
1338 	if (enable)
1339 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1340 	else
1341 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1342 
1343 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1344 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1345 
1346 	return err;
1347 }
1348 
1349 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1350 {
1351 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1352 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1353 }
1354 
1355 static int tg3_bmcr_reset(struct tg3 *tp)
1356 {
1357 	u32 phy_control;
1358 	int limit, err;
1359 
1360 	/* OK, reset it, and poll the BMCR_RESET bit until it
1361 	 * clears or we time out.
1362 	 */
1363 	phy_control = BMCR_RESET;
1364 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1365 	if (err != 0)
1366 		return -EBUSY;
1367 
1368 	limit = 5000;
1369 	while (limit--) {
1370 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1371 		if (err != 0)
1372 			return -EBUSY;
1373 
1374 		if ((phy_control & BMCR_RESET) == 0) {
1375 			udelay(40);
1376 			break;
1377 		}
1378 		udelay(10);
1379 	}
1380 	if (limit < 0)
1381 		return -EBUSY;
1382 
1383 	return 0;
1384 }
1385 
1386 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1387 {
1388 	struct tg3 *tp = bp->priv;
1389 	u32 val;
1390 
1391 	spin_lock_bh(&tp->lock);
1392 
1393 	if (__tg3_readphy(tp, mii_id, reg, &val))
1394 		val = -EIO;
1395 
1396 	spin_unlock_bh(&tp->lock);
1397 
1398 	return val;
1399 }
1400 
1401 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1402 {
1403 	struct tg3 *tp = bp->priv;
1404 	u32 ret = 0;
1405 
1406 	spin_lock_bh(&tp->lock);
1407 
1408 	if (__tg3_writephy(tp, mii_id, reg, val))
1409 		ret = -EIO;
1410 
1411 	spin_unlock_bh(&tp->lock);
1412 
1413 	return ret;
1414 }
1415 
1416 static void tg3_mdio_config_5785(struct tg3 *tp)
1417 {
1418 	u32 val;
1419 	struct phy_device *phydev;
1420 
1421 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1422 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1423 	case PHY_ID_BCM50610:
1424 	case PHY_ID_BCM50610M:
1425 		val = MAC_PHYCFG2_50610_LED_MODES;
1426 		break;
1427 	case PHY_ID_BCMAC131:
1428 		val = MAC_PHYCFG2_AC131_LED_MODES;
1429 		break;
1430 	case PHY_ID_RTL8211C:
1431 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1432 		break;
1433 	case PHY_ID_RTL8201E:
1434 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1435 		break;
1436 	default:
1437 		return;
1438 	}
1439 
1440 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1441 		tw32(MAC_PHYCFG2, val);
1442 
1443 		val = tr32(MAC_PHYCFG1);
1444 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1445 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1446 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1447 		tw32(MAC_PHYCFG1, val);
1448 
1449 		return;
1450 	}
1451 
1452 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1453 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1454 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1455 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1456 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1457 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1458 		       MAC_PHYCFG2_INBAND_ENABLE;
1459 
1460 	tw32(MAC_PHYCFG2, val);
1461 
1462 	val = tr32(MAC_PHYCFG1);
1463 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1464 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1465 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1468 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1470 	}
1471 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1472 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1473 	tw32(MAC_PHYCFG1, val);
1474 
1475 	val = tr32(MAC_EXT_RGMII_MODE);
1476 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1477 		 MAC_RGMII_MODE_RX_QUALITY |
1478 		 MAC_RGMII_MODE_RX_ACTIVITY |
1479 		 MAC_RGMII_MODE_RX_ENG_DET |
1480 		 MAC_RGMII_MODE_TX_ENABLE |
1481 		 MAC_RGMII_MODE_TX_LOWPWR |
1482 		 MAC_RGMII_MODE_TX_RESET);
1483 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1484 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1485 			val |= MAC_RGMII_MODE_RX_INT_B |
1486 			       MAC_RGMII_MODE_RX_QUALITY |
1487 			       MAC_RGMII_MODE_RX_ACTIVITY |
1488 			       MAC_RGMII_MODE_RX_ENG_DET;
1489 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1490 			val |= MAC_RGMII_MODE_TX_ENABLE |
1491 			       MAC_RGMII_MODE_TX_LOWPWR |
1492 			       MAC_RGMII_MODE_TX_RESET;
1493 	}
1494 	tw32(MAC_EXT_RGMII_MODE, val);
1495 }
1496 
1497 static void tg3_mdio_start(struct tg3 *tp)
1498 {
1499 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1500 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1501 	udelay(80);
1502 
1503 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1504 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1505 		tg3_mdio_config_5785(tp);
1506 }
1507 
1508 static int tg3_mdio_init(struct tg3 *tp)
1509 {
1510 	int i;
1511 	u32 reg;
1512 	struct phy_device *phydev;
1513 
1514 	if (tg3_flag(tp, 5717_PLUS)) {
1515 		u32 is_serdes;
1516 
1517 		tp->phy_addr = tp->pci_fn + 1;
1518 
1519 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1520 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1521 		else
1522 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1523 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1524 		if (is_serdes)
1525 			tp->phy_addr += 7;
1526 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1527 		int addr;
1528 
1529 		addr = ssb_gige_get_phyaddr(tp->pdev);
1530 		if (addr < 0)
1531 			return addr;
1532 		tp->phy_addr = addr;
1533 	} else
1534 		tp->phy_addr = TG3_PHY_MII_ADDR;
1535 
1536 	tg3_mdio_start(tp);
1537 
1538 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1539 		return 0;
1540 
1541 	tp->mdio_bus = mdiobus_alloc();
1542 	if (tp->mdio_bus == NULL)
1543 		return -ENOMEM;
1544 
1545 	tp->mdio_bus->name     = "tg3 mdio bus";
1546 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1547 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1548 	tp->mdio_bus->priv     = tp;
1549 	tp->mdio_bus->parent   = &tp->pdev->dev;
1550 	tp->mdio_bus->read     = &tg3_mdio_read;
1551 	tp->mdio_bus->write    = &tg3_mdio_write;
1552 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1553 
1554 	/* The bus registration will look for all the PHYs on the mdio bus.
1555 	 * Unfortunately, it does not ensure the PHY is powered up before
1556 	 * accessing the PHY ID registers.  A chip reset is the
1557 	 * quickest way to bring the device back to an operational state..
1558 	 */
1559 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1560 		tg3_bmcr_reset(tp);
1561 
1562 	i = mdiobus_register(tp->mdio_bus);
1563 	if (i) {
1564 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1565 		mdiobus_free(tp->mdio_bus);
1566 		return i;
1567 	}
1568 
1569 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1570 
1571 	if (!phydev || !phydev->drv) {
1572 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1573 		mdiobus_unregister(tp->mdio_bus);
1574 		mdiobus_free(tp->mdio_bus);
1575 		return -ENODEV;
1576 	}
1577 
1578 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1579 	case PHY_ID_BCM57780:
1580 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1581 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1582 		break;
1583 	case PHY_ID_BCM50610:
1584 	case PHY_ID_BCM50610M:
1585 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1586 				     PHY_BRCM_RX_REFCLK_UNUSED |
1587 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1588 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1589 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1590 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1591 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1592 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1593 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1594 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1595 		/* fall through */
1596 	case PHY_ID_RTL8211C:
1597 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1598 		break;
1599 	case PHY_ID_RTL8201E:
1600 	case PHY_ID_BCMAC131:
1601 		phydev->interface = PHY_INTERFACE_MODE_MII;
1602 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1603 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1604 		break;
1605 	}
1606 
1607 	tg3_flag_set(tp, MDIOBUS_INITED);
1608 
1609 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1610 		tg3_mdio_config_5785(tp);
1611 
1612 	return 0;
1613 }
1614 
1615 static void tg3_mdio_fini(struct tg3 *tp)
1616 {
1617 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1618 		tg3_flag_clear(tp, MDIOBUS_INITED);
1619 		mdiobus_unregister(tp->mdio_bus);
1620 		mdiobus_free(tp->mdio_bus);
1621 	}
1622 }
1623 
1624 /* tp->lock is held. */
1625 static inline void tg3_generate_fw_event(struct tg3 *tp)
1626 {
1627 	u32 val;
1628 
1629 	val = tr32(GRC_RX_CPU_EVENT);
1630 	val |= GRC_RX_CPU_DRIVER_EVENT;
1631 	tw32_f(GRC_RX_CPU_EVENT, val);
1632 
1633 	tp->last_event_jiffies = jiffies;
1634 }
1635 
1636 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1637 
1638 /* tp->lock is held. */
1639 static void tg3_wait_for_event_ack(struct tg3 *tp)
1640 {
1641 	int i;
1642 	unsigned int delay_cnt;
1643 	long time_remain;
1644 
1645 	/* If enough time has passed, no wait is necessary. */
1646 	time_remain = (long)(tp->last_event_jiffies + 1 +
1647 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1648 		      (long)jiffies;
1649 	if (time_remain < 0)
1650 		return;
1651 
1652 	/* Check if we can shorten the wait time. */
1653 	delay_cnt = jiffies_to_usecs(time_remain);
1654 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1655 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1656 	delay_cnt = (delay_cnt >> 3) + 1;
1657 
1658 	for (i = 0; i < delay_cnt; i++) {
1659 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1660 			break;
1661 		if (pci_channel_offline(tp->pdev))
1662 			break;
1663 
1664 		udelay(8);
1665 	}
1666 }
1667 
1668 /* tp->lock is held. */
1669 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1670 {
1671 	u32 reg, val;
1672 
1673 	val = 0;
1674 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1675 		val = reg << 16;
1676 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1677 		val |= (reg & 0xffff);
1678 	*data++ = val;
1679 
1680 	val = 0;
1681 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1682 		val = reg << 16;
1683 	if (!tg3_readphy(tp, MII_LPA, &reg))
1684 		val |= (reg & 0xffff);
1685 	*data++ = val;
1686 
1687 	val = 0;
1688 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1689 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1690 			val = reg << 16;
1691 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1692 			val |= (reg & 0xffff);
1693 	}
1694 	*data++ = val;
1695 
1696 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1697 		val = reg << 16;
1698 	else
1699 		val = 0;
1700 	*data++ = val;
1701 }
1702 
1703 /* tp->lock is held. */
1704 static void tg3_ump_link_report(struct tg3 *tp)
1705 {
1706 	u32 data[4];
1707 
1708 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1709 		return;
1710 
1711 	tg3_phy_gather_ump_data(tp, data);
1712 
1713 	tg3_wait_for_event_ack(tp);
1714 
1715 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1716 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1717 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1718 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1719 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1720 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1721 
1722 	tg3_generate_fw_event(tp);
1723 }
1724 
1725 /* tp->lock is held. */
1726 static void tg3_stop_fw(struct tg3 *tp)
1727 {
1728 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1729 		/* Wait for RX cpu to ACK the previous event. */
1730 		tg3_wait_for_event_ack(tp);
1731 
1732 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1733 
1734 		tg3_generate_fw_event(tp);
1735 
1736 		/* Wait for RX cpu to ACK this event. */
1737 		tg3_wait_for_event_ack(tp);
1738 	}
1739 }
1740 
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1743 {
1744 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1745 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1746 
1747 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1748 		switch (kind) {
1749 		case RESET_KIND_INIT:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_START);
1752 			break;
1753 
1754 		case RESET_KIND_SHUTDOWN:
1755 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756 				      DRV_STATE_UNLOAD);
1757 			break;
1758 
1759 		case RESET_KIND_SUSPEND:
1760 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761 				      DRV_STATE_SUSPEND);
1762 			break;
1763 
1764 		default:
1765 			break;
1766 		}
1767 	}
1768 }
1769 
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1772 {
1773 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1774 		switch (kind) {
1775 		case RESET_KIND_INIT:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_START_DONE);
1778 			break;
1779 
1780 		case RESET_KIND_SHUTDOWN:
1781 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 				      DRV_STATE_UNLOAD_DONE);
1783 			break;
1784 
1785 		default:
1786 			break;
1787 		}
1788 	}
1789 }
1790 
1791 /* tp->lock is held. */
1792 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1793 {
1794 	if (tg3_flag(tp, ENABLE_ASF)) {
1795 		switch (kind) {
1796 		case RESET_KIND_INIT:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_START);
1799 			break;
1800 
1801 		case RESET_KIND_SHUTDOWN:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_UNLOAD);
1804 			break;
1805 
1806 		case RESET_KIND_SUSPEND:
1807 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1808 				      DRV_STATE_SUSPEND);
1809 			break;
1810 
1811 		default:
1812 			break;
1813 		}
1814 	}
1815 }
1816 
1817 static int tg3_poll_fw(struct tg3 *tp)
1818 {
1819 	int i;
1820 	u32 val;
1821 
1822 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1823 		return 0;
1824 
1825 	if (tg3_flag(tp, IS_SSB_CORE)) {
1826 		/* We don't use firmware. */
1827 		return 0;
1828 	}
1829 
1830 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1831 		/* Wait up to 20ms for init done. */
1832 		for (i = 0; i < 200; i++) {
1833 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1834 				return 0;
1835 			if (pci_channel_offline(tp->pdev))
1836 				return -ENODEV;
1837 
1838 			udelay(100);
1839 		}
1840 		return -ENODEV;
1841 	}
1842 
1843 	/* Wait for firmware initialization to complete. */
1844 	for (i = 0; i < 100000; i++) {
1845 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1846 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1847 			break;
1848 		if (pci_channel_offline(tp->pdev)) {
1849 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1850 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1851 				netdev_info(tp->dev, "No firmware running\n");
1852 			}
1853 
1854 			break;
1855 		}
1856 
1857 		udelay(10);
1858 	}
1859 
1860 	/* Chip might not be fitted with firmware.  Some Sun onboard
1861 	 * parts are configured like that.  So don't signal the timeout
1862 	 * of the above loop as an error, but do report the lack of
1863 	 * running firmware once.
1864 	 */
1865 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1866 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1867 
1868 		netdev_info(tp->dev, "No firmware running\n");
1869 	}
1870 
1871 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1872 		/* The 57765 A0 needs a little more
1873 		 * time to do some important work.
1874 		 */
1875 		mdelay(10);
1876 	}
1877 
1878 	return 0;
1879 }
1880 
1881 static void tg3_link_report(struct tg3 *tp)
1882 {
1883 	if (!netif_carrier_ok(tp->dev)) {
1884 		netif_info(tp, link, tp->dev, "Link is down\n");
1885 		tg3_ump_link_report(tp);
1886 	} else if (netif_msg_link(tp)) {
1887 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1888 			    (tp->link_config.active_speed == SPEED_1000 ?
1889 			     1000 :
1890 			     (tp->link_config.active_speed == SPEED_100 ?
1891 			      100 : 10)),
1892 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1893 			     "full" : "half"));
1894 
1895 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1896 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1897 			    "on" : "off",
1898 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1899 			    "on" : "off");
1900 
1901 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1902 			netdev_info(tp->dev, "EEE is %s\n",
1903 				    tp->setlpicnt ? "enabled" : "disabled");
1904 
1905 		tg3_ump_link_report(tp);
1906 	}
1907 
1908 	tp->link_up = netif_carrier_ok(tp->dev);
1909 }
1910 
1911 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1912 {
1913 	u32 flowctrl = 0;
1914 
1915 	if (adv & ADVERTISE_PAUSE_CAP) {
1916 		flowctrl |= FLOW_CTRL_RX;
1917 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1918 			flowctrl |= FLOW_CTRL_TX;
1919 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1920 		flowctrl |= FLOW_CTRL_TX;
1921 
1922 	return flowctrl;
1923 }
1924 
1925 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1926 {
1927 	u16 miireg;
1928 
1929 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1930 		miireg = ADVERTISE_1000XPAUSE;
1931 	else if (flow_ctrl & FLOW_CTRL_TX)
1932 		miireg = ADVERTISE_1000XPSE_ASYM;
1933 	else if (flow_ctrl & FLOW_CTRL_RX)
1934 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1935 	else
1936 		miireg = 0;
1937 
1938 	return miireg;
1939 }
1940 
1941 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1942 {
1943 	u32 flowctrl = 0;
1944 
1945 	if (adv & ADVERTISE_1000XPAUSE) {
1946 		flowctrl |= FLOW_CTRL_RX;
1947 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1948 			flowctrl |= FLOW_CTRL_TX;
1949 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1950 		flowctrl |= FLOW_CTRL_TX;
1951 
1952 	return flowctrl;
1953 }
1954 
1955 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1956 {
1957 	u8 cap = 0;
1958 
1959 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1960 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1961 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1962 		if (lcladv & ADVERTISE_1000XPAUSE)
1963 			cap = FLOW_CTRL_RX;
1964 		if (rmtadv & ADVERTISE_1000XPAUSE)
1965 			cap = FLOW_CTRL_TX;
1966 	}
1967 
1968 	return cap;
1969 }
1970 
1971 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1972 {
1973 	u8 autoneg;
1974 	u8 flowctrl = 0;
1975 	u32 old_rx_mode = tp->rx_mode;
1976 	u32 old_tx_mode = tp->tx_mode;
1977 
1978 	if (tg3_flag(tp, USE_PHYLIB))
1979 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1980 	else
1981 		autoneg = tp->link_config.autoneg;
1982 
1983 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1984 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1985 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1986 		else
1987 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1988 	} else
1989 		flowctrl = tp->link_config.flowctrl;
1990 
1991 	tp->link_config.active_flowctrl = flowctrl;
1992 
1993 	if (flowctrl & FLOW_CTRL_RX)
1994 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1995 	else
1996 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1997 
1998 	if (old_rx_mode != tp->rx_mode)
1999 		tw32_f(MAC_RX_MODE, tp->rx_mode);
2000 
2001 	if (flowctrl & FLOW_CTRL_TX)
2002 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2003 	else
2004 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2005 
2006 	if (old_tx_mode != tp->tx_mode)
2007 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2008 }
2009 
2010 static void tg3_adjust_link(struct net_device *dev)
2011 {
2012 	u8 oldflowctrl, linkmesg = 0;
2013 	u32 mac_mode, lcl_adv, rmt_adv;
2014 	struct tg3 *tp = netdev_priv(dev);
2015 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2016 
2017 	spin_lock_bh(&tp->lock);
2018 
2019 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2020 				    MAC_MODE_HALF_DUPLEX);
2021 
2022 	oldflowctrl = tp->link_config.active_flowctrl;
2023 
2024 	if (phydev->link) {
2025 		lcl_adv = 0;
2026 		rmt_adv = 0;
2027 
2028 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 		else if (phydev->speed == SPEED_1000 ||
2031 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2032 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2033 		else
2034 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2035 
2036 		if (phydev->duplex == DUPLEX_HALF)
2037 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2038 		else {
2039 			lcl_adv = mii_advertise_flowctrl(
2040 				  tp->link_config.flowctrl);
2041 
2042 			if (phydev->pause)
2043 				rmt_adv = LPA_PAUSE_CAP;
2044 			if (phydev->asym_pause)
2045 				rmt_adv |= LPA_PAUSE_ASYM;
2046 		}
2047 
2048 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2049 	} else
2050 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2051 
2052 	if (mac_mode != tp->mac_mode) {
2053 		tp->mac_mode = mac_mode;
2054 		tw32_f(MAC_MODE, tp->mac_mode);
2055 		udelay(40);
2056 	}
2057 
2058 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2059 		if (phydev->speed == SPEED_10)
2060 			tw32(MAC_MI_STAT,
2061 			     MAC_MI_STAT_10MBPS_MODE |
2062 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2063 		else
2064 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2065 	}
2066 
2067 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 	else
2073 		tw32(MAC_TX_LENGTHS,
2074 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2075 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2076 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2077 
2078 	if (phydev->link != tp->old_link ||
2079 	    phydev->speed != tp->link_config.active_speed ||
2080 	    phydev->duplex != tp->link_config.active_duplex ||
2081 	    oldflowctrl != tp->link_config.active_flowctrl)
2082 		linkmesg = 1;
2083 
2084 	tp->old_link = phydev->link;
2085 	tp->link_config.active_speed = phydev->speed;
2086 	tp->link_config.active_duplex = phydev->duplex;
2087 
2088 	spin_unlock_bh(&tp->lock);
2089 
2090 	if (linkmesg)
2091 		tg3_link_report(tp);
2092 }
2093 
2094 static int tg3_phy_init(struct tg3 *tp)
2095 {
2096 	struct phy_device *phydev;
2097 
2098 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2099 		return 0;
2100 
2101 	/* Bring the PHY back to a known state. */
2102 	tg3_bmcr_reset(tp);
2103 
2104 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2105 
2106 	/* Attach the MAC to the PHY. */
2107 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2108 			     tg3_adjust_link, phydev->interface);
2109 	if (IS_ERR(phydev)) {
2110 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2111 		return PTR_ERR(phydev);
2112 	}
2113 
2114 	/* Mask with MAC supported features. */
2115 	switch (phydev->interface) {
2116 	case PHY_INTERFACE_MODE_GMII:
2117 	case PHY_INTERFACE_MODE_RGMII:
2118 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2119 			phy_set_max_speed(phydev, SPEED_1000);
2120 			phy_support_asym_pause(phydev);
2121 			break;
2122 		}
2123 		/* fall through */
2124 	case PHY_INTERFACE_MODE_MII:
2125 		phy_set_max_speed(phydev, SPEED_100);
2126 		phy_support_asym_pause(phydev);
2127 		break;
2128 	default:
2129 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2130 		return -EINVAL;
2131 	}
2132 
2133 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2134 
2135 	phy_attached_info(phydev);
2136 
2137 	return 0;
2138 }
2139 
2140 static void tg3_phy_start(struct tg3 *tp)
2141 {
2142 	struct phy_device *phydev;
2143 
2144 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2145 		return;
2146 
2147 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2148 
2149 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2150 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2151 		phydev->speed = tp->link_config.speed;
2152 		phydev->duplex = tp->link_config.duplex;
2153 		phydev->autoneg = tp->link_config.autoneg;
2154 		ethtool_convert_legacy_u32_to_link_mode(
2155 			phydev->advertising, tp->link_config.advertising);
2156 	}
2157 
2158 	phy_start(phydev);
2159 
2160 	phy_start_aneg(phydev);
2161 }
2162 
2163 static void tg3_phy_stop(struct tg3 *tp)
2164 {
2165 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2166 		return;
2167 
2168 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169 }
2170 
2171 static void tg3_phy_fini(struct tg3 *tp)
2172 {
2173 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2174 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2175 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2176 	}
2177 }
2178 
2179 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2180 {
2181 	int err;
2182 	u32 val;
2183 
2184 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2185 		return 0;
2186 
2187 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2188 		/* Cannot do read-modify-write on 5401 */
2189 		err = tg3_phy_auxctl_write(tp,
2190 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2191 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2192 					   0x4c20);
2193 		goto done;
2194 	}
2195 
2196 	err = tg3_phy_auxctl_read(tp,
2197 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2198 	if (err)
2199 		return err;
2200 
2201 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2202 	err = tg3_phy_auxctl_write(tp,
2203 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2204 
2205 done:
2206 	return err;
2207 }
2208 
2209 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2210 {
2211 	u32 phytest;
2212 
2213 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2214 		u32 phy;
2215 
2216 		tg3_writephy(tp, MII_TG3_FET_TEST,
2217 			     phytest | MII_TG3_FET_SHADOW_EN);
2218 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2219 			if (enable)
2220 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2221 			else
2222 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2223 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2224 		}
2225 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2226 	}
2227 }
2228 
2229 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2230 {
2231 	u32 reg;
2232 
2233 	if (!tg3_flag(tp, 5705_PLUS) ||
2234 	    (tg3_flag(tp, 5717_PLUS) &&
2235 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2236 		return;
2237 
2238 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2239 		tg3_phy_fet_toggle_apd(tp, enable);
2240 		return;
2241 	}
2242 
2243 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2244 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2245 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2246 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2247 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2248 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2249 
2250 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2251 
2252 
2253 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2254 	if (enable)
2255 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2256 
2257 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2258 }
2259 
2260 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2261 {
2262 	u32 phy;
2263 
2264 	if (!tg3_flag(tp, 5705_PLUS) ||
2265 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2266 		return;
2267 
2268 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2269 		u32 ephy;
2270 
2271 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2272 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2273 
2274 			tg3_writephy(tp, MII_TG3_FET_TEST,
2275 				     ephy | MII_TG3_FET_SHADOW_EN);
2276 			if (!tg3_readphy(tp, reg, &phy)) {
2277 				if (enable)
2278 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2279 				else
2280 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2281 				tg3_writephy(tp, reg, phy);
2282 			}
2283 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2284 		}
2285 	} else {
2286 		int ret;
2287 
2288 		ret = tg3_phy_auxctl_read(tp,
2289 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2290 		if (!ret) {
2291 			if (enable)
2292 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2293 			else
2294 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2295 			tg3_phy_auxctl_write(tp,
2296 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2297 		}
2298 	}
2299 }
2300 
2301 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2302 {
2303 	int ret;
2304 	u32 val;
2305 
2306 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2307 		return;
2308 
2309 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2310 	if (!ret)
2311 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2312 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2313 }
2314 
2315 static void tg3_phy_apply_otp(struct tg3 *tp)
2316 {
2317 	u32 otp, phy;
2318 
2319 	if (!tp->phy_otp)
2320 		return;
2321 
2322 	otp = tp->phy_otp;
2323 
2324 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2325 		return;
2326 
2327 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2328 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2330 
2331 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2332 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2334 
2335 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2336 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2338 
2339 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2340 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2341 
2342 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2344 
2345 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2346 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2347 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2348 
2349 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2350 }
2351 
2352 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2353 {
2354 	u32 val;
2355 	struct ethtool_eee *dest = &tp->eee;
2356 
2357 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2358 		return;
2359 
2360 	if (eee)
2361 		dest = eee;
2362 
2363 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2364 		return;
2365 
2366 	/* Pull eee_active */
2367 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2368 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2369 		dest->eee_active = 1;
2370 	} else
2371 		dest->eee_active = 0;
2372 
2373 	/* Pull lp advertised settings */
2374 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2375 		return;
2376 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377 
2378 	/* Pull advertised and eee_enabled settings */
2379 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2380 		return;
2381 	dest->eee_enabled = !!val;
2382 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2383 
2384 	/* Pull tx_lpi_enabled */
2385 	val = tr32(TG3_CPMU_EEE_MODE);
2386 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2387 
2388 	/* Pull lpi timer value */
2389 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2390 }
2391 
2392 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2393 {
2394 	u32 val;
2395 
2396 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2397 		return;
2398 
2399 	tp->setlpicnt = 0;
2400 
2401 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2402 	    current_link_up &&
2403 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2404 	    (tp->link_config.active_speed == SPEED_100 ||
2405 	     tp->link_config.active_speed == SPEED_1000)) {
2406 		u32 eeectl;
2407 
2408 		if (tp->link_config.active_speed == SPEED_1000)
2409 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2410 		else
2411 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2412 
2413 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2414 
2415 		tg3_eee_pull_config(tp, NULL);
2416 		if (tp->eee.eee_active)
2417 			tp->setlpicnt = 2;
2418 	}
2419 
2420 	if (!tp->setlpicnt) {
2421 		if (current_link_up &&
2422 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2424 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2425 		}
2426 
2427 		val = tr32(TG3_CPMU_EEE_MODE);
2428 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2429 	}
2430 }
2431 
2432 static void tg3_phy_eee_enable(struct tg3 *tp)
2433 {
2434 	u32 val;
2435 
2436 	if (tp->link_config.active_speed == SPEED_1000 &&
2437 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2438 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2439 	     tg3_flag(tp, 57765_CLASS)) &&
2440 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2441 		val = MII_TG3_DSP_TAP26_ALNOKO |
2442 		      MII_TG3_DSP_TAP26_RMRXSTO;
2443 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2444 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2445 	}
2446 
2447 	val = tr32(TG3_CPMU_EEE_MODE);
2448 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2449 }
2450 
2451 static int tg3_wait_macro_done(struct tg3 *tp)
2452 {
2453 	int limit = 100;
2454 
2455 	while (limit--) {
2456 		u32 tmp32;
2457 
2458 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2459 			if ((tmp32 & 0x1000) == 0)
2460 				break;
2461 		}
2462 	}
2463 	if (limit < 0)
2464 		return -EBUSY;
2465 
2466 	return 0;
2467 }
2468 
2469 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2470 {
2471 	static const u32 test_pat[4][6] = {
2472 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2473 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2474 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2475 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2476 	};
2477 	int chan;
2478 
2479 	for (chan = 0; chan < 4; chan++) {
2480 		int i;
2481 
2482 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2483 			     (chan * 0x2000) | 0x0200);
2484 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2485 
2486 		for (i = 0; i < 6; i++)
2487 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2488 				     test_pat[chan][i]);
2489 
2490 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2491 		if (tg3_wait_macro_done(tp)) {
2492 			*resetp = 1;
2493 			return -EBUSY;
2494 		}
2495 
2496 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2497 			     (chan * 0x2000) | 0x0200);
2498 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2499 		if (tg3_wait_macro_done(tp)) {
2500 			*resetp = 1;
2501 			return -EBUSY;
2502 		}
2503 
2504 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2505 		if (tg3_wait_macro_done(tp)) {
2506 			*resetp = 1;
2507 			return -EBUSY;
2508 		}
2509 
2510 		for (i = 0; i < 6; i += 2) {
2511 			u32 low, high;
2512 
2513 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2514 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2515 			    tg3_wait_macro_done(tp)) {
2516 				*resetp = 1;
2517 				return -EBUSY;
2518 			}
2519 			low &= 0x7fff;
2520 			high &= 0x000f;
2521 			if (low != test_pat[chan][i] ||
2522 			    high != test_pat[chan][i+1]) {
2523 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2524 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2525 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2526 
2527 				return -EBUSY;
2528 			}
2529 		}
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2536 {
2537 	int chan;
2538 
2539 	for (chan = 0; chan < 4; chan++) {
2540 		int i;
2541 
2542 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2543 			     (chan * 0x2000) | 0x0200);
2544 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2545 		for (i = 0; i < 6; i++)
2546 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2547 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2548 		if (tg3_wait_macro_done(tp))
2549 			return -EBUSY;
2550 	}
2551 
2552 	return 0;
2553 }
2554 
2555 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2556 {
2557 	u32 reg32, phy9_orig;
2558 	int retries, do_phy_reset, err;
2559 
2560 	retries = 10;
2561 	do_phy_reset = 1;
2562 	do {
2563 		if (do_phy_reset) {
2564 			err = tg3_bmcr_reset(tp);
2565 			if (err)
2566 				return err;
2567 			do_phy_reset = 0;
2568 		}
2569 
2570 		/* Disable transmitter and interrupt.  */
2571 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2572 			continue;
2573 
2574 		reg32 |= 0x3000;
2575 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2576 
2577 		/* Set full-duplex, 1000 mbps.  */
2578 		tg3_writephy(tp, MII_BMCR,
2579 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2580 
2581 		/* Set to master mode.  */
2582 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2583 			continue;
2584 
2585 		tg3_writephy(tp, MII_CTRL1000,
2586 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2587 
2588 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2589 		if (err)
2590 			return err;
2591 
2592 		/* Block the PHY control access.  */
2593 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2594 
2595 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2596 		if (!err)
2597 			break;
2598 	} while (--retries);
2599 
2600 	err = tg3_phy_reset_chanpat(tp);
2601 	if (err)
2602 		return err;
2603 
2604 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2605 
2606 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2607 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2608 
2609 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2610 
2611 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2612 
2613 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2614 	if (err)
2615 		return err;
2616 
2617 	reg32 &= ~0x3000;
2618 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2619 
2620 	return 0;
2621 }
2622 
2623 static void tg3_carrier_off(struct tg3 *tp)
2624 {
2625 	netif_carrier_off(tp->dev);
2626 	tp->link_up = false;
2627 }
2628 
2629 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2630 {
2631 	if (tg3_flag(tp, ENABLE_ASF))
2632 		netdev_warn(tp->dev,
2633 			    "Management side-band traffic will be interrupted during phy settings change\n");
2634 }
2635 
2636 /* This will reset the tigon3 PHY if there is no valid
2637  * link unless the FORCE argument is non-zero.
2638  */
2639 static int tg3_phy_reset(struct tg3 *tp)
2640 {
2641 	u32 val, cpmuctrl;
2642 	int err;
2643 
2644 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2645 		val = tr32(GRC_MISC_CFG);
2646 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2647 		udelay(40);
2648 	}
2649 	err  = tg3_readphy(tp, MII_BMSR, &val);
2650 	err |= tg3_readphy(tp, MII_BMSR, &val);
2651 	if (err != 0)
2652 		return -EBUSY;
2653 
2654 	if (netif_running(tp->dev) && tp->link_up) {
2655 		netif_carrier_off(tp->dev);
2656 		tg3_link_report(tp);
2657 	}
2658 
2659 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2660 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2661 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2662 		err = tg3_phy_reset_5703_4_5(tp);
2663 		if (err)
2664 			return err;
2665 		goto out;
2666 	}
2667 
2668 	cpmuctrl = 0;
2669 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2670 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2671 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2672 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2673 			tw32(TG3_CPMU_CTRL,
2674 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2675 	}
2676 
2677 	err = tg3_bmcr_reset(tp);
2678 	if (err)
2679 		return err;
2680 
2681 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2682 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2683 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2684 
2685 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2686 	}
2687 
2688 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2689 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2690 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2691 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2692 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2693 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2694 			udelay(40);
2695 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2696 		}
2697 	}
2698 
2699 	if (tg3_flag(tp, 5717_PLUS) &&
2700 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2701 		return 0;
2702 
2703 	tg3_phy_apply_otp(tp);
2704 
2705 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2706 		tg3_phy_toggle_apd(tp, true);
2707 	else
2708 		tg3_phy_toggle_apd(tp, false);
2709 
2710 out:
2711 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2712 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2714 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2715 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2716 	}
2717 
2718 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2719 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2720 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2721 	}
2722 
2723 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2724 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2726 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2727 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2728 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2729 		}
2730 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2731 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2732 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2733 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2734 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2735 				tg3_writephy(tp, MII_TG3_TEST1,
2736 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2737 			} else
2738 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2739 
2740 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2741 		}
2742 	}
2743 
2744 	/* Set Extended packet length bit (bit 14) on all chips that */
2745 	/* support jumbo frames */
2746 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2747 		/* Cannot do read-modify-write on 5401 */
2748 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2749 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 		/* Set bit 14 with read-modify-write to preserve other bits */
2751 		err = tg3_phy_auxctl_read(tp,
2752 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2753 		if (!err)
2754 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2755 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2756 	}
2757 
2758 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2759 	 * jumbo frames transmission.
2760 	 */
2761 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2762 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2763 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2764 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2765 	}
2766 
2767 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2768 		/* adjust output voltage */
2769 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2770 	}
2771 
2772 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2773 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2774 
2775 	tg3_phy_toggle_automdix(tp, true);
2776 	tg3_phy_set_wirespeed(tp);
2777 	return 0;
2778 }
2779 
2780 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2781 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2782 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2783 					  TG3_GPIO_MSG_NEED_VAUX)
2784 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2785 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2786 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2787 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2788 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2789 
2790 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2791 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2792 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2793 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2794 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2795 
2796 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2797 {
2798 	u32 status, shift;
2799 
2800 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2802 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2803 	else
2804 		status = tr32(TG3_CPMU_DRV_STATUS);
2805 
2806 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2807 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2808 	status |= (newstat << shift);
2809 
2810 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2811 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2812 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2813 	else
2814 		tw32(TG3_CPMU_DRV_STATUS, status);
2815 
2816 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2817 }
2818 
2819 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2820 {
2821 	if (!tg3_flag(tp, IS_NIC))
2822 		return 0;
2823 
2824 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2825 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2826 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2827 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2828 			return -EIO;
2829 
2830 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2831 
2832 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 
2835 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2836 	} else {
2837 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 	}
2840 
2841 	return 0;
2842 }
2843 
2844 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2845 {
2846 	u32 grc_local_ctrl;
2847 
2848 	if (!tg3_flag(tp, IS_NIC) ||
2849 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2850 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2851 		return;
2852 
2853 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2854 
2855 	tw32_wait_f(GRC_LOCAL_CTRL,
2856 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2857 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 
2859 	tw32_wait_f(GRC_LOCAL_CTRL,
2860 		    grc_local_ctrl,
2861 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2862 
2863 	tw32_wait_f(GRC_LOCAL_CTRL,
2864 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2865 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2866 }
2867 
2868 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2869 {
2870 	if (!tg3_flag(tp, IS_NIC))
2871 		return;
2872 
2873 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2874 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2875 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2876 			    (GRC_LCLCTRL_GPIO_OE0 |
2877 			     GRC_LCLCTRL_GPIO_OE1 |
2878 			     GRC_LCLCTRL_GPIO_OE2 |
2879 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2880 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2881 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2883 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2884 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2885 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2886 				     GRC_LCLCTRL_GPIO_OE1 |
2887 				     GRC_LCLCTRL_GPIO_OE2 |
2888 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2889 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2890 				     tp->grc_local_ctrl;
2891 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2892 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2893 
2894 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2895 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2896 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2897 
2898 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2899 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2900 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 	} else {
2902 		u32 no_gpio2;
2903 		u32 grc_local_ctrl = 0;
2904 
2905 		/* Workaround to prevent overdrawing Amps. */
2906 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2907 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2908 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2909 				    grc_local_ctrl,
2910 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2911 		}
2912 
2913 		/* On 5753 and variants, GPIO2 cannot be used. */
2914 		no_gpio2 = tp->nic_sram_data_cfg &
2915 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2916 
2917 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2918 				  GRC_LCLCTRL_GPIO_OE1 |
2919 				  GRC_LCLCTRL_GPIO_OE2 |
2920 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2921 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2922 		if (no_gpio2) {
2923 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2924 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2925 		}
2926 		tw32_wait_f(GRC_LOCAL_CTRL,
2927 			    tp->grc_local_ctrl | grc_local_ctrl,
2928 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2929 
2930 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2931 
2932 		tw32_wait_f(GRC_LOCAL_CTRL,
2933 			    tp->grc_local_ctrl | grc_local_ctrl,
2934 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 
2936 		if (!no_gpio2) {
2937 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2938 			tw32_wait_f(GRC_LOCAL_CTRL,
2939 				    tp->grc_local_ctrl | grc_local_ctrl,
2940 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2941 		}
2942 	}
2943 }
2944 
2945 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2946 {
2947 	u32 msg = 0;
2948 
2949 	/* Serialize power state transitions */
2950 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2951 		return;
2952 
2953 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2954 		msg = TG3_GPIO_MSG_NEED_VAUX;
2955 
2956 	msg = tg3_set_function_status(tp, msg);
2957 
2958 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2959 		goto done;
2960 
2961 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2962 		tg3_pwrsrc_switch_to_vaux(tp);
2963 	else
2964 		tg3_pwrsrc_die_with_vmain(tp);
2965 
2966 done:
2967 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2968 }
2969 
2970 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2971 {
2972 	bool need_vaux = false;
2973 
2974 	/* The GPIOs do something completely different on 57765. */
2975 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2976 		return;
2977 
2978 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2979 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2980 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2981 		tg3_frob_aux_power_5717(tp, include_wol ?
2982 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2983 		return;
2984 	}
2985 
2986 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2987 		struct net_device *dev_peer;
2988 
2989 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2990 
2991 		/* remove_one() may have been run on the peer. */
2992 		if (dev_peer) {
2993 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2994 
2995 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2996 				return;
2997 
2998 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2999 			    tg3_flag(tp_peer, ENABLE_ASF))
3000 				need_vaux = true;
3001 		}
3002 	}
3003 
3004 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3005 	    tg3_flag(tp, ENABLE_ASF))
3006 		need_vaux = true;
3007 
3008 	if (need_vaux)
3009 		tg3_pwrsrc_switch_to_vaux(tp);
3010 	else
3011 		tg3_pwrsrc_die_with_vmain(tp);
3012 }
3013 
3014 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3015 {
3016 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3017 		return 1;
3018 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3019 		if (speed != SPEED_10)
3020 			return 1;
3021 	} else if (speed == SPEED_10)
3022 		return 1;
3023 
3024 	return 0;
3025 }
3026 
3027 static bool tg3_phy_power_bug(struct tg3 *tp)
3028 {
3029 	switch (tg3_asic_rev(tp)) {
3030 	case ASIC_REV_5700:
3031 	case ASIC_REV_5704:
3032 		return true;
3033 	case ASIC_REV_5780:
3034 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3035 			return true;
3036 		return false;
3037 	case ASIC_REV_5717:
3038 		if (!tp->pci_fn)
3039 			return true;
3040 		return false;
3041 	case ASIC_REV_5719:
3042 	case ASIC_REV_5720:
3043 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3044 		    !tp->pci_fn)
3045 			return true;
3046 		return false;
3047 	}
3048 
3049 	return false;
3050 }
3051 
3052 static bool tg3_phy_led_bug(struct tg3 *tp)
3053 {
3054 	switch (tg3_asic_rev(tp)) {
3055 	case ASIC_REV_5719:
3056 	case ASIC_REV_5720:
3057 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3058 		    !tp->pci_fn)
3059 			return true;
3060 		return false;
3061 	}
3062 
3063 	return false;
3064 }
3065 
3066 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3067 {
3068 	u32 val;
3069 
3070 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3071 		return;
3072 
3073 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3074 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3075 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3076 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3077 
3078 			sg_dig_ctrl |=
3079 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3080 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3081 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3082 		}
3083 		return;
3084 	}
3085 
3086 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3087 		tg3_bmcr_reset(tp);
3088 		val = tr32(GRC_MISC_CFG);
3089 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3090 		udelay(40);
3091 		return;
3092 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3093 		u32 phytest;
3094 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3095 			u32 phy;
3096 
3097 			tg3_writephy(tp, MII_ADVERTISE, 0);
3098 			tg3_writephy(tp, MII_BMCR,
3099 				     BMCR_ANENABLE | BMCR_ANRESTART);
3100 
3101 			tg3_writephy(tp, MII_TG3_FET_TEST,
3102 				     phytest | MII_TG3_FET_SHADOW_EN);
3103 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3104 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3105 				tg3_writephy(tp,
3106 					     MII_TG3_FET_SHDW_AUXMODE4,
3107 					     phy);
3108 			}
3109 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3110 		}
3111 		return;
3112 	} else if (do_low_power) {
3113 		if (!tg3_phy_led_bug(tp))
3114 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3115 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3116 
3117 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3118 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3119 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3120 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3121 	}
3122 
3123 	/* The PHY should not be powered down on some chips because
3124 	 * of bugs.
3125 	 */
3126 	if (tg3_phy_power_bug(tp))
3127 		return;
3128 
3129 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3130 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3131 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3132 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3133 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3134 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3135 	}
3136 
3137 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3138 }
3139 
3140 /* tp->lock is held. */
3141 static int tg3_nvram_lock(struct tg3 *tp)
3142 {
3143 	if (tg3_flag(tp, NVRAM)) {
3144 		int i;
3145 
3146 		if (tp->nvram_lock_cnt == 0) {
3147 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3148 			for (i = 0; i < 8000; i++) {
3149 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3150 					break;
3151 				udelay(20);
3152 			}
3153 			if (i == 8000) {
3154 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3155 				return -ENODEV;
3156 			}
3157 		}
3158 		tp->nvram_lock_cnt++;
3159 	}
3160 	return 0;
3161 }
3162 
3163 /* tp->lock is held. */
3164 static void tg3_nvram_unlock(struct tg3 *tp)
3165 {
3166 	if (tg3_flag(tp, NVRAM)) {
3167 		if (tp->nvram_lock_cnt > 0)
3168 			tp->nvram_lock_cnt--;
3169 		if (tp->nvram_lock_cnt == 0)
3170 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3171 	}
3172 }
3173 
3174 /* tp->lock is held. */
3175 static void tg3_enable_nvram_access(struct tg3 *tp)
3176 {
3177 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 		u32 nvaccess = tr32(NVRAM_ACCESS);
3179 
3180 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3181 	}
3182 }
3183 
3184 /* tp->lock is held. */
3185 static void tg3_disable_nvram_access(struct tg3 *tp)
3186 {
3187 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3188 		u32 nvaccess = tr32(NVRAM_ACCESS);
3189 
3190 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3191 	}
3192 }
3193 
3194 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3195 					u32 offset, u32 *val)
3196 {
3197 	u32 tmp;
3198 	int i;
3199 
3200 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3201 		return -EINVAL;
3202 
3203 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3204 					EEPROM_ADDR_DEVID_MASK |
3205 					EEPROM_ADDR_READ);
3206 	tw32(GRC_EEPROM_ADDR,
3207 	     tmp |
3208 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3209 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3210 	      EEPROM_ADDR_ADDR_MASK) |
3211 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3212 
3213 	for (i = 0; i < 1000; i++) {
3214 		tmp = tr32(GRC_EEPROM_ADDR);
3215 
3216 		if (tmp & EEPROM_ADDR_COMPLETE)
3217 			break;
3218 		msleep(1);
3219 	}
3220 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3221 		return -EBUSY;
3222 
3223 	tmp = tr32(GRC_EEPROM_DATA);
3224 
3225 	/*
3226 	 * The data will always be opposite the native endian
3227 	 * format.  Perform a blind byteswap to compensate.
3228 	 */
3229 	*val = swab32(tmp);
3230 
3231 	return 0;
3232 }
3233 
3234 #define NVRAM_CMD_TIMEOUT 10000
3235 
3236 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3237 {
3238 	int i;
3239 
3240 	tw32(NVRAM_CMD, nvram_cmd);
3241 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3242 		usleep_range(10, 40);
3243 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3244 			udelay(10);
3245 			break;
3246 		}
3247 	}
3248 
3249 	if (i == NVRAM_CMD_TIMEOUT)
3250 		return -EBUSY;
3251 
3252 	return 0;
3253 }
3254 
3255 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3256 {
3257 	if (tg3_flag(tp, NVRAM) &&
3258 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3259 	    tg3_flag(tp, FLASH) &&
3260 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3261 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3262 
3263 		addr = ((addr / tp->nvram_pagesize) <<
3264 			ATMEL_AT45DB0X1B_PAGE_POS) +
3265 		       (addr % tp->nvram_pagesize);
3266 
3267 	return addr;
3268 }
3269 
3270 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3271 {
3272 	if (tg3_flag(tp, NVRAM) &&
3273 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3274 	    tg3_flag(tp, FLASH) &&
3275 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3276 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3277 
3278 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3279 			tp->nvram_pagesize) +
3280 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3281 
3282 	return addr;
3283 }
3284 
3285 /* NOTE: Data read in from NVRAM is byteswapped according to
3286  * the byteswapping settings for all other register accesses.
3287  * tg3 devices are BE devices, so on a BE machine, the data
3288  * returned will be exactly as it is seen in NVRAM.  On a LE
3289  * machine, the 32-bit value will be byteswapped.
3290  */
3291 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3292 {
3293 	int ret;
3294 
3295 	if (!tg3_flag(tp, NVRAM))
3296 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3297 
3298 	offset = tg3_nvram_phys_addr(tp, offset);
3299 
3300 	if (offset > NVRAM_ADDR_MSK)
3301 		return -EINVAL;
3302 
3303 	ret = tg3_nvram_lock(tp);
3304 	if (ret)
3305 		return ret;
3306 
3307 	tg3_enable_nvram_access(tp);
3308 
3309 	tw32(NVRAM_ADDR, offset);
3310 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3311 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3312 
3313 	if (ret == 0)
3314 		*val = tr32(NVRAM_RDDATA);
3315 
3316 	tg3_disable_nvram_access(tp);
3317 
3318 	tg3_nvram_unlock(tp);
3319 
3320 	return ret;
3321 }
3322 
3323 /* Ensures NVRAM data is in bytestream format. */
3324 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3325 {
3326 	u32 v;
3327 	int res = tg3_nvram_read(tp, offset, &v);
3328 	if (!res)
3329 		*val = cpu_to_be32(v);
3330 	return res;
3331 }
3332 
3333 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3334 				    u32 offset, u32 len, u8 *buf)
3335 {
3336 	int i, j, rc = 0;
3337 	u32 val;
3338 
3339 	for (i = 0; i < len; i += 4) {
3340 		u32 addr;
3341 		__be32 data;
3342 
3343 		addr = offset + i;
3344 
3345 		memcpy(&data, buf + i, 4);
3346 
3347 		/*
3348 		 * The SEEPROM interface expects the data to always be opposite
3349 		 * the native endian format.  We accomplish this by reversing
3350 		 * all the operations that would have been performed on the
3351 		 * data from a call to tg3_nvram_read_be32().
3352 		 */
3353 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3354 
3355 		val = tr32(GRC_EEPROM_ADDR);
3356 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3357 
3358 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3359 			EEPROM_ADDR_READ);
3360 		tw32(GRC_EEPROM_ADDR, val |
3361 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3362 			(addr & EEPROM_ADDR_ADDR_MASK) |
3363 			EEPROM_ADDR_START |
3364 			EEPROM_ADDR_WRITE);
3365 
3366 		for (j = 0; j < 1000; j++) {
3367 			val = tr32(GRC_EEPROM_ADDR);
3368 
3369 			if (val & EEPROM_ADDR_COMPLETE)
3370 				break;
3371 			msleep(1);
3372 		}
3373 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3374 			rc = -EBUSY;
3375 			break;
3376 		}
3377 	}
3378 
3379 	return rc;
3380 }
3381 
3382 /* offset and length are dword aligned */
3383 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3384 		u8 *buf)
3385 {
3386 	int ret = 0;
3387 	u32 pagesize = tp->nvram_pagesize;
3388 	u32 pagemask = pagesize - 1;
3389 	u32 nvram_cmd;
3390 	u8 *tmp;
3391 
3392 	tmp = kmalloc(pagesize, GFP_KERNEL);
3393 	if (tmp == NULL)
3394 		return -ENOMEM;
3395 
3396 	while (len) {
3397 		int j;
3398 		u32 phy_addr, page_off, size;
3399 
3400 		phy_addr = offset & ~pagemask;
3401 
3402 		for (j = 0; j < pagesize; j += 4) {
3403 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3404 						  (__be32 *) (tmp + j));
3405 			if (ret)
3406 				break;
3407 		}
3408 		if (ret)
3409 			break;
3410 
3411 		page_off = offset & pagemask;
3412 		size = pagesize;
3413 		if (len < size)
3414 			size = len;
3415 
3416 		len -= size;
3417 
3418 		memcpy(tmp + page_off, buf, size);
3419 
3420 		offset = offset + (pagesize - page_off);
3421 
3422 		tg3_enable_nvram_access(tp);
3423 
3424 		/*
3425 		 * Before we can erase the flash page, we need
3426 		 * to issue a special "write enable" command.
3427 		 */
3428 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3429 
3430 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431 			break;
3432 
3433 		/* Erase the target page */
3434 		tw32(NVRAM_ADDR, phy_addr);
3435 
3436 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3437 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3438 
3439 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440 			break;
3441 
3442 		/* Issue another write enable to start the write. */
3443 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3444 
3445 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3446 			break;
3447 
3448 		for (j = 0; j < pagesize; j += 4) {
3449 			__be32 data;
3450 
3451 			data = *((__be32 *) (tmp + j));
3452 
3453 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3454 
3455 			tw32(NVRAM_ADDR, phy_addr + j);
3456 
3457 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3458 				NVRAM_CMD_WR;
3459 
3460 			if (j == 0)
3461 				nvram_cmd |= NVRAM_CMD_FIRST;
3462 			else if (j == (pagesize - 4))
3463 				nvram_cmd |= NVRAM_CMD_LAST;
3464 
3465 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3466 			if (ret)
3467 				break;
3468 		}
3469 		if (ret)
3470 			break;
3471 	}
3472 
3473 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3474 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3475 
3476 	kfree(tmp);
3477 
3478 	return ret;
3479 }
3480 
3481 /* offset and length are dword aligned */
3482 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3483 		u8 *buf)
3484 {
3485 	int i, ret = 0;
3486 
3487 	for (i = 0; i < len; i += 4, offset += 4) {
3488 		u32 page_off, phy_addr, nvram_cmd;
3489 		__be32 data;
3490 
3491 		memcpy(&data, buf + i, 4);
3492 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3493 
3494 		page_off = offset % tp->nvram_pagesize;
3495 
3496 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3497 
3498 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3499 
3500 		if (page_off == 0 || i == 0)
3501 			nvram_cmd |= NVRAM_CMD_FIRST;
3502 		if (page_off == (tp->nvram_pagesize - 4))
3503 			nvram_cmd |= NVRAM_CMD_LAST;
3504 
3505 		if (i == (len - 4))
3506 			nvram_cmd |= NVRAM_CMD_LAST;
3507 
3508 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3509 		    !tg3_flag(tp, FLASH) ||
3510 		    !tg3_flag(tp, 57765_PLUS))
3511 			tw32(NVRAM_ADDR, phy_addr);
3512 
3513 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3514 		    !tg3_flag(tp, 5755_PLUS) &&
3515 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3516 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3517 			u32 cmd;
3518 
3519 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3520 			ret = tg3_nvram_exec_cmd(tp, cmd);
3521 			if (ret)
3522 				break;
3523 		}
3524 		if (!tg3_flag(tp, FLASH)) {
3525 			/* We always do complete word writes to eeprom. */
3526 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3527 		}
3528 
3529 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3530 		if (ret)
3531 			break;
3532 	}
3533 	return ret;
3534 }
3535 
3536 /* offset and length are dword aligned */
3537 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3538 {
3539 	int ret;
3540 
3541 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3542 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3543 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3544 		udelay(40);
3545 	}
3546 
3547 	if (!tg3_flag(tp, NVRAM)) {
3548 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3549 	} else {
3550 		u32 grc_mode;
3551 
3552 		ret = tg3_nvram_lock(tp);
3553 		if (ret)
3554 			return ret;
3555 
3556 		tg3_enable_nvram_access(tp);
3557 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3558 			tw32(NVRAM_WRITE1, 0x406);
3559 
3560 		grc_mode = tr32(GRC_MODE);
3561 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3562 
3563 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3564 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3565 				buf);
3566 		} else {
3567 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3568 				buf);
3569 		}
3570 
3571 		grc_mode = tr32(GRC_MODE);
3572 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3573 
3574 		tg3_disable_nvram_access(tp);
3575 		tg3_nvram_unlock(tp);
3576 	}
3577 
3578 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3579 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3580 		udelay(40);
3581 	}
3582 
3583 	return ret;
3584 }
3585 
3586 #define RX_CPU_SCRATCH_BASE	0x30000
3587 #define RX_CPU_SCRATCH_SIZE	0x04000
3588 #define TX_CPU_SCRATCH_BASE	0x34000
3589 #define TX_CPU_SCRATCH_SIZE	0x04000
3590 
3591 /* tp->lock is held. */
3592 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3593 {
3594 	int i;
3595 	const int iters = 10000;
3596 
3597 	for (i = 0; i < iters; i++) {
3598 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3599 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3600 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3601 			break;
3602 		if (pci_channel_offline(tp->pdev))
3603 			return -EBUSY;
3604 	}
3605 
3606 	return (i == iters) ? -EBUSY : 0;
3607 }
3608 
3609 /* tp->lock is held. */
3610 static int tg3_rxcpu_pause(struct tg3 *tp)
3611 {
3612 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3613 
3614 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3615 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3616 	udelay(10);
3617 
3618 	return rc;
3619 }
3620 
3621 /* tp->lock is held. */
3622 static int tg3_txcpu_pause(struct tg3 *tp)
3623 {
3624 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3625 }
3626 
3627 /* tp->lock is held. */
3628 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3629 {
3630 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3631 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3632 }
3633 
3634 /* tp->lock is held. */
3635 static void tg3_rxcpu_resume(struct tg3 *tp)
3636 {
3637 	tg3_resume_cpu(tp, RX_CPU_BASE);
3638 }
3639 
3640 /* tp->lock is held. */
3641 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3642 {
3643 	int rc;
3644 
3645 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3646 
3647 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3648 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3649 
3650 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3651 		return 0;
3652 	}
3653 	if (cpu_base == RX_CPU_BASE) {
3654 		rc = tg3_rxcpu_pause(tp);
3655 	} else {
3656 		/*
3657 		 * There is only an Rx CPU for the 5750 derivative in the
3658 		 * BCM4785.
3659 		 */
3660 		if (tg3_flag(tp, IS_SSB_CORE))
3661 			return 0;
3662 
3663 		rc = tg3_txcpu_pause(tp);
3664 	}
3665 
3666 	if (rc) {
3667 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3668 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3669 		return -ENODEV;
3670 	}
3671 
3672 	/* Clear firmware's nvram arbitration. */
3673 	if (tg3_flag(tp, NVRAM))
3674 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3675 	return 0;
3676 }
3677 
3678 static int tg3_fw_data_len(struct tg3 *tp,
3679 			   const struct tg3_firmware_hdr *fw_hdr)
3680 {
3681 	int fw_len;
3682 
3683 	/* Non fragmented firmware have one firmware header followed by a
3684 	 * contiguous chunk of data to be written. The length field in that
3685 	 * header is not the length of data to be written but the complete
3686 	 * length of the bss. The data length is determined based on
3687 	 * tp->fw->size minus headers.
3688 	 *
3689 	 * Fragmented firmware have a main header followed by multiple
3690 	 * fragments. Each fragment is identical to non fragmented firmware
3691 	 * with a firmware header followed by a contiguous chunk of data. In
3692 	 * the main header, the length field is unused and set to 0xffffffff.
3693 	 * In each fragment header the length is the entire size of that
3694 	 * fragment i.e. fragment data + header length. Data length is
3695 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3696 	 */
3697 	if (tp->fw_len == 0xffffffff)
3698 		fw_len = be32_to_cpu(fw_hdr->len);
3699 	else
3700 		fw_len = tp->fw->size;
3701 
3702 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3703 }
3704 
3705 /* tp->lock is held. */
3706 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3707 				 u32 cpu_scratch_base, int cpu_scratch_size,
3708 				 const struct tg3_firmware_hdr *fw_hdr)
3709 {
3710 	int err, i;
3711 	void (*write_op)(struct tg3 *, u32, u32);
3712 	int total_len = tp->fw->size;
3713 
3714 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3715 		netdev_err(tp->dev,
3716 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3717 			   __func__);
3718 		return -EINVAL;
3719 	}
3720 
3721 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3722 		write_op = tg3_write_mem;
3723 	else
3724 		write_op = tg3_write_indirect_reg32;
3725 
3726 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3727 		/* It is possible that bootcode is still loading at this point.
3728 		 * Get the nvram lock first before halting the cpu.
3729 		 */
3730 		int lock_err = tg3_nvram_lock(tp);
3731 		err = tg3_halt_cpu(tp, cpu_base);
3732 		if (!lock_err)
3733 			tg3_nvram_unlock(tp);
3734 		if (err)
3735 			goto out;
3736 
3737 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3738 			write_op(tp, cpu_scratch_base + i, 0);
3739 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3740 		tw32(cpu_base + CPU_MODE,
3741 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3742 	} else {
3743 		/* Subtract additional main header for fragmented firmware and
3744 		 * advance to the first fragment
3745 		 */
3746 		total_len -= TG3_FW_HDR_LEN;
3747 		fw_hdr++;
3748 	}
3749 
3750 	do {
3751 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3752 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3753 			write_op(tp, cpu_scratch_base +
3754 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3755 				     (i * sizeof(u32)),
3756 				 be32_to_cpu(fw_data[i]));
3757 
3758 		total_len -= be32_to_cpu(fw_hdr->len);
3759 
3760 		/* Advance to next fragment */
3761 		fw_hdr = (struct tg3_firmware_hdr *)
3762 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3763 	} while (total_len > 0);
3764 
3765 	err = 0;
3766 
3767 out:
3768 	return err;
3769 }
3770 
3771 /* tp->lock is held. */
3772 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3773 {
3774 	int i;
3775 	const int iters = 5;
3776 
3777 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3778 	tw32_f(cpu_base + CPU_PC, pc);
3779 
3780 	for (i = 0; i < iters; i++) {
3781 		if (tr32(cpu_base + CPU_PC) == pc)
3782 			break;
3783 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3784 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3785 		tw32_f(cpu_base + CPU_PC, pc);
3786 		udelay(1000);
3787 	}
3788 
3789 	return (i == iters) ? -EBUSY : 0;
3790 }
3791 
3792 /* tp->lock is held. */
3793 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3794 {
3795 	const struct tg3_firmware_hdr *fw_hdr;
3796 	int err;
3797 
3798 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3799 
3800 	/* Firmware blob starts with version numbers, followed by
3801 	   start address and length. We are setting complete length.
3802 	   length = end_address_of_bss - start_address_of_text.
3803 	   Remainder is the blob to be loaded contiguously
3804 	   from start address. */
3805 
3806 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3807 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3808 				    fw_hdr);
3809 	if (err)
3810 		return err;
3811 
3812 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3813 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3814 				    fw_hdr);
3815 	if (err)
3816 		return err;
3817 
3818 	/* Now startup only the RX cpu. */
3819 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3820 				       be32_to_cpu(fw_hdr->base_addr));
3821 	if (err) {
3822 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3823 			   "should be %08x\n", __func__,
3824 			   tr32(RX_CPU_BASE + CPU_PC),
3825 				be32_to_cpu(fw_hdr->base_addr));
3826 		return -ENODEV;
3827 	}
3828 
3829 	tg3_rxcpu_resume(tp);
3830 
3831 	return 0;
3832 }
3833 
3834 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3835 {
3836 	const int iters = 1000;
3837 	int i;
3838 	u32 val;
3839 
3840 	/* Wait for boot code to complete initialization and enter service
3841 	 * loop. It is then safe to download service patches
3842 	 */
3843 	for (i = 0; i < iters; i++) {
3844 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3845 			break;
3846 
3847 		udelay(10);
3848 	}
3849 
3850 	if (i == iters) {
3851 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3852 		return -EBUSY;
3853 	}
3854 
3855 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3856 	if (val & 0xff) {
3857 		netdev_warn(tp->dev,
3858 			    "Other patches exist. Not downloading EEE patch\n");
3859 		return -EEXIST;
3860 	}
3861 
3862 	return 0;
3863 }
3864 
3865 /* tp->lock is held. */
3866 static void tg3_load_57766_firmware(struct tg3 *tp)
3867 {
3868 	struct tg3_firmware_hdr *fw_hdr;
3869 
3870 	if (!tg3_flag(tp, NO_NVRAM))
3871 		return;
3872 
3873 	if (tg3_validate_rxcpu_state(tp))
3874 		return;
3875 
3876 	if (!tp->fw)
3877 		return;
3878 
3879 	/* This firmware blob has a different format than older firmware
3880 	 * releases as given below. The main difference is we have fragmented
3881 	 * data to be written to non-contiguous locations.
3882 	 *
3883 	 * In the beginning we have a firmware header identical to other
3884 	 * firmware which consists of version, base addr and length. The length
3885 	 * here is unused and set to 0xffffffff.
3886 	 *
3887 	 * This is followed by a series of firmware fragments which are
3888 	 * individually identical to previous firmware. i.e. they have the
3889 	 * firmware header and followed by data for that fragment. The version
3890 	 * field of the individual fragment header is unused.
3891 	 */
3892 
3893 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3894 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3895 		return;
3896 
3897 	if (tg3_rxcpu_pause(tp))
3898 		return;
3899 
3900 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3901 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3902 
3903 	tg3_rxcpu_resume(tp);
3904 }
3905 
3906 /* tp->lock is held. */
3907 static int tg3_load_tso_firmware(struct tg3 *tp)
3908 {
3909 	const struct tg3_firmware_hdr *fw_hdr;
3910 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3911 	int err;
3912 
3913 	if (!tg3_flag(tp, FW_TSO))
3914 		return 0;
3915 
3916 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3917 
3918 	/* Firmware blob starts with version numbers, followed by
3919 	   start address and length. We are setting complete length.
3920 	   length = end_address_of_bss - start_address_of_text.
3921 	   Remainder is the blob to be loaded contiguously
3922 	   from start address. */
3923 
3924 	cpu_scratch_size = tp->fw_len;
3925 
3926 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3927 		cpu_base = RX_CPU_BASE;
3928 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3929 	} else {
3930 		cpu_base = TX_CPU_BASE;
3931 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3932 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3933 	}
3934 
3935 	err = tg3_load_firmware_cpu(tp, cpu_base,
3936 				    cpu_scratch_base, cpu_scratch_size,
3937 				    fw_hdr);
3938 	if (err)
3939 		return err;
3940 
3941 	/* Now startup the cpu. */
3942 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3943 				       be32_to_cpu(fw_hdr->base_addr));
3944 	if (err) {
3945 		netdev_err(tp->dev,
3946 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3947 			   __func__, tr32(cpu_base + CPU_PC),
3948 			   be32_to_cpu(fw_hdr->base_addr));
3949 		return -ENODEV;
3950 	}
3951 
3952 	tg3_resume_cpu(tp, cpu_base);
3953 	return 0;
3954 }
3955 
3956 /* tp->lock is held. */
3957 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3958 {
3959 	u32 addr_high, addr_low;
3960 
3961 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3962 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3963 		    (mac_addr[4] <<  8) | mac_addr[5]);
3964 
3965 	if (index < 4) {
3966 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3967 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3968 	} else {
3969 		index -= 4;
3970 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3971 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3972 	}
3973 }
3974 
3975 /* tp->lock is held. */
3976 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3977 {
3978 	u32 addr_high;
3979 	int i;
3980 
3981 	for (i = 0; i < 4; i++) {
3982 		if (i == 1 && skip_mac_1)
3983 			continue;
3984 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985 	}
3986 
3987 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3988 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3989 		for (i = 4; i < 16; i++)
3990 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3991 	}
3992 
3993 	addr_high = (tp->dev->dev_addr[0] +
3994 		     tp->dev->dev_addr[1] +
3995 		     tp->dev->dev_addr[2] +
3996 		     tp->dev->dev_addr[3] +
3997 		     tp->dev->dev_addr[4] +
3998 		     tp->dev->dev_addr[5]) &
3999 		TX_BACKOFF_SEED_MASK;
4000 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
4001 }
4002 
4003 static void tg3_enable_register_access(struct tg3 *tp)
4004 {
4005 	/*
4006 	 * Make sure register accesses (indirect or otherwise) will function
4007 	 * correctly.
4008 	 */
4009 	pci_write_config_dword(tp->pdev,
4010 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4011 }
4012 
4013 static int tg3_power_up(struct tg3 *tp)
4014 {
4015 	int err;
4016 
4017 	tg3_enable_register_access(tp);
4018 
4019 	err = pci_set_power_state(tp->pdev, PCI_D0);
4020 	if (!err) {
4021 		/* Switch out of Vaux if it is a NIC */
4022 		tg3_pwrsrc_switch_to_vmain(tp);
4023 	} else {
4024 		netdev_err(tp->dev, "Transition to D0 failed\n");
4025 	}
4026 
4027 	return err;
4028 }
4029 
4030 static int tg3_setup_phy(struct tg3 *, bool);
4031 
4032 static int tg3_power_down_prepare(struct tg3 *tp)
4033 {
4034 	u32 misc_host_ctrl;
4035 	bool device_should_wake, do_low_power;
4036 
4037 	tg3_enable_register_access(tp);
4038 
4039 	/* Restore the CLKREQ setting. */
4040 	if (tg3_flag(tp, CLKREQ_BUG))
4041 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4042 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4043 
4044 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4045 	tw32(TG3PCI_MISC_HOST_CTRL,
4046 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4047 
4048 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4049 			     tg3_flag(tp, WOL_ENABLE);
4050 
4051 	if (tg3_flag(tp, USE_PHYLIB)) {
4052 		do_low_power = false;
4053 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4054 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4055 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4056 			struct phy_device *phydev;
4057 			u32 phyid;
4058 
4059 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4060 
4061 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4062 
4063 			tp->link_config.speed = phydev->speed;
4064 			tp->link_config.duplex = phydev->duplex;
4065 			tp->link_config.autoneg = phydev->autoneg;
4066 			ethtool_convert_link_mode_to_legacy_u32(
4067 				&tp->link_config.advertising,
4068 				phydev->advertising);
4069 
4070 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4071 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4072 					 advertising);
4073 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4074 					 advertising);
4075 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4076 					 advertising);
4077 
4078 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4079 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4080 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4081 							 advertising);
4082 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4083 							 advertising);
4084 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4085 							 advertising);
4086 				} else {
4087 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4088 							 advertising);
4089 				}
4090 			}
4091 
4092 			linkmode_copy(phydev->advertising, advertising);
4093 			phy_start_aneg(phydev);
4094 
4095 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4096 			if (phyid != PHY_ID_BCMAC131) {
4097 				phyid &= PHY_BCM_OUI_MASK;
4098 				if (phyid == PHY_BCM_OUI_1 ||
4099 				    phyid == PHY_BCM_OUI_2 ||
4100 				    phyid == PHY_BCM_OUI_3)
4101 					do_low_power = true;
4102 			}
4103 		}
4104 	} else {
4105 		do_low_power = true;
4106 
4107 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4108 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4109 
4110 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4111 			tg3_setup_phy(tp, false);
4112 	}
4113 
4114 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4115 		u32 val;
4116 
4117 		val = tr32(GRC_VCPU_EXT_CTRL);
4118 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4119 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4120 		int i;
4121 		u32 val;
4122 
4123 		for (i = 0; i < 200; i++) {
4124 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4125 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126 				break;
4127 			msleep(1);
4128 		}
4129 	}
4130 	if (tg3_flag(tp, WOL_CAP))
4131 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4132 						     WOL_DRV_STATE_SHUTDOWN |
4133 						     WOL_DRV_WOL |
4134 						     WOL_SET_MAGIC_PKT);
4135 
4136 	if (device_should_wake) {
4137 		u32 mac_mode;
4138 
4139 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4140 			if (do_low_power &&
4141 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4142 				tg3_phy_auxctl_write(tp,
4143 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4144 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4145 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4146 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4147 				udelay(40);
4148 			}
4149 
4150 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4151 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4152 			else if (tp->phy_flags &
4153 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4154 				if (tp->link_config.active_speed == SPEED_1000)
4155 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4156 				else
4157 					mac_mode = MAC_MODE_PORT_MODE_MII;
4158 			} else
4159 				mac_mode = MAC_MODE_PORT_MODE_MII;
4160 
4161 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4162 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4163 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4164 					     SPEED_100 : SPEED_10;
4165 				if (tg3_5700_link_polarity(tp, speed))
4166 					mac_mode |= MAC_MODE_LINK_POLARITY;
4167 				else
4168 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4169 			}
4170 		} else {
4171 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4172 		}
4173 
4174 		if (!tg3_flag(tp, 5750_PLUS))
4175 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4176 
4177 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4178 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4179 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4180 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4181 
4182 		if (tg3_flag(tp, ENABLE_APE))
4183 			mac_mode |= MAC_MODE_APE_TX_EN |
4184 				    MAC_MODE_APE_RX_EN |
4185 				    MAC_MODE_TDE_ENABLE;
4186 
4187 		tw32_f(MAC_MODE, mac_mode);
4188 		udelay(100);
4189 
4190 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4191 		udelay(10);
4192 	}
4193 
4194 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4195 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4197 		u32 base_val;
4198 
4199 		base_val = tp->pci_clock_ctrl;
4200 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4201 			     CLOCK_CTRL_TXCLK_DISABLE);
4202 
4203 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4204 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4205 	} else if (tg3_flag(tp, 5780_CLASS) ||
4206 		   tg3_flag(tp, CPMU_PRESENT) ||
4207 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4208 		/* do nothing */
4209 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4210 		u32 newbits1, newbits2;
4211 
4212 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 				    CLOCK_CTRL_TXCLK_DISABLE |
4216 				    CLOCK_CTRL_ALTCLK);
4217 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 		} else if (tg3_flag(tp, 5705_PLUS)) {
4219 			newbits1 = CLOCK_CTRL_625_CORE;
4220 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4221 		} else {
4222 			newbits1 = CLOCK_CTRL_ALTCLK;
4223 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4224 		}
4225 
4226 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4227 			    40);
4228 
4229 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4230 			    40);
4231 
4232 		if (!tg3_flag(tp, 5705_PLUS)) {
4233 			u32 newbits3;
4234 
4235 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4236 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4237 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4238 					    CLOCK_CTRL_TXCLK_DISABLE |
4239 					    CLOCK_CTRL_44MHZ_CORE);
4240 			} else {
4241 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4242 			}
4243 
4244 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4245 				    tp->pci_clock_ctrl | newbits3, 40);
4246 		}
4247 	}
4248 
4249 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4250 		tg3_power_down_phy(tp, do_low_power);
4251 
4252 	tg3_frob_aux_power(tp, true);
4253 
4254 	/* Workaround for unstable PLL clock */
4255 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4256 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4257 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4258 		u32 val = tr32(0x7d00);
4259 
4260 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4261 		tw32(0x7d00, val);
4262 		if (!tg3_flag(tp, ENABLE_ASF)) {
4263 			int err;
4264 
4265 			err = tg3_nvram_lock(tp);
4266 			tg3_halt_cpu(tp, RX_CPU_BASE);
4267 			if (!err)
4268 				tg3_nvram_unlock(tp);
4269 		}
4270 	}
4271 
4272 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4273 
4274 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4275 
4276 	return 0;
4277 }
4278 
4279 static void tg3_power_down(struct tg3 *tp)
4280 {
4281 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4282 	pci_set_power_state(tp->pdev, PCI_D3hot);
4283 }
4284 
4285 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4286 {
4287 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4288 	case MII_TG3_AUX_STAT_10HALF:
4289 		*speed = SPEED_10;
4290 		*duplex = DUPLEX_HALF;
4291 		break;
4292 
4293 	case MII_TG3_AUX_STAT_10FULL:
4294 		*speed = SPEED_10;
4295 		*duplex = DUPLEX_FULL;
4296 		break;
4297 
4298 	case MII_TG3_AUX_STAT_100HALF:
4299 		*speed = SPEED_100;
4300 		*duplex = DUPLEX_HALF;
4301 		break;
4302 
4303 	case MII_TG3_AUX_STAT_100FULL:
4304 		*speed = SPEED_100;
4305 		*duplex = DUPLEX_FULL;
4306 		break;
4307 
4308 	case MII_TG3_AUX_STAT_1000HALF:
4309 		*speed = SPEED_1000;
4310 		*duplex = DUPLEX_HALF;
4311 		break;
4312 
4313 	case MII_TG3_AUX_STAT_1000FULL:
4314 		*speed = SPEED_1000;
4315 		*duplex = DUPLEX_FULL;
4316 		break;
4317 
4318 	default:
4319 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4320 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4321 				 SPEED_10;
4322 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4323 				  DUPLEX_HALF;
4324 			break;
4325 		}
4326 		*speed = SPEED_UNKNOWN;
4327 		*duplex = DUPLEX_UNKNOWN;
4328 		break;
4329 	}
4330 }
4331 
4332 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4333 {
4334 	int err = 0;
4335 	u32 val, new_adv;
4336 
4337 	new_adv = ADVERTISE_CSMA;
4338 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4339 	new_adv |= mii_advertise_flowctrl(flowctrl);
4340 
4341 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4342 	if (err)
4343 		goto done;
4344 
4345 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4346 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4347 
4348 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4349 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4350 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4351 
4352 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4353 		if (err)
4354 			goto done;
4355 	}
4356 
4357 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4358 		goto done;
4359 
4360 	tw32(TG3_CPMU_EEE_MODE,
4361 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4362 
4363 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4364 	if (!err) {
4365 		u32 err2;
4366 
4367 		val = 0;
4368 		/* Advertise 100-BaseTX EEE ability */
4369 		if (advertise & ADVERTISED_100baseT_Full)
4370 			val |= MDIO_AN_EEE_ADV_100TX;
4371 		/* Advertise 1000-BaseT EEE ability */
4372 		if (advertise & ADVERTISED_1000baseT_Full)
4373 			val |= MDIO_AN_EEE_ADV_1000T;
4374 
4375 		if (!tp->eee.eee_enabled) {
4376 			val = 0;
4377 			tp->eee.advertised = 0;
4378 		} else {
4379 			tp->eee.advertised = advertise &
4380 					     (ADVERTISED_100baseT_Full |
4381 					      ADVERTISED_1000baseT_Full);
4382 		}
4383 
4384 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4385 		if (err)
4386 			val = 0;
4387 
4388 		switch (tg3_asic_rev(tp)) {
4389 		case ASIC_REV_5717:
4390 		case ASIC_REV_57765:
4391 		case ASIC_REV_57766:
4392 		case ASIC_REV_5719:
4393 			/* If we advertised any eee advertisements above... */
4394 			if (val)
4395 				val = MII_TG3_DSP_TAP26_ALNOKO |
4396 				      MII_TG3_DSP_TAP26_RMRXSTO |
4397 				      MII_TG3_DSP_TAP26_OPCSINPT;
4398 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4399 			/* Fall through */
4400 		case ASIC_REV_5720:
4401 		case ASIC_REV_5762:
4402 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4403 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4404 						 MII_TG3_DSP_CH34TP2_HIBW01);
4405 		}
4406 
4407 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4408 		if (!err)
4409 			err = err2;
4410 	}
4411 
4412 done:
4413 	return err;
4414 }
4415 
4416 static void tg3_phy_copper_begin(struct tg3 *tp)
4417 {
4418 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4419 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4420 		u32 adv, fc;
4421 
4422 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 			adv = ADVERTISED_10baseT_Half |
4425 			      ADVERTISED_10baseT_Full;
4426 			if (tg3_flag(tp, WOL_SPEED_100MB))
4427 				adv |= ADVERTISED_100baseT_Half |
4428 				       ADVERTISED_100baseT_Full;
4429 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4430 				if (!(tp->phy_flags &
4431 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4432 					adv |= ADVERTISED_1000baseT_Half;
4433 				adv |= ADVERTISED_1000baseT_Full;
4434 			}
4435 
4436 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4437 		} else {
4438 			adv = tp->link_config.advertising;
4439 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4440 				adv &= ~(ADVERTISED_1000baseT_Half |
4441 					 ADVERTISED_1000baseT_Full);
4442 
4443 			fc = tp->link_config.flowctrl;
4444 		}
4445 
4446 		tg3_phy_autoneg_cfg(tp, adv, fc);
4447 
4448 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4449 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4450 			/* Normally during power down we want to autonegotiate
4451 			 * the lowest possible speed for WOL. However, to avoid
4452 			 * link flap, we leave it untouched.
4453 			 */
4454 			return;
4455 		}
4456 
4457 		tg3_writephy(tp, MII_BMCR,
4458 			     BMCR_ANENABLE | BMCR_ANRESTART);
4459 	} else {
4460 		int i;
4461 		u32 bmcr, orig_bmcr;
4462 
4463 		tp->link_config.active_speed = tp->link_config.speed;
4464 		tp->link_config.active_duplex = tp->link_config.duplex;
4465 
4466 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4467 			/* With autoneg disabled, 5715 only links up when the
4468 			 * advertisement register has the configured speed
4469 			 * enabled.
4470 			 */
4471 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4472 		}
4473 
4474 		bmcr = 0;
4475 		switch (tp->link_config.speed) {
4476 		default:
4477 		case SPEED_10:
4478 			break;
4479 
4480 		case SPEED_100:
4481 			bmcr |= BMCR_SPEED100;
4482 			break;
4483 
4484 		case SPEED_1000:
4485 			bmcr |= BMCR_SPEED1000;
4486 			break;
4487 		}
4488 
4489 		if (tp->link_config.duplex == DUPLEX_FULL)
4490 			bmcr |= BMCR_FULLDPLX;
4491 
4492 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4493 		    (bmcr != orig_bmcr)) {
4494 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4495 			for (i = 0; i < 1500; i++) {
4496 				u32 tmp;
4497 
4498 				udelay(10);
4499 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4500 				    tg3_readphy(tp, MII_BMSR, &tmp))
4501 					continue;
4502 				if (!(tmp & BMSR_LSTATUS)) {
4503 					udelay(40);
4504 					break;
4505 				}
4506 			}
4507 			tg3_writephy(tp, MII_BMCR, bmcr);
4508 			udelay(40);
4509 		}
4510 	}
4511 }
4512 
4513 static int tg3_phy_pull_config(struct tg3 *tp)
4514 {
4515 	int err;
4516 	u32 val;
4517 
4518 	err = tg3_readphy(tp, MII_BMCR, &val);
4519 	if (err)
4520 		goto done;
4521 
4522 	if (!(val & BMCR_ANENABLE)) {
4523 		tp->link_config.autoneg = AUTONEG_DISABLE;
4524 		tp->link_config.advertising = 0;
4525 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4526 
4527 		err = -EIO;
4528 
4529 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4530 		case 0:
4531 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532 				goto done;
4533 
4534 			tp->link_config.speed = SPEED_10;
4535 			break;
4536 		case BMCR_SPEED100:
4537 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4538 				goto done;
4539 
4540 			tp->link_config.speed = SPEED_100;
4541 			break;
4542 		case BMCR_SPEED1000:
4543 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4544 				tp->link_config.speed = SPEED_1000;
4545 				break;
4546 			}
4547 			/* Fall through */
4548 		default:
4549 			goto done;
4550 		}
4551 
4552 		if (val & BMCR_FULLDPLX)
4553 			tp->link_config.duplex = DUPLEX_FULL;
4554 		else
4555 			tp->link_config.duplex = DUPLEX_HALF;
4556 
4557 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4558 
4559 		err = 0;
4560 		goto done;
4561 	}
4562 
4563 	tp->link_config.autoneg = AUTONEG_ENABLE;
4564 	tp->link_config.advertising = ADVERTISED_Autoneg;
4565 	tg3_flag_set(tp, PAUSE_AUTONEG);
4566 
4567 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568 		u32 adv;
4569 
4570 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4571 		if (err)
4572 			goto done;
4573 
4574 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4575 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4576 
4577 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4578 	} else {
4579 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4580 	}
4581 
4582 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4583 		u32 adv;
4584 
4585 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4586 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4587 			if (err)
4588 				goto done;
4589 
4590 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4591 		} else {
4592 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4593 			if (err)
4594 				goto done;
4595 
4596 			adv = tg3_decode_flowctrl_1000X(val);
4597 			tp->link_config.flowctrl = adv;
4598 
4599 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4600 			adv = mii_adv_to_ethtool_adv_x(val);
4601 		}
4602 
4603 		tp->link_config.advertising |= adv;
4604 	}
4605 
4606 done:
4607 	return err;
4608 }
4609 
4610 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4611 {
4612 	int err;
4613 
4614 	/* Turn off tap power management. */
4615 	/* Set Extended packet length bit */
4616 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4617 
4618 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4619 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4620 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4621 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4622 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4623 
4624 	udelay(40);
4625 
4626 	return err;
4627 }
4628 
4629 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4630 {
4631 	struct ethtool_eee eee;
4632 
4633 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4634 		return true;
4635 
4636 	tg3_eee_pull_config(tp, &eee);
4637 
4638 	if (tp->eee.eee_enabled) {
4639 		if (tp->eee.advertised != eee.advertised ||
4640 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4641 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4642 			return false;
4643 	} else {
4644 		/* EEE is disabled but we're advertising */
4645 		if (eee.advertised)
4646 			return false;
4647 	}
4648 
4649 	return true;
4650 }
4651 
4652 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4653 {
4654 	u32 advmsk, tgtadv, advertising;
4655 
4656 	advertising = tp->link_config.advertising;
4657 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4658 
4659 	advmsk = ADVERTISE_ALL;
4660 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4661 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4662 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4663 	}
4664 
4665 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4666 		return false;
4667 
4668 	if ((*lcladv & advmsk) != tgtadv)
4669 		return false;
4670 
4671 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4672 		u32 tg3_ctrl;
4673 
4674 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4675 
4676 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4677 			return false;
4678 
4679 		if (tgtadv &&
4680 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4681 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4682 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4683 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4684 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4685 		} else {
4686 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4687 		}
4688 
4689 		if (tg3_ctrl != tgtadv)
4690 			return false;
4691 	}
4692 
4693 	return true;
4694 }
4695 
4696 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4697 {
4698 	u32 lpeth = 0;
4699 
4700 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4701 		u32 val;
4702 
4703 		if (tg3_readphy(tp, MII_STAT1000, &val))
4704 			return false;
4705 
4706 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4707 	}
4708 
4709 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4710 		return false;
4711 
4712 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4713 	tp->link_config.rmt_adv = lpeth;
4714 
4715 	return true;
4716 }
4717 
4718 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4719 {
4720 	if (curr_link_up != tp->link_up) {
4721 		if (curr_link_up) {
4722 			netif_carrier_on(tp->dev);
4723 		} else {
4724 			netif_carrier_off(tp->dev);
4725 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4726 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4727 		}
4728 
4729 		tg3_link_report(tp);
4730 		return true;
4731 	}
4732 
4733 	return false;
4734 }
4735 
4736 static void tg3_clear_mac_status(struct tg3 *tp)
4737 {
4738 	tw32(MAC_EVENT, 0);
4739 
4740 	tw32_f(MAC_STATUS,
4741 	       MAC_STATUS_SYNC_CHANGED |
4742 	       MAC_STATUS_CFG_CHANGED |
4743 	       MAC_STATUS_MI_COMPLETION |
4744 	       MAC_STATUS_LNKSTATE_CHANGED);
4745 	udelay(40);
4746 }
4747 
4748 static void tg3_setup_eee(struct tg3 *tp)
4749 {
4750 	u32 val;
4751 
4752 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4753 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4754 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4755 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4756 
4757 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4758 
4759 	tw32_f(TG3_CPMU_EEE_CTRL,
4760 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4761 
4762 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4763 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4764 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4765 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4766 
4767 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4768 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4769 
4770 	if (tg3_flag(tp, ENABLE_APE))
4771 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4772 
4773 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4774 
4775 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4776 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4777 	       (tp->eee.tx_lpi_timer & 0xffff));
4778 
4779 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4780 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4781 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4782 }
4783 
4784 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4785 {
4786 	bool current_link_up;
4787 	u32 bmsr, val;
4788 	u32 lcl_adv, rmt_adv;
4789 	u32 current_speed;
4790 	u8 current_duplex;
4791 	int i, err;
4792 
4793 	tg3_clear_mac_status(tp);
4794 
4795 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4796 		tw32_f(MAC_MI_MODE,
4797 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4798 		udelay(80);
4799 	}
4800 
4801 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4802 
4803 	/* Some third-party PHYs need to be reset on link going
4804 	 * down.
4805 	 */
4806 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4807 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4808 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4809 	    tp->link_up) {
4810 		tg3_readphy(tp, MII_BMSR, &bmsr);
4811 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 		    !(bmsr & BMSR_LSTATUS))
4813 			force_reset = true;
4814 	}
4815 	if (force_reset)
4816 		tg3_phy_reset(tp);
4817 
4818 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4819 		tg3_readphy(tp, MII_BMSR, &bmsr);
4820 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4821 		    !tg3_flag(tp, INIT_COMPLETE))
4822 			bmsr = 0;
4823 
4824 		if (!(bmsr & BMSR_LSTATUS)) {
4825 			err = tg3_init_5401phy_dsp(tp);
4826 			if (err)
4827 				return err;
4828 
4829 			tg3_readphy(tp, MII_BMSR, &bmsr);
4830 			for (i = 0; i < 1000; i++) {
4831 				udelay(10);
4832 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4833 				    (bmsr & BMSR_LSTATUS)) {
4834 					udelay(40);
4835 					break;
4836 				}
4837 			}
4838 
4839 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4840 			    TG3_PHY_REV_BCM5401_B0 &&
4841 			    !(bmsr & BMSR_LSTATUS) &&
4842 			    tp->link_config.active_speed == SPEED_1000) {
4843 				err = tg3_phy_reset(tp);
4844 				if (!err)
4845 					err = tg3_init_5401phy_dsp(tp);
4846 				if (err)
4847 					return err;
4848 			}
4849 		}
4850 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4851 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4852 		/* 5701 {A0,B0} CRC bug workaround */
4853 		tg3_writephy(tp, 0x15, 0x0a75);
4854 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4856 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4857 	}
4858 
4859 	/* Clear pending interrupts... */
4860 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4861 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862 
4863 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4864 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4865 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4866 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4867 
4868 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4869 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4870 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4871 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4872 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4873 		else
4874 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4875 	}
4876 
4877 	current_link_up = false;
4878 	current_speed = SPEED_UNKNOWN;
4879 	current_duplex = DUPLEX_UNKNOWN;
4880 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4881 	tp->link_config.rmt_adv = 0;
4882 
4883 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4884 		err = tg3_phy_auxctl_read(tp,
4885 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 					  &val);
4887 		if (!err && !(val & (1 << 10))) {
4888 			tg3_phy_auxctl_write(tp,
4889 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4890 					     val | (1 << 10));
4891 			goto relink;
4892 		}
4893 	}
4894 
4895 	bmsr = 0;
4896 	for (i = 0; i < 100; i++) {
4897 		tg3_readphy(tp, MII_BMSR, &bmsr);
4898 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4899 		    (bmsr & BMSR_LSTATUS))
4900 			break;
4901 		udelay(40);
4902 	}
4903 
4904 	if (bmsr & BMSR_LSTATUS) {
4905 		u32 aux_stat, bmcr;
4906 
4907 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4908 		for (i = 0; i < 2000; i++) {
4909 			udelay(10);
4910 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4911 			    aux_stat)
4912 				break;
4913 		}
4914 
4915 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4916 					     &current_speed,
4917 					     &current_duplex);
4918 
4919 		bmcr = 0;
4920 		for (i = 0; i < 200; i++) {
4921 			tg3_readphy(tp, MII_BMCR, &bmcr);
4922 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4923 				continue;
4924 			if (bmcr && bmcr != 0x7fff)
4925 				break;
4926 			udelay(10);
4927 		}
4928 
4929 		lcl_adv = 0;
4930 		rmt_adv = 0;
4931 
4932 		tp->link_config.active_speed = current_speed;
4933 		tp->link_config.active_duplex = current_duplex;
4934 
4935 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4936 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4937 
4938 			if ((bmcr & BMCR_ANENABLE) &&
4939 			    eee_config_ok &&
4940 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4941 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4942 				current_link_up = true;
4943 
4944 			/* EEE settings changes take effect only after a phy
4945 			 * reset.  If we have skipped a reset due to Link Flap
4946 			 * Avoidance being enabled, do it now.
4947 			 */
4948 			if (!eee_config_ok &&
4949 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4950 			    !force_reset) {
4951 				tg3_setup_eee(tp);
4952 				tg3_phy_reset(tp);
4953 			}
4954 		} else {
4955 			if (!(bmcr & BMCR_ANENABLE) &&
4956 			    tp->link_config.speed == current_speed &&
4957 			    tp->link_config.duplex == current_duplex) {
4958 				current_link_up = true;
4959 			}
4960 		}
4961 
4962 		if (current_link_up &&
4963 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4964 			u32 reg, bit;
4965 
4966 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4967 				reg = MII_TG3_FET_GEN_STAT;
4968 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4969 			} else {
4970 				reg = MII_TG3_EXT_STAT;
4971 				bit = MII_TG3_EXT_STAT_MDIX;
4972 			}
4973 
4974 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4975 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4976 
4977 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4978 		}
4979 	}
4980 
4981 relink:
4982 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4983 		tg3_phy_copper_begin(tp);
4984 
4985 		if (tg3_flag(tp, ROBOSWITCH)) {
4986 			current_link_up = true;
4987 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4988 			current_speed = SPEED_1000;
4989 			current_duplex = DUPLEX_FULL;
4990 			tp->link_config.active_speed = current_speed;
4991 			tp->link_config.active_duplex = current_duplex;
4992 		}
4993 
4994 		tg3_readphy(tp, MII_BMSR, &bmsr);
4995 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4996 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4997 			current_link_up = true;
4998 	}
4999 
5000 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5001 	if (current_link_up) {
5002 		if (tp->link_config.active_speed == SPEED_100 ||
5003 		    tp->link_config.active_speed == SPEED_10)
5004 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005 		else
5006 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5008 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5009 	else
5010 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5011 
5012 	/* In order for the 5750 core in BCM4785 chip to work properly
5013 	 * in RGMII mode, the Led Control Register must be set up.
5014 	 */
5015 	if (tg3_flag(tp, RGMII_MODE)) {
5016 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5017 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5018 
5019 		if (tp->link_config.active_speed == SPEED_10)
5020 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5021 		else if (tp->link_config.active_speed == SPEED_100)
5022 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 				     LED_CTRL_100MBPS_ON);
5024 		else if (tp->link_config.active_speed == SPEED_1000)
5025 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5026 				     LED_CTRL_1000MBPS_ON);
5027 
5028 		tw32(MAC_LED_CTRL, led_ctrl);
5029 		udelay(40);
5030 	}
5031 
5032 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5033 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5034 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5035 
5036 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5037 		if (current_link_up &&
5038 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5039 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5040 		else
5041 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5042 	}
5043 
5044 	/* ??? Without this setting Netgear GA302T PHY does not
5045 	 * ??? send/receive packets...
5046 	 */
5047 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5048 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5049 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5050 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5051 		udelay(80);
5052 	}
5053 
5054 	tw32_f(MAC_MODE, tp->mac_mode);
5055 	udelay(40);
5056 
5057 	tg3_phy_eee_adjust(tp, current_link_up);
5058 
5059 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5060 		/* Polled via timer. */
5061 		tw32_f(MAC_EVENT, 0);
5062 	} else {
5063 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5064 	}
5065 	udelay(40);
5066 
5067 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5068 	    current_link_up &&
5069 	    tp->link_config.active_speed == SPEED_1000 &&
5070 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5071 		udelay(120);
5072 		tw32_f(MAC_STATUS,
5073 		     (MAC_STATUS_SYNC_CHANGED |
5074 		      MAC_STATUS_CFG_CHANGED));
5075 		udelay(40);
5076 		tg3_write_mem(tp,
5077 			      NIC_SRAM_FIRMWARE_MBOX,
5078 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5079 	}
5080 
5081 	/* Prevent send BD corruption. */
5082 	if (tg3_flag(tp, CLKREQ_BUG)) {
5083 		if (tp->link_config.active_speed == SPEED_100 ||
5084 		    tp->link_config.active_speed == SPEED_10)
5085 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5086 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5087 		else
5088 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5089 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5090 	}
5091 
5092 	tg3_test_and_report_link_chg(tp, current_link_up);
5093 
5094 	return 0;
5095 }
5096 
5097 struct tg3_fiber_aneginfo {
5098 	int state;
5099 #define ANEG_STATE_UNKNOWN		0
5100 #define ANEG_STATE_AN_ENABLE		1
5101 #define ANEG_STATE_RESTART_INIT		2
5102 #define ANEG_STATE_RESTART		3
5103 #define ANEG_STATE_DISABLE_LINK_OK	4
5104 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5105 #define ANEG_STATE_ABILITY_DETECT	6
5106 #define ANEG_STATE_ACK_DETECT_INIT	7
5107 #define ANEG_STATE_ACK_DETECT		8
5108 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5109 #define ANEG_STATE_COMPLETE_ACK		10
5110 #define ANEG_STATE_IDLE_DETECT_INIT	11
5111 #define ANEG_STATE_IDLE_DETECT		12
5112 #define ANEG_STATE_LINK_OK		13
5113 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5114 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5115 
5116 	u32 flags;
5117 #define MR_AN_ENABLE		0x00000001
5118 #define MR_RESTART_AN		0x00000002
5119 #define MR_AN_COMPLETE		0x00000004
5120 #define MR_PAGE_RX		0x00000008
5121 #define MR_NP_LOADED		0x00000010
5122 #define MR_TOGGLE_TX		0x00000020
5123 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5124 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5125 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5126 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5127 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5128 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5129 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5130 #define MR_TOGGLE_RX		0x00002000
5131 #define MR_NP_RX		0x00004000
5132 
5133 #define MR_LINK_OK		0x80000000
5134 
5135 	unsigned long link_time, cur_time;
5136 
5137 	u32 ability_match_cfg;
5138 	int ability_match_count;
5139 
5140 	char ability_match, idle_match, ack_match;
5141 
5142 	u32 txconfig, rxconfig;
5143 #define ANEG_CFG_NP		0x00000080
5144 #define ANEG_CFG_ACK		0x00000040
5145 #define ANEG_CFG_RF2		0x00000020
5146 #define ANEG_CFG_RF1		0x00000010
5147 #define ANEG_CFG_PS2		0x00000001
5148 #define ANEG_CFG_PS1		0x00008000
5149 #define ANEG_CFG_HD		0x00004000
5150 #define ANEG_CFG_FD		0x00002000
5151 #define ANEG_CFG_INVAL		0x00001f06
5152 
5153 };
5154 #define ANEG_OK		0
5155 #define ANEG_DONE	1
5156 #define ANEG_TIMER_ENAB	2
5157 #define ANEG_FAILED	-1
5158 
5159 #define ANEG_STATE_SETTLE_TIME	10000
5160 
5161 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5162 				   struct tg3_fiber_aneginfo *ap)
5163 {
5164 	u16 flowctrl;
5165 	unsigned long delta;
5166 	u32 rx_cfg_reg;
5167 	int ret;
5168 
5169 	if (ap->state == ANEG_STATE_UNKNOWN) {
5170 		ap->rxconfig = 0;
5171 		ap->link_time = 0;
5172 		ap->cur_time = 0;
5173 		ap->ability_match_cfg = 0;
5174 		ap->ability_match_count = 0;
5175 		ap->ability_match = 0;
5176 		ap->idle_match = 0;
5177 		ap->ack_match = 0;
5178 	}
5179 	ap->cur_time++;
5180 
5181 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5182 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5183 
5184 		if (rx_cfg_reg != ap->ability_match_cfg) {
5185 			ap->ability_match_cfg = rx_cfg_reg;
5186 			ap->ability_match = 0;
5187 			ap->ability_match_count = 0;
5188 		} else {
5189 			if (++ap->ability_match_count > 1) {
5190 				ap->ability_match = 1;
5191 				ap->ability_match_cfg = rx_cfg_reg;
5192 			}
5193 		}
5194 		if (rx_cfg_reg & ANEG_CFG_ACK)
5195 			ap->ack_match = 1;
5196 		else
5197 			ap->ack_match = 0;
5198 
5199 		ap->idle_match = 0;
5200 	} else {
5201 		ap->idle_match = 1;
5202 		ap->ability_match_cfg = 0;
5203 		ap->ability_match_count = 0;
5204 		ap->ability_match = 0;
5205 		ap->ack_match = 0;
5206 
5207 		rx_cfg_reg = 0;
5208 	}
5209 
5210 	ap->rxconfig = rx_cfg_reg;
5211 	ret = ANEG_OK;
5212 
5213 	switch (ap->state) {
5214 	case ANEG_STATE_UNKNOWN:
5215 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5216 			ap->state = ANEG_STATE_AN_ENABLE;
5217 
5218 		/* fall through */
5219 	case ANEG_STATE_AN_ENABLE:
5220 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5221 		if (ap->flags & MR_AN_ENABLE) {
5222 			ap->link_time = 0;
5223 			ap->cur_time = 0;
5224 			ap->ability_match_cfg = 0;
5225 			ap->ability_match_count = 0;
5226 			ap->ability_match = 0;
5227 			ap->idle_match = 0;
5228 			ap->ack_match = 0;
5229 
5230 			ap->state = ANEG_STATE_RESTART_INIT;
5231 		} else {
5232 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5233 		}
5234 		break;
5235 
5236 	case ANEG_STATE_RESTART_INIT:
5237 		ap->link_time = ap->cur_time;
5238 		ap->flags &= ~(MR_NP_LOADED);
5239 		ap->txconfig = 0;
5240 		tw32(MAC_TX_AUTO_NEG, 0);
5241 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5242 		tw32_f(MAC_MODE, tp->mac_mode);
5243 		udelay(40);
5244 
5245 		ret = ANEG_TIMER_ENAB;
5246 		ap->state = ANEG_STATE_RESTART;
5247 
5248 		/* fall through */
5249 	case ANEG_STATE_RESTART:
5250 		delta = ap->cur_time - ap->link_time;
5251 		if (delta > ANEG_STATE_SETTLE_TIME)
5252 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5253 		else
5254 			ret = ANEG_TIMER_ENAB;
5255 		break;
5256 
5257 	case ANEG_STATE_DISABLE_LINK_OK:
5258 		ret = ANEG_DONE;
5259 		break;
5260 
5261 	case ANEG_STATE_ABILITY_DETECT_INIT:
5262 		ap->flags &= ~(MR_TOGGLE_TX);
5263 		ap->txconfig = ANEG_CFG_FD;
5264 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5265 		if (flowctrl & ADVERTISE_1000XPAUSE)
5266 			ap->txconfig |= ANEG_CFG_PS1;
5267 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5268 			ap->txconfig |= ANEG_CFG_PS2;
5269 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271 		tw32_f(MAC_MODE, tp->mac_mode);
5272 		udelay(40);
5273 
5274 		ap->state = ANEG_STATE_ABILITY_DETECT;
5275 		break;
5276 
5277 	case ANEG_STATE_ABILITY_DETECT:
5278 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5279 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5280 		break;
5281 
5282 	case ANEG_STATE_ACK_DETECT_INIT:
5283 		ap->txconfig |= ANEG_CFG_ACK;
5284 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5285 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5286 		tw32_f(MAC_MODE, tp->mac_mode);
5287 		udelay(40);
5288 
5289 		ap->state = ANEG_STATE_ACK_DETECT;
5290 
5291 		/* fall through */
5292 	case ANEG_STATE_ACK_DETECT:
5293 		if (ap->ack_match != 0) {
5294 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5295 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5296 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5297 			} else {
5298 				ap->state = ANEG_STATE_AN_ENABLE;
5299 			}
5300 		} else if (ap->ability_match != 0 &&
5301 			   ap->rxconfig == 0) {
5302 			ap->state = ANEG_STATE_AN_ENABLE;
5303 		}
5304 		break;
5305 
5306 	case ANEG_STATE_COMPLETE_ACK_INIT:
5307 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5308 			ret = ANEG_FAILED;
5309 			break;
5310 		}
5311 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5312 			       MR_LP_ADV_HALF_DUPLEX |
5313 			       MR_LP_ADV_SYM_PAUSE |
5314 			       MR_LP_ADV_ASYM_PAUSE |
5315 			       MR_LP_ADV_REMOTE_FAULT1 |
5316 			       MR_LP_ADV_REMOTE_FAULT2 |
5317 			       MR_LP_ADV_NEXT_PAGE |
5318 			       MR_TOGGLE_RX |
5319 			       MR_NP_RX);
5320 		if (ap->rxconfig & ANEG_CFG_FD)
5321 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5322 		if (ap->rxconfig & ANEG_CFG_HD)
5323 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5324 		if (ap->rxconfig & ANEG_CFG_PS1)
5325 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5326 		if (ap->rxconfig & ANEG_CFG_PS2)
5327 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5328 		if (ap->rxconfig & ANEG_CFG_RF1)
5329 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5330 		if (ap->rxconfig & ANEG_CFG_RF2)
5331 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5332 		if (ap->rxconfig & ANEG_CFG_NP)
5333 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5334 
5335 		ap->link_time = ap->cur_time;
5336 
5337 		ap->flags ^= (MR_TOGGLE_TX);
5338 		if (ap->rxconfig & 0x0008)
5339 			ap->flags |= MR_TOGGLE_RX;
5340 		if (ap->rxconfig & ANEG_CFG_NP)
5341 			ap->flags |= MR_NP_RX;
5342 		ap->flags |= MR_PAGE_RX;
5343 
5344 		ap->state = ANEG_STATE_COMPLETE_ACK;
5345 		ret = ANEG_TIMER_ENAB;
5346 		break;
5347 
5348 	case ANEG_STATE_COMPLETE_ACK:
5349 		if (ap->ability_match != 0 &&
5350 		    ap->rxconfig == 0) {
5351 			ap->state = ANEG_STATE_AN_ENABLE;
5352 			break;
5353 		}
5354 		delta = ap->cur_time - ap->link_time;
5355 		if (delta > ANEG_STATE_SETTLE_TIME) {
5356 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5357 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 			} else {
5359 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5360 				    !(ap->flags & MR_NP_RX)) {
5361 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5362 				} else {
5363 					ret = ANEG_FAILED;
5364 				}
5365 			}
5366 		}
5367 		break;
5368 
5369 	case ANEG_STATE_IDLE_DETECT_INIT:
5370 		ap->link_time = ap->cur_time;
5371 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5372 		tw32_f(MAC_MODE, tp->mac_mode);
5373 		udelay(40);
5374 
5375 		ap->state = ANEG_STATE_IDLE_DETECT;
5376 		ret = ANEG_TIMER_ENAB;
5377 		break;
5378 
5379 	case ANEG_STATE_IDLE_DETECT:
5380 		if (ap->ability_match != 0 &&
5381 		    ap->rxconfig == 0) {
5382 			ap->state = ANEG_STATE_AN_ENABLE;
5383 			break;
5384 		}
5385 		delta = ap->cur_time - ap->link_time;
5386 		if (delta > ANEG_STATE_SETTLE_TIME) {
5387 			/* XXX another gem from the Broadcom driver :( */
5388 			ap->state = ANEG_STATE_LINK_OK;
5389 		}
5390 		break;
5391 
5392 	case ANEG_STATE_LINK_OK:
5393 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5394 		ret = ANEG_DONE;
5395 		break;
5396 
5397 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5398 		/* ??? unimplemented */
5399 		break;
5400 
5401 	case ANEG_STATE_NEXT_PAGE_WAIT:
5402 		/* ??? unimplemented */
5403 		break;
5404 
5405 	default:
5406 		ret = ANEG_FAILED;
5407 		break;
5408 	}
5409 
5410 	return ret;
5411 }
5412 
5413 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5414 {
5415 	int res = 0;
5416 	struct tg3_fiber_aneginfo aninfo;
5417 	int status = ANEG_FAILED;
5418 	unsigned int tick;
5419 	u32 tmp;
5420 
5421 	tw32_f(MAC_TX_AUTO_NEG, 0);
5422 
5423 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5424 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5425 	udelay(40);
5426 
5427 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5428 	udelay(40);
5429 
5430 	memset(&aninfo, 0, sizeof(aninfo));
5431 	aninfo.flags |= MR_AN_ENABLE;
5432 	aninfo.state = ANEG_STATE_UNKNOWN;
5433 	aninfo.cur_time = 0;
5434 	tick = 0;
5435 	while (++tick < 195000) {
5436 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5437 		if (status == ANEG_DONE || status == ANEG_FAILED)
5438 			break;
5439 
5440 		udelay(1);
5441 	}
5442 
5443 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5444 	tw32_f(MAC_MODE, tp->mac_mode);
5445 	udelay(40);
5446 
5447 	*txflags = aninfo.txconfig;
5448 	*rxflags = aninfo.flags;
5449 
5450 	if (status == ANEG_DONE &&
5451 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5452 			     MR_LP_ADV_FULL_DUPLEX)))
5453 		res = 1;
5454 
5455 	return res;
5456 }
5457 
5458 static void tg3_init_bcm8002(struct tg3 *tp)
5459 {
5460 	u32 mac_status = tr32(MAC_STATUS);
5461 	int i;
5462 
5463 	/* Reset when initting first time or we have a link. */
5464 	if (tg3_flag(tp, INIT_COMPLETE) &&
5465 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5466 		return;
5467 
5468 	/* Set PLL lock range. */
5469 	tg3_writephy(tp, 0x16, 0x8007);
5470 
5471 	/* SW reset */
5472 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5473 
5474 	/* Wait for reset to complete. */
5475 	/* XXX schedule_timeout() ... */
5476 	for (i = 0; i < 500; i++)
5477 		udelay(10);
5478 
5479 	/* Config mode; select PMA/Ch 1 regs. */
5480 	tg3_writephy(tp, 0x10, 0x8411);
5481 
5482 	/* Enable auto-lock and comdet, select txclk for tx. */
5483 	tg3_writephy(tp, 0x11, 0x0a10);
5484 
5485 	tg3_writephy(tp, 0x18, 0x00a0);
5486 	tg3_writephy(tp, 0x16, 0x41ff);
5487 
5488 	/* Assert and deassert POR. */
5489 	tg3_writephy(tp, 0x13, 0x0400);
5490 	udelay(40);
5491 	tg3_writephy(tp, 0x13, 0x0000);
5492 
5493 	tg3_writephy(tp, 0x11, 0x0a50);
5494 	udelay(40);
5495 	tg3_writephy(tp, 0x11, 0x0a10);
5496 
5497 	/* Wait for signal to stabilize */
5498 	/* XXX schedule_timeout() ... */
5499 	for (i = 0; i < 15000; i++)
5500 		udelay(10);
5501 
5502 	/* Deselect the channel register so we can read the PHYID
5503 	 * later.
5504 	 */
5505 	tg3_writephy(tp, 0x10, 0x8011);
5506 }
5507 
5508 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5509 {
5510 	u16 flowctrl;
5511 	bool current_link_up;
5512 	u32 sg_dig_ctrl, sg_dig_status;
5513 	u32 serdes_cfg, expected_sg_dig_ctrl;
5514 	int workaround, port_a;
5515 
5516 	serdes_cfg = 0;
5517 	expected_sg_dig_ctrl = 0;
5518 	workaround = 0;
5519 	port_a = 1;
5520 	current_link_up = false;
5521 
5522 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5523 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5524 		workaround = 1;
5525 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5526 			port_a = 0;
5527 
5528 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5529 		/* preserve bits 20-23 for voltage regulator */
5530 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5531 	}
5532 
5533 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5534 
5535 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5536 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5537 			if (workaround) {
5538 				u32 val = serdes_cfg;
5539 
5540 				if (port_a)
5541 					val |= 0xc010000;
5542 				else
5543 					val |= 0x4010000;
5544 				tw32_f(MAC_SERDES_CFG, val);
5545 			}
5546 
5547 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5548 		}
5549 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5550 			tg3_setup_flow_control(tp, 0, 0);
5551 			current_link_up = true;
5552 		}
5553 		goto out;
5554 	}
5555 
5556 	/* Want auto-negotiation.  */
5557 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5558 
5559 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5560 	if (flowctrl & ADVERTISE_1000XPAUSE)
5561 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5562 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5563 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5564 
5565 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5566 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5567 		    tp->serdes_counter &&
5568 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5569 				    MAC_STATUS_RCVD_CFG)) ==
5570 		     MAC_STATUS_PCS_SYNCED)) {
5571 			tp->serdes_counter--;
5572 			current_link_up = true;
5573 			goto out;
5574 		}
5575 restart_autoneg:
5576 		if (workaround)
5577 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5578 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5579 		udelay(5);
5580 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5581 
5582 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5583 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5585 				 MAC_STATUS_SIGNAL_DET)) {
5586 		sg_dig_status = tr32(SG_DIG_STATUS);
5587 		mac_status = tr32(MAC_STATUS);
5588 
5589 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5590 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5591 			u32 local_adv = 0, remote_adv = 0;
5592 
5593 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5594 				local_adv |= ADVERTISE_1000XPAUSE;
5595 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5596 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5597 
5598 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5599 				remote_adv |= LPA_1000XPAUSE;
5600 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5601 				remote_adv |= LPA_1000XPAUSE_ASYM;
5602 
5603 			tp->link_config.rmt_adv =
5604 					   mii_adv_to_ethtool_adv_x(remote_adv);
5605 
5606 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5607 			current_link_up = true;
5608 			tp->serdes_counter = 0;
5609 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5610 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5611 			if (tp->serdes_counter)
5612 				tp->serdes_counter--;
5613 			else {
5614 				if (workaround) {
5615 					u32 val = serdes_cfg;
5616 
5617 					if (port_a)
5618 						val |= 0xc010000;
5619 					else
5620 						val |= 0x4010000;
5621 
5622 					tw32_f(MAC_SERDES_CFG, val);
5623 				}
5624 
5625 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5626 				udelay(40);
5627 
5628 				/* Link parallel detection - link is up */
5629 				/* only if we have PCS_SYNC and not */
5630 				/* receiving config code words */
5631 				mac_status = tr32(MAC_STATUS);
5632 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5633 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5634 					tg3_setup_flow_control(tp, 0, 0);
5635 					current_link_up = true;
5636 					tp->phy_flags |=
5637 						TG3_PHYFLG_PARALLEL_DETECT;
5638 					tp->serdes_counter =
5639 						SERDES_PARALLEL_DET_TIMEOUT;
5640 				} else
5641 					goto restart_autoneg;
5642 			}
5643 		}
5644 	} else {
5645 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5646 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5647 	}
5648 
5649 out:
5650 	return current_link_up;
5651 }
5652 
5653 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5654 {
5655 	bool current_link_up = false;
5656 
5657 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5658 		goto out;
5659 
5660 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5661 		u32 txflags, rxflags;
5662 		int i;
5663 
5664 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5665 			u32 local_adv = 0, remote_adv = 0;
5666 
5667 			if (txflags & ANEG_CFG_PS1)
5668 				local_adv |= ADVERTISE_1000XPAUSE;
5669 			if (txflags & ANEG_CFG_PS2)
5670 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5671 
5672 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5673 				remote_adv |= LPA_1000XPAUSE;
5674 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5675 				remote_adv |= LPA_1000XPAUSE_ASYM;
5676 
5677 			tp->link_config.rmt_adv =
5678 					   mii_adv_to_ethtool_adv_x(remote_adv);
5679 
5680 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5681 
5682 			current_link_up = true;
5683 		}
5684 		for (i = 0; i < 30; i++) {
5685 			udelay(20);
5686 			tw32_f(MAC_STATUS,
5687 			       (MAC_STATUS_SYNC_CHANGED |
5688 				MAC_STATUS_CFG_CHANGED));
5689 			udelay(40);
5690 			if ((tr32(MAC_STATUS) &
5691 			     (MAC_STATUS_SYNC_CHANGED |
5692 			      MAC_STATUS_CFG_CHANGED)) == 0)
5693 				break;
5694 		}
5695 
5696 		mac_status = tr32(MAC_STATUS);
5697 		if (!current_link_up &&
5698 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5699 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5700 			current_link_up = true;
5701 	} else {
5702 		tg3_setup_flow_control(tp, 0, 0);
5703 
5704 		/* Forcing 1000FD link up. */
5705 		current_link_up = true;
5706 
5707 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5708 		udelay(40);
5709 
5710 		tw32_f(MAC_MODE, tp->mac_mode);
5711 		udelay(40);
5712 	}
5713 
5714 out:
5715 	return current_link_up;
5716 }
5717 
5718 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5719 {
5720 	u32 orig_pause_cfg;
5721 	u32 orig_active_speed;
5722 	u8 orig_active_duplex;
5723 	u32 mac_status;
5724 	bool current_link_up;
5725 	int i;
5726 
5727 	orig_pause_cfg = tp->link_config.active_flowctrl;
5728 	orig_active_speed = tp->link_config.active_speed;
5729 	orig_active_duplex = tp->link_config.active_duplex;
5730 
5731 	if (!tg3_flag(tp, HW_AUTONEG) &&
5732 	    tp->link_up &&
5733 	    tg3_flag(tp, INIT_COMPLETE)) {
5734 		mac_status = tr32(MAC_STATUS);
5735 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5736 			       MAC_STATUS_SIGNAL_DET |
5737 			       MAC_STATUS_CFG_CHANGED |
5738 			       MAC_STATUS_RCVD_CFG);
5739 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5740 				   MAC_STATUS_SIGNAL_DET)) {
5741 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5742 					    MAC_STATUS_CFG_CHANGED));
5743 			return 0;
5744 		}
5745 	}
5746 
5747 	tw32_f(MAC_TX_AUTO_NEG, 0);
5748 
5749 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5750 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5751 	tw32_f(MAC_MODE, tp->mac_mode);
5752 	udelay(40);
5753 
5754 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5755 		tg3_init_bcm8002(tp);
5756 
5757 	/* Enable link change event even when serdes polling.  */
5758 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5759 	udelay(40);
5760 
5761 	current_link_up = false;
5762 	tp->link_config.rmt_adv = 0;
5763 	mac_status = tr32(MAC_STATUS);
5764 
5765 	if (tg3_flag(tp, HW_AUTONEG))
5766 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5767 	else
5768 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5769 
5770 	tp->napi[0].hw_status->status =
5771 		(SD_STATUS_UPDATED |
5772 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5773 
5774 	for (i = 0; i < 100; i++) {
5775 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5776 				    MAC_STATUS_CFG_CHANGED));
5777 		udelay(5);
5778 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5779 					 MAC_STATUS_CFG_CHANGED |
5780 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5781 			break;
5782 	}
5783 
5784 	mac_status = tr32(MAC_STATUS);
5785 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5786 		current_link_up = false;
5787 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5788 		    tp->serdes_counter == 0) {
5789 			tw32_f(MAC_MODE, (tp->mac_mode |
5790 					  MAC_MODE_SEND_CONFIGS));
5791 			udelay(1);
5792 			tw32_f(MAC_MODE, tp->mac_mode);
5793 		}
5794 	}
5795 
5796 	if (current_link_up) {
5797 		tp->link_config.active_speed = SPEED_1000;
5798 		tp->link_config.active_duplex = DUPLEX_FULL;
5799 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 				    LED_CTRL_LNKLED_OVERRIDE |
5801 				    LED_CTRL_1000MBPS_ON));
5802 	} else {
5803 		tp->link_config.active_speed = SPEED_UNKNOWN;
5804 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5805 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5806 				    LED_CTRL_LNKLED_OVERRIDE |
5807 				    LED_CTRL_TRAFFIC_OVERRIDE));
5808 	}
5809 
5810 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5811 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5812 		if (orig_pause_cfg != now_pause_cfg ||
5813 		    orig_active_speed != tp->link_config.active_speed ||
5814 		    orig_active_duplex != tp->link_config.active_duplex)
5815 			tg3_link_report(tp);
5816 	}
5817 
5818 	return 0;
5819 }
5820 
5821 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5822 {
5823 	int err = 0;
5824 	u32 bmsr, bmcr;
5825 	u32 current_speed = SPEED_UNKNOWN;
5826 	u8 current_duplex = DUPLEX_UNKNOWN;
5827 	bool current_link_up = false;
5828 	u32 local_adv, remote_adv, sgsr;
5829 
5830 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5831 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5832 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5833 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5834 
5835 		if (force_reset)
5836 			tg3_phy_reset(tp);
5837 
5838 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5839 
5840 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5841 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842 		} else {
5843 			current_link_up = true;
5844 			if (sgsr & SERDES_TG3_SPEED_1000) {
5845 				current_speed = SPEED_1000;
5846 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5848 				current_speed = SPEED_100;
5849 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5850 			} else {
5851 				current_speed = SPEED_10;
5852 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5853 			}
5854 
5855 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5856 				current_duplex = DUPLEX_FULL;
5857 			else
5858 				current_duplex = DUPLEX_HALF;
5859 		}
5860 
5861 		tw32_f(MAC_MODE, tp->mac_mode);
5862 		udelay(40);
5863 
5864 		tg3_clear_mac_status(tp);
5865 
5866 		goto fiber_setup_done;
5867 	}
5868 
5869 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5870 	tw32_f(MAC_MODE, tp->mac_mode);
5871 	udelay(40);
5872 
5873 	tg3_clear_mac_status(tp);
5874 
5875 	if (force_reset)
5876 		tg3_phy_reset(tp);
5877 
5878 	tp->link_config.rmt_adv = 0;
5879 
5880 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5881 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5883 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5884 			bmsr |= BMSR_LSTATUS;
5885 		else
5886 			bmsr &= ~BMSR_LSTATUS;
5887 	}
5888 
5889 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5890 
5891 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5892 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5893 		/* do nothing, just check for link up at the end */
5894 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5895 		u32 adv, newadv;
5896 
5897 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5898 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5899 				 ADVERTISE_1000XPAUSE |
5900 				 ADVERTISE_1000XPSE_ASYM |
5901 				 ADVERTISE_SLCT);
5902 
5903 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5904 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5905 
5906 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5907 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5908 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5909 			tg3_writephy(tp, MII_BMCR, bmcr);
5910 
5911 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5912 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5913 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5914 
5915 			return err;
5916 		}
5917 	} else {
5918 		u32 new_bmcr;
5919 
5920 		bmcr &= ~BMCR_SPEED1000;
5921 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5922 
5923 		if (tp->link_config.duplex == DUPLEX_FULL)
5924 			new_bmcr |= BMCR_FULLDPLX;
5925 
5926 		if (new_bmcr != bmcr) {
5927 			/* BMCR_SPEED1000 is a reserved bit that needs
5928 			 * to be set on write.
5929 			 */
5930 			new_bmcr |= BMCR_SPEED1000;
5931 
5932 			/* Force a linkdown */
5933 			if (tp->link_up) {
5934 				u32 adv;
5935 
5936 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5937 				adv &= ~(ADVERTISE_1000XFULL |
5938 					 ADVERTISE_1000XHALF |
5939 					 ADVERTISE_SLCT);
5940 				tg3_writephy(tp, MII_ADVERTISE, adv);
5941 				tg3_writephy(tp, MII_BMCR, bmcr |
5942 							   BMCR_ANRESTART |
5943 							   BMCR_ANENABLE);
5944 				udelay(10);
5945 				tg3_carrier_off(tp);
5946 			}
5947 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5948 			bmcr = new_bmcr;
5949 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5950 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5952 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5953 					bmsr |= BMSR_LSTATUS;
5954 				else
5955 					bmsr &= ~BMSR_LSTATUS;
5956 			}
5957 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958 		}
5959 	}
5960 
5961 	if (bmsr & BMSR_LSTATUS) {
5962 		current_speed = SPEED_1000;
5963 		current_link_up = true;
5964 		if (bmcr & BMCR_FULLDPLX)
5965 			current_duplex = DUPLEX_FULL;
5966 		else
5967 			current_duplex = DUPLEX_HALF;
5968 
5969 		local_adv = 0;
5970 		remote_adv = 0;
5971 
5972 		if (bmcr & BMCR_ANENABLE) {
5973 			u32 common;
5974 
5975 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5976 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5977 			common = local_adv & remote_adv;
5978 			if (common & (ADVERTISE_1000XHALF |
5979 				      ADVERTISE_1000XFULL)) {
5980 				if (common & ADVERTISE_1000XFULL)
5981 					current_duplex = DUPLEX_FULL;
5982 				else
5983 					current_duplex = DUPLEX_HALF;
5984 
5985 				tp->link_config.rmt_adv =
5986 					   mii_adv_to_ethtool_adv_x(remote_adv);
5987 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5988 				/* Link is up via parallel detect */
5989 			} else {
5990 				current_link_up = false;
5991 			}
5992 		}
5993 	}
5994 
5995 fiber_setup_done:
5996 	if (current_link_up && current_duplex == DUPLEX_FULL)
5997 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5998 
5999 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6000 	if (tp->link_config.active_duplex == DUPLEX_HALF)
6001 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6002 
6003 	tw32_f(MAC_MODE, tp->mac_mode);
6004 	udelay(40);
6005 
6006 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6007 
6008 	tp->link_config.active_speed = current_speed;
6009 	tp->link_config.active_duplex = current_duplex;
6010 
6011 	tg3_test_and_report_link_chg(tp, current_link_up);
6012 	return err;
6013 }
6014 
6015 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6016 {
6017 	if (tp->serdes_counter) {
6018 		/* Give autoneg time to complete. */
6019 		tp->serdes_counter--;
6020 		return;
6021 	}
6022 
6023 	if (!tp->link_up &&
6024 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6025 		u32 bmcr;
6026 
6027 		tg3_readphy(tp, MII_BMCR, &bmcr);
6028 		if (bmcr & BMCR_ANENABLE) {
6029 			u32 phy1, phy2;
6030 
6031 			/* Select shadow register 0x1f */
6032 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6033 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6034 
6035 			/* Select expansion interrupt status register */
6036 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 					 MII_TG3_DSP_EXP1_INT_STAT);
6038 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040 
6041 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6042 				/* We have signal detect and not receiving
6043 				 * config code words, link is up by parallel
6044 				 * detection.
6045 				 */
6046 
6047 				bmcr &= ~BMCR_ANENABLE;
6048 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6049 				tg3_writephy(tp, MII_BMCR, bmcr);
6050 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6051 			}
6052 		}
6053 	} else if (tp->link_up &&
6054 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6055 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6056 		u32 phy2;
6057 
6058 		/* Select expansion interrupt status register */
6059 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6060 				 MII_TG3_DSP_EXP1_INT_STAT);
6061 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6062 		if (phy2 & 0x20) {
6063 			u32 bmcr;
6064 
6065 			/* Config code words received, turn on autoneg. */
6066 			tg3_readphy(tp, MII_BMCR, &bmcr);
6067 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6068 
6069 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6070 
6071 		}
6072 	}
6073 }
6074 
6075 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6076 {
6077 	u32 val;
6078 	int err;
6079 
6080 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6081 		err = tg3_setup_fiber_phy(tp, force_reset);
6082 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6083 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6084 	else
6085 		err = tg3_setup_copper_phy(tp, force_reset);
6086 
6087 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6088 		u32 scale;
6089 
6090 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6091 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6092 			scale = 65;
6093 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6094 			scale = 6;
6095 		else
6096 			scale = 12;
6097 
6098 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6099 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6100 		tw32(GRC_MISC_CFG, val);
6101 	}
6102 
6103 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6104 	      (6 << TX_LENGTHS_IPG_SHIFT);
6105 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6106 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6107 		val |= tr32(MAC_TX_LENGTHS) &
6108 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6109 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6110 
6111 	if (tp->link_config.active_speed == SPEED_1000 &&
6112 	    tp->link_config.active_duplex == DUPLEX_HALF)
6113 		tw32(MAC_TX_LENGTHS, val |
6114 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6115 	else
6116 		tw32(MAC_TX_LENGTHS, val |
6117 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6118 
6119 	if (!tg3_flag(tp, 5705_PLUS)) {
6120 		if (tp->link_up) {
6121 			tw32(HOSTCC_STAT_COAL_TICKS,
6122 			     tp->coal.stats_block_coalesce_usecs);
6123 		} else {
6124 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6125 		}
6126 	}
6127 
6128 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6129 		val = tr32(PCIE_PWR_MGMT_THRESH);
6130 		if (!tp->link_up)
6131 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6132 			      tp->pwrmgmt_thresh;
6133 		else
6134 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6135 		tw32(PCIE_PWR_MGMT_THRESH, val);
6136 	}
6137 
6138 	return err;
6139 }
6140 
6141 /* tp->lock must be held */
6142 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6143 {
6144 	u64 stamp;
6145 
6146 	ptp_read_system_prets(sts);
6147 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6148 	ptp_read_system_postts(sts);
6149 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6150 
6151 	return stamp;
6152 }
6153 
6154 /* tp->lock must be held */
6155 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6156 {
6157 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6158 
6159 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6160 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6161 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6162 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6163 }
6164 
6165 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6166 static inline void tg3_full_unlock(struct tg3 *tp);
6167 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6168 {
6169 	struct tg3 *tp = netdev_priv(dev);
6170 
6171 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6172 				SOF_TIMESTAMPING_RX_SOFTWARE |
6173 				SOF_TIMESTAMPING_SOFTWARE;
6174 
6175 	if (tg3_flag(tp, PTP_CAPABLE)) {
6176 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6177 					SOF_TIMESTAMPING_RX_HARDWARE |
6178 					SOF_TIMESTAMPING_RAW_HARDWARE;
6179 	}
6180 
6181 	if (tp->ptp_clock)
6182 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6183 	else
6184 		info->phc_index = -1;
6185 
6186 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6187 
6188 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6189 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6190 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6191 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6192 	return 0;
6193 }
6194 
6195 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6196 {
6197 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6198 	bool neg_adj = false;
6199 	u32 correction = 0;
6200 
6201 	if (ppb < 0) {
6202 		neg_adj = true;
6203 		ppb = -ppb;
6204 	}
6205 
6206 	/* Frequency adjustment is performed using hardware with a 24 bit
6207 	 * accumulator and a programmable correction value. On each clk, the
6208 	 * correction value gets added to the accumulator and when it
6209 	 * overflows, the time counter is incremented/decremented.
6210 	 *
6211 	 * So conversion from ppb to correction value is
6212 	 *		ppb * (1 << 24) / 1000000000
6213 	 */
6214 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6215 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6216 
6217 	tg3_full_lock(tp, 0);
6218 
6219 	if (correction)
6220 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6221 		     TG3_EAV_REF_CLK_CORRECT_EN |
6222 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6223 	else
6224 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6225 
6226 	tg3_full_unlock(tp);
6227 
6228 	return 0;
6229 }
6230 
6231 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6232 {
6233 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6234 
6235 	tg3_full_lock(tp, 0);
6236 	tp->ptp_adjust += delta;
6237 	tg3_full_unlock(tp);
6238 
6239 	return 0;
6240 }
6241 
6242 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6243 			    struct ptp_system_timestamp *sts)
6244 {
6245 	u64 ns;
6246 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247 
6248 	tg3_full_lock(tp, 0);
6249 	ns = tg3_refclk_read(tp, sts);
6250 	ns += tp->ptp_adjust;
6251 	tg3_full_unlock(tp);
6252 
6253 	*ts = ns_to_timespec64(ns);
6254 
6255 	return 0;
6256 }
6257 
6258 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6259 			   const struct timespec64 *ts)
6260 {
6261 	u64 ns;
6262 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6263 
6264 	ns = timespec64_to_ns(ts);
6265 
6266 	tg3_full_lock(tp, 0);
6267 	tg3_refclk_write(tp, ns);
6268 	tp->ptp_adjust = 0;
6269 	tg3_full_unlock(tp);
6270 
6271 	return 0;
6272 }
6273 
6274 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6275 			  struct ptp_clock_request *rq, int on)
6276 {
6277 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6278 	u32 clock_ctl;
6279 	int rval = 0;
6280 
6281 	switch (rq->type) {
6282 	case PTP_CLK_REQ_PEROUT:
6283 		/* Reject requests with unsupported flags */
6284 		if (rq->perout.flags)
6285 			return -EOPNOTSUPP;
6286 
6287 		if (rq->perout.index != 0)
6288 			return -EINVAL;
6289 
6290 		tg3_full_lock(tp, 0);
6291 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6292 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6293 
6294 		if (on) {
6295 			u64 nsec;
6296 
6297 			nsec = rq->perout.start.sec * 1000000000ULL +
6298 			       rq->perout.start.nsec;
6299 
6300 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6301 				netdev_warn(tp->dev,
6302 					    "Device supports only a one-shot timesync output, period must be 0\n");
6303 				rval = -EINVAL;
6304 				goto err_out;
6305 			}
6306 
6307 			if (nsec & (1ULL << 63)) {
6308 				netdev_warn(tp->dev,
6309 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6310 				rval = -EINVAL;
6311 				goto err_out;
6312 			}
6313 
6314 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6315 			tw32(TG3_EAV_WATCHDOG0_MSB,
6316 			     TG3_EAV_WATCHDOG0_EN |
6317 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6318 
6319 			tw32(TG3_EAV_REF_CLCK_CTL,
6320 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6321 		} else {
6322 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6323 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6324 		}
6325 
6326 err_out:
6327 		tg3_full_unlock(tp);
6328 		return rval;
6329 
6330 	default:
6331 		break;
6332 	}
6333 
6334 	return -EOPNOTSUPP;
6335 }
6336 
6337 static const struct ptp_clock_info tg3_ptp_caps = {
6338 	.owner		= THIS_MODULE,
6339 	.name		= "tg3 clock",
6340 	.max_adj	= 250000000,
6341 	.n_alarm	= 0,
6342 	.n_ext_ts	= 0,
6343 	.n_per_out	= 1,
6344 	.n_pins		= 0,
6345 	.pps		= 0,
6346 	.adjfreq	= tg3_ptp_adjfreq,
6347 	.adjtime	= tg3_ptp_adjtime,
6348 	.gettimex64	= tg3_ptp_gettimex,
6349 	.settime64	= tg3_ptp_settime,
6350 	.enable		= tg3_ptp_enable,
6351 };
6352 
6353 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6354 				     struct skb_shared_hwtstamps *timestamp)
6355 {
6356 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6357 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6358 					   tp->ptp_adjust);
6359 }
6360 
6361 /* tp->lock must be held */
6362 static void tg3_ptp_init(struct tg3 *tp)
6363 {
6364 	if (!tg3_flag(tp, PTP_CAPABLE))
6365 		return;
6366 
6367 	/* Initialize the hardware clock to the system time. */
6368 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6369 	tp->ptp_adjust = 0;
6370 	tp->ptp_info = tg3_ptp_caps;
6371 }
6372 
6373 /* tp->lock must be held */
6374 static void tg3_ptp_resume(struct tg3 *tp)
6375 {
6376 	if (!tg3_flag(tp, PTP_CAPABLE))
6377 		return;
6378 
6379 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6380 	tp->ptp_adjust = 0;
6381 }
6382 
6383 static void tg3_ptp_fini(struct tg3 *tp)
6384 {
6385 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6386 		return;
6387 
6388 	ptp_clock_unregister(tp->ptp_clock);
6389 	tp->ptp_clock = NULL;
6390 	tp->ptp_adjust = 0;
6391 }
6392 
6393 static inline int tg3_irq_sync(struct tg3 *tp)
6394 {
6395 	return tp->irq_sync;
6396 }
6397 
6398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6399 {
6400 	int i;
6401 
6402 	dst = (u32 *)((u8 *)dst + off);
6403 	for (i = 0; i < len; i += sizeof(u32))
6404 		*dst++ = tr32(off + i);
6405 }
6406 
6407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6408 {
6409 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6410 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6411 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6412 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6413 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6414 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6415 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6416 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6417 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6418 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6419 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6420 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6421 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6422 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6423 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6424 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6425 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6426 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6427 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6428 
6429 	if (tg3_flag(tp, SUPPORT_MSIX))
6430 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6431 
6432 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6433 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6434 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6435 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6436 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6437 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6438 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6439 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6440 
6441 	if (!tg3_flag(tp, 5705_PLUS)) {
6442 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6443 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6444 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6445 	}
6446 
6447 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6448 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6449 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6450 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6451 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6452 
6453 	if (tg3_flag(tp, NVRAM))
6454 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6455 }
6456 
6457 static void tg3_dump_state(struct tg3 *tp)
6458 {
6459 	int i;
6460 	u32 *regs;
6461 
6462 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6463 	if (!regs)
6464 		return;
6465 
6466 	if (tg3_flag(tp, PCI_EXPRESS)) {
6467 		/* Read up to but not including private PCI registers */
6468 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6469 			regs[i / sizeof(u32)] = tr32(i);
6470 	} else
6471 		tg3_dump_legacy_regs(tp, regs);
6472 
6473 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6474 		if (!regs[i + 0] && !regs[i + 1] &&
6475 		    !regs[i + 2] && !regs[i + 3])
6476 			continue;
6477 
6478 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6479 			   i * 4,
6480 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6481 	}
6482 
6483 	kfree(regs);
6484 
6485 	for (i = 0; i < tp->irq_cnt; i++) {
6486 		struct tg3_napi *tnapi = &tp->napi[i];
6487 
6488 		/* SW status block */
6489 		netdev_err(tp->dev,
6490 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6491 			   i,
6492 			   tnapi->hw_status->status,
6493 			   tnapi->hw_status->status_tag,
6494 			   tnapi->hw_status->rx_jumbo_consumer,
6495 			   tnapi->hw_status->rx_consumer,
6496 			   tnapi->hw_status->rx_mini_consumer,
6497 			   tnapi->hw_status->idx[0].rx_producer,
6498 			   tnapi->hw_status->idx[0].tx_consumer);
6499 
6500 		netdev_err(tp->dev,
6501 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6502 			   i,
6503 			   tnapi->last_tag, tnapi->last_irq_tag,
6504 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6505 			   tnapi->rx_rcb_ptr,
6506 			   tnapi->prodring.rx_std_prod_idx,
6507 			   tnapi->prodring.rx_std_cons_idx,
6508 			   tnapi->prodring.rx_jmb_prod_idx,
6509 			   tnapi->prodring.rx_jmb_cons_idx);
6510 	}
6511 }
6512 
6513 /* This is called whenever we suspect that the system chipset is re-
6514  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6515  * is bogus tx completions. We try to recover by setting the
6516  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6517  * in the workqueue.
6518  */
6519 static void tg3_tx_recover(struct tg3 *tp)
6520 {
6521 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6522 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6523 
6524 	netdev_warn(tp->dev,
6525 		    "The system may be re-ordering memory-mapped I/O "
6526 		    "cycles to the network device, attempting to recover. "
6527 		    "Please report the problem to the driver maintainer "
6528 		    "and include system chipset information.\n");
6529 
6530 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6531 }
6532 
6533 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6534 {
6535 	/* Tell compiler to fetch tx indices from memory. */
6536 	barrier();
6537 	return tnapi->tx_pending -
6538 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6539 }
6540 
6541 /* Tigon3 never reports partial packet sends.  So we do not
6542  * need special logic to handle SKBs that have not had all
6543  * of their frags sent yet, like SunGEM does.
6544  */
6545 static void tg3_tx(struct tg3_napi *tnapi)
6546 {
6547 	struct tg3 *tp = tnapi->tp;
6548 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6549 	u32 sw_idx = tnapi->tx_cons;
6550 	struct netdev_queue *txq;
6551 	int index = tnapi - tp->napi;
6552 	unsigned int pkts_compl = 0, bytes_compl = 0;
6553 
6554 	if (tg3_flag(tp, ENABLE_TSS))
6555 		index--;
6556 
6557 	txq = netdev_get_tx_queue(tp->dev, index);
6558 
6559 	while (sw_idx != hw_idx) {
6560 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6561 		struct sk_buff *skb = ri->skb;
6562 		int i, tx_bug = 0;
6563 
6564 		if (unlikely(skb == NULL)) {
6565 			tg3_tx_recover(tp);
6566 			return;
6567 		}
6568 
6569 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6570 			struct skb_shared_hwtstamps timestamp;
6571 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6572 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6573 
6574 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6575 
6576 			skb_tstamp_tx(skb, &timestamp);
6577 		}
6578 
6579 		pci_unmap_single(tp->pdev,
6580 				 dma_unmap_addr(ri, mapping),
6581 				 skb_headlen(skb),
6582 				 PCI_DMA_TODEVICE);
6583 
6584 		ri->skb = NULL;
6585 
6586 		while (ri->fragmented) {
6587 			ri->fragmented = false;
6588 			sw_idx = NEXT_TX(sw_idx);
6589 			ri = &tnapi->tx_buffers[sw_idx];
6590 		}
6591 
6592 		sw_idx = NEXT_TX(sw_idx);
6593 
6594 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6595 			ri = &tnapi->tx_buffers[sw_idx];
6596 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6597 				tx_bug = 1;
6598 
6599 			pci_unmap_page(tp->pdev,
6600 				       dma_unmap_addr(ri, mapping),
6601 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6602 				       PCI_DMA_TODEVICE);
6603 
6604 			while (ri->fragmented) {
6605 				ri->fragmented = false;
6606 				sw_idx = NEXT_TX(sw_idx);
6607 				ri = &tnapi->tx_buffers[sw_idx];
6608 			}
6609 
6610 			sw_idx = NEXT_TX(sw_idx);
6611 		}
6612 
6613 		pkts_compl++;
6614 		bytes_compl += skb->len;
6615 
6616 		dev_consume_skb_any(skb);
6617 
6618 		if (unlikely(tx_bug)) {
6619 			tg3_tx_recover(tp);
6620 			return;
6621 		}
6622 	}
6623 
6624 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6625 
6626 	tnapi->tx_cons = sw_idx;
6627 
6628 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6629 	 * before checking for netif_queue_stopped().  Without the
6630 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6631 	 * will miss it and cause the queue to be stopped forever.
6632 	 */
6633 	smp_mb();
6634 
6635 	if (unlikely(netif_tx_queue_stopped(txq) &&
6636 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6637 		__netif_tx_lock(txq, smp_processor_id());
6638 		if (netif_tx_queue_stopped(txq) &&
6639 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6640 			netif_tx_wake_queue(txq);
6641 		__netif_tx_unlock(txq);
6642 	}
6643 }
6644 
6645 static void tg3_frag_free(bool is_frag, void *data)
6646 {
6647 	if (is_frag)
6648 		skb_free_frag(data);
6649 	else
6650 		kfree(data);
6651 }
6652 
6653 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6654 {
6655 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6656 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6657 
6658 	if (!ri->data)
6659 		return;
6660 
6661 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6662 			 map_sz, PCI_DMA_FROMDEVICE);
6663 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6664 	ri->data = NULL;
6665 }
6666 
6667 
6668 /* Returns size of skb allocated or < 0 on error.
6669  *
6670  * We only need to fill in the address because the other members
6671  * of the RX descriptor are invariant, see tg3_init_rings.
6672  *
6673  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6674  * posting buffers we only dirty the first cache line of the RX
6675  * descriptor (containing the address).  Whereas for the RX status
6676  * buffers the cpu only reads the last cacheline of the RX descriptor
6677  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6678  */
6679 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6680 			     u32 opaque_key, u32 dest_idx_unmasked,
6681 			     unsigned int *frag_size)
6682 {
6683 	struct tg3_rx_buffer_desc *desc;
6684 	struct ring_info *map;
6685 	u8 *data;
6686 	dma_addr_t mapping;
6687 	int skb_size, data_size, dest_idx;
6688 
6689 	switch (opaque_key) {
6690 	case RXD_OPAQUE_RING_STD:
6691 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6692 		desc = &tpr->rx_std[dest_idx];
6693 		map = &tpr->rx_std_buffers[dest_idx];
6694 		data_size = tp->rx_pkt_map_sz;
6695 		break;
6696 
6697 	case RXD_OPAQUE_RING_JUMBO:
6698 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6699 		desc = &tpr->rx_jmb[dest_idx].std;
6700 		map = &tpr->rx_jmb_buffers[dest_idx];
6701 		data_size = TG3_RX_JMB_MAP_SZ;
6702 		break;
6703 
6704 	default:
6705 		return -EINVAL;
6706 	}
6707 
6708 	/* Do not overwrite any of the map or rp information
6709 	 * until we are sure we can commit to a new buffer.
6710 	 *
6711 	 * Callers depend upon this behavior and assume that
6712 	 * we leave everything unchanged if we fail.
6713 	 */
6714 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6715 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6716 	if (skb_size <= PAGE_SIZE) {
6717 		data = napi_alloc_frag(skb_size);
6718 		*frag_size = skb_size;
6719 	} else {
6720 		data = kmalloc(skb_size, GFP_ATOMIC);
6721 		*frag_size = 0;
6722 	}
6723 	if (!data)
6724 		return -ENOMEM;
6725 
6726 	mapping = pci_map_single(tp->pdev,
6727 				 data + TG3_RX_OFFSET(tp),
6728 				 data_size,
6729 				 PCI_DMA_FROMDEVICE);
6730 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6731 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6732 		return -EIO;
6733 	}
6734 
6735 	map->data = data;
6736 	dma_unmap_addr_set(map, mapping, mapping);
6737 
6738 	desc->addr_hi = ((u64)mapping >> 32);
6739 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6740 
6741 	return data_size;
6742 }
6743 
6744 /* We only need to move over in the address because the other
6745  * members of the RX descriptor are invariant.  See notes above
6746  * tg3_alloc_rx_data for full details.
6747  */
6748 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6749 			   struct tg3_rx_prodring_set *dpr,
6750 			   u32 opaque_key, int src_idx,
6751 			   u32 dest_idx_unmasked)
6752 {
6753 	struct tg3 *tp = tnapi->tp;
6754 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6755 	struct ring_info *src_map, *dest_map;
6756 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6757 	int dest_idx;
6758 
6759 	switch (opaque_key) {
6760 	case RXD_OPAQUE_RING_STD:
6761 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6762 		dest_desc = &dpr->rx_std[dest_idx];
6763 		dest_map = &dpr->rx_std_buffers[dest_idx];
6764 		src_desc = &spr->rx_std[src_idx];
6765 		src_map = &spr->rx_std_buffers[src_idx];
6766 		break;
6767 
6768 	case RXD_OPAQUE_RING_JUMBO:
6769 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6770 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6771 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6772 		src_desc = &spr->rx_jmb[src_idx].std;
6773 		src_map = &spr->rx_jmb_buffers[src_idx];
6774 		break;
6775 
6776 	default:
6777 		return;
6778 	}
6779 
6780 	dest_map->data = src_map->data;
6781 	dma_unmap_addr_set(dest_map, mapping,
6782 			   dma_unmap_addr(src_map, mapping));
6783 	dest_desc->addr_hi = src_desc->addr_hi;
6784 	dest_desc->addr_lo = src_desc->addr_lo;
6785 
6786 	/* Ensure that the update to the skb happens after the physical
6787 	 * addresses have been transferred to the new BD location.
6788 	 */
6789 	smp_wmb();
6790 
6791 	src_map->data = NULL;
6792 }
6793 
6794 /* The RX ring scheme is composed of multiple rings which post fresh
6795  * buffers to the chip, and one special ring the chip uses to report
6796  * status back to the host.
6797  *
6798  * The special ring reports the status of received packets to the
6799  * host.  The chip does not write into the original descriptor the
6800  * RX buffer was obtained from.  The chip simply takes the original
6801  * descriptor as provided by the host, updates the status and length
6802  * field, then writes this into the next status ring entry.
6803  *
6804  * Each ring the host uses to post buffers to the chip is described
6805  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6806  * it is first placed into the on-chip ram.  When the packet's length
6807  * is known, it walks down the TG3_BDINFO entries to select the ring.
6808  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6809  * which is within the range of the new packet's length is chosen.
6810  *
6811  * The "separate ring for rx status" scheme may sound queer, but it makes
6812  * sense from a cache coherency perspective.  If only the host writes
6813  * to the buffer post rings, and only the chip writes to the rx status
6814  * rings, then cache lines never move beyond shared-modified state.
6815  * If both the host and chip were to write into the same ring, cache line
6816  * eviction could occur since both entities want it in an exclusive state.
6817  */
6818 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6819 {
6820 	struct tg3 *tp = tnapi->tp;
6821 	u32 work_mask, rx_std_posted = 0;
6822 	u32 std_prod_idx, jmb_prod_idx;
6823 	u32 sw_idx = tnapi->rx_rcb_ptr;
6824 	u16 hw_idx;
6825 	int received;
6826 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6827 
6828 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6829 	/*
6830 	 * We need to order the read of hw_idx and the read of
6831 	 * the opaque cookie.
6832 	 */
6833 	rmb();
6834 	work_mask = 0;
6835 	received = 0;
6836 	std_prod_idx = tpr->rx_std_prod_idx;
6837 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6838 	while (sw_idx != hw_idx && budget > 0) {
6839 		struct ring_info *ri;
6840 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6841 		unsigned int len;
6842 		struct sk_buff *skb;
6843 		dma_addr_t dma_addr;
6844 		u32 opaque_key, desc_idx, *post_ptr;
6845 		u8 *data;
6846 		u64 tstamp = 0;
6847 
6848 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6849 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6850 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6851 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6852 			dma_addr = dma_unmap_addr(ri, mapping);
6853 			data = ri->data;
6854 			post_ptr = &std_prod_idx;
6855 			rx_std_posted++;
6856 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6857 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6858 			dma_addr = dma_unmap_addr(ri, mapping);
6859 			data = ri->data;
6860 			post_ptr = &jmb_prod_idx;
6861 		} else
6862 			goto next_pkt_nopost;
6863 
6864 		work_mask |= opaque_key;
6865 
6866 		if (desc->err_vlan & RXD_ERR_MASK) {
6867 		drop_it:
6868 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6869 				       desc_idx, *post_ptr);
6870 		drop_it_no_recycle:
6871 			/* Other statistics kept track of by card. */
6872 			tp->rx_dropped++;
6873 			goto next_pkt;
6874 		}
6875 
6876 		prefetch(data + TG3_RX_OFFSET(tp));
6877 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6878 		      ETH_FCS_LEN;
6879 
6880 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6881 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6882 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6883 		     RXD_FLAG_PTPSTAT_PTPV2) {
6884 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6885 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6886 		}
6887 
6888 		if (len > TG3_RX_COPY_THRESH(tp)) {
6889 			int skb_size;
6890 			unsigned int frag_size;
6891 
6892 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6893 						    *post_ptr, &frag_size);
6894 			if (skb_size < 0)
6895 				goto drop_it;
6896 
6897 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6898 					 PCI_DMA_FROMDEVICE);
6899 
6900 			/* Ensure that the update to the data happens
6901 			 * after the usage of the old DMA mapping.
6902 			 */
6903 			smp_wmb();
6904 
6905 			ri->data = NULL;
6906 
6907 			skb = build_skb(data, frag_size);
6908 			if (!skb) {
6909 				tg3_frag_free(frag_size != 0, data);
6910 				goto drop_it_no_recycle;
6911 			}
6912 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6913 		} else {
6914 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6915 				       desc_idx, *post_ptr);
6916 
6917 			skb = netdev_alloc_skb(tp->dev,
6918 					       len + TG3_RAW_IP_ALIGN);
6919 			if (skb == NULL)
6920 				goto drop_it_no_recycle;
6921 
6922 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6923 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6924 			memcpy(skb->data,
6925 			       data + TG3_RX_OFFSET(tp),
6926 			       len);
6927 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6928 		}
6929 
6930 		skb_put(skb, len);
6931 		if (tstamp)
6932 			tg3_hwclock_to_timestamp(tp, tstamp,
6933 						 skb_hwtstamps(skb));
6934 
6935 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6936 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6937 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6938 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6939 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6940 		else
6941 			skb_checksum_none_assert(skb);
6942 
6943 		skb->protocol = eth_type_trans(skb, tp->dev);
6944 
6945 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6946 		    skb->protocol != htons(ETH_P_8021Q) &&
6947 		    skb->protocol != htons(ETH_P_8021AD)) {
6948 			dev_kfree_skb_any(skb);
6949 			goto drop_it_no_recycle;
6950 		}
6951 
6952 		if (desc->type_flags & RXD_FLAG_VLAN &&
6953 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6954 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6955 					       desc->err_vlan & RXD_VLAN_MASK);
6956 
6957 		napi_gro_receive(&tnapi->napi, skb);
6958 
6959 		received++;
6960 		budget--;
6961 
6962 next_pkt:
6963 		(*post_ptr)++;
6964 
6965 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6966 			tpr->rx_std_prod_idx = std_prod_idx &
6967 					       tp->rx_std_ring_mask;
6968 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6969 				     tpr->rx_std_prod_idx);
6970 			work_mask &= ~RXD_OPAQUE_RING_STD;
6971 			rx_std_posted = 0;
6972 		}
6973 next_pkt_nopost:
6974 		sw_idx++;
6975 		sw_idx &= tp->rx_ret_ring_mask;
6976 
6977 		/* Refresh hw_idx to see if there is new work */
6978 		if (sw_idx == hw_idx) {
6979 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6980 			rmb();
6981 		}
6982 	}
6983 
6984 	/* ACK the status ring. */
6985 	tnapi->rx_rcb_ptr = sw_idx;
6986 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6987 
6988 	/* Refill RX ring(s). */
6989 	if (!tg3_flag(tp, ENABLE_RSS)) {
6990 		/* Sync BD data before updating mailbox */
6991 		wmb();
6992 
6993 		if (work_mask & RXD_OPAQUE_RING_STD) {
6994 			tpr->rx_std_prod_idx = std_prod_idx &
6995 					       tp->rx_std_ring_mask;
6996 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6997 				     tpr->rx_std_prod_idx);
6998 		}
6999 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7000 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
7001 					       tp->rx_jmb_ring_mask;
7002 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7003 				     tpr->rx_jmb_prod_idx);
7004 		}
7005 	} else if (work_mask) {
7006 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7007 		 * updated before the producer indices can be updated.
7008 		 */
7009 		smp_wmb();
7010 
7011 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7012 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7013 
7014 		if (tnapi != &tp->napi[1]) {
7015 			tp->rx_refill = true;
7016 			napi_schedule(&tp->napi[1].napi);
7017 		}
7018 	}
7019 
7020 	return received;
7021 }
7022 
7023 static void tg3_poll_link(struct tg3 *tp)
7024 {
7025 	/* handle link change and other phy events */
7026 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7027 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7028 
7029 		if (sblk->status & SD_STATUS_LINK_CHG) {
7030 			sblk->status = SD_STATUS_UPDATED |
7031 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7032 			spin_lock(&tp->lock);
7033 			if (tg3_flag(tp, USE_PHYLIB)) {
7034 				tw32_f(MAC_STATUS,
7035 				     (MAC_STATUS_SYNC_CHANGED |
7036 				      MAC_STATUS_CFG_CHANGED |
7037 				      MAC_STATUS_MI_COMPLETION |
7038 				      MAC_STATUS_LNKSTATE_CHANGED));
7039 				udelay(40);
7040 			} else
7041 				tg3_setup_phy(tp, false);
7042 			spin_unlock(&tp->lock);
7043 		}
7044 	}
7045 }
7046 
7047 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7048 				struct tg3_rx_prodring_set *dpr,
7049 				struct tg3_rx_prodring_set *spr)
7050 {
7051 	u32 si, di, cpycnt, src_prod_idx;
7052 	int i, err = 0;
7053 
7054 	while (1) {
7055 		src_prod_idx = spr->rx_std_prod_idx;
7056 
7057 		/* Make sure updates to the rx_std_buffers[] entries and the
7058 		 * standard producer index are seen in the correct order.
7059 		 */
7060 		smp_rmb();
7061 
7062 		if (spr->rx_std_cons_idx == src_prod_idx)
7063 			break;
7064 
7065 		if (spr->rx_std_cons_idx < src_prod_idx)
7066 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7067 		else
7068 			cpycnt = tp->rx_std_ring_mask + 1 -
7069 				 spr->rx_std_cons_idx;
7070 
7071 		cpycnt = min(cpycnt,
7072 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7073 
7074 		si = spr->rx_std_cons_idx;
7075 		di = dpr->rx_std_prod_idx;
7076 
7077 		for (i = di; i < di + cpycnt; i++) {
7078 			if (dpr->rx_std_buffers[i].data) {
7079 				cpycnt = i - di;
7080 				err = -ENOSPC;
7081 				break;
7082 			}
7083 		}
7084 
7085 		if (!cpycnt)
7086 			break;
7087 
7088 		/* Ensure that updates to the rx_std_buffers ring and the
7089 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7090 		 * ordered correctly WRT the skb check above.
7091 		 */
7092 		smp_rmb();
7093 
7094 		memcpy(&dpr->rx_std_buffers[di],
7095 		       &spr->rx_std_buffers[si],
7096 		       cpycnt * sizeof(struct ring_info));
7097 
7098 		for (i = 0; i < cpycnt; i++, di++, si++) {
7099 			struct tg3_rx_buffer_desc *sbd, *dbd;
7100 			sbd = &spr->rx_std[si];
7101 			dbd = &dpr->rx_std[di];
7102 			dbd->addr_hi = sbd->addr_hi;
7103 			dbd->addr_lo = sbd->addr_lo;
7104 		}
7105 
7106 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7107 				       tp->rx_std_ring_mask;
7108 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7109 				       tp->rx_std_ring_mask;
7110 	}
7111 
7112 	while (1) {
7113 		src_prod_idx = spr->rx_jmb_prod_idx;
7114 
7115 		/* Make sure updates to the rx_jmb_buffers[] entries and
7116 		 * the jumbo producer index are seen in the correct order.
7117 		 */
7118 		smp_rmb();
7119 
7120 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7121 			break;
7122 
7123 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7124 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7125 		else
7126 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7127 				 spr->rx_jmb_cons_idx;
7128 
7129 		cpycnt = min(cpycnt,
7130 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7131 
7132 		si = spr->rx_jmb_cons_idx;
7133 		di = dpr->rx_jmb_prod_idx;
7134 
7135 		for (i = di; i < di + cpycnt; i++) {
7136 			if (dpr->rx_jmb_buffers[i].data) {
7137 				cpycnt = i - di;
7138 				err = -ENOSPC;
7139 				break;
7140 			}
7141 		}
7142 
7143 		if (!cpycnt)
7144 			break;
7145 
7146 		/* Ensure that updates to the rx_jmb_buffers ring and the
7147 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7148 		 * ordered correctly WRT the skb check above.
7149 		 */
7150 		smp_rmb();
7151 
7152 		memcpy(&dpr->rx_jmb_buffers[di],
7153 		       &spr->rx_jmb_buffers[si],
7154 		       cpycnt * sizeof(struct ring_info));
7155 
7156 		for (i = 0; i < cpycnt; i++, di++, si++) {
7157 			struct tg3_rx_buffer_desc *sbd, *dbd;
7158 			sbd = &spr->rx_jmb[si].std;
7159 			dbd = &dpr->rx_jmb[di].std;
7160 			dbd->addr_hi = sbd->addr_hi;
7161 			dbd->addr_lo = sbd->addr_lo;
7162 		}
7163 
7164 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7165 				       tp->rx_jmb_ring_mask;
7166 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7167 				       tp->rx_jmb_ring_mask;
7168 	}
7169 
7170 	return err;
7171 }
7172 
7173 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7174 {
7175 	struct tg3 *tp = tnapi->tp;
7176 
7177 	/* run TX completion thread */
7178 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7179 		tg3_tx(tnapi);
7180 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7181 			return work_done;
7182 	}
7183 
7184 	if (!tnapi->rx_rcb_prod_idx)
7185 		return work_done;
7186 
7187 	/* run RX thread, within the bounds set by NAPI.
7188 	 * All RX "locking" is done by ensuring outside
7189 	 * code synchronizes with tg3->napi.poll()
7190 	 */
7191 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7192 		work_done += tg3_rx(tnapi, budget - work_done);
7193 
7194 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7195 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7196 		int i, err = 0;
7197 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7198 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7199 
7200 		tp->rx_refill = false;
7201 		for (i = 1; i <= tp->rxq_cnt; i++)
7202 			err |= tg3_rx_prodring_xfer(tp, dpr,
7203 						    &tp->napi[i].prodring);
7204 
7205 		wmb();
7206 
7207 		if (std_prod_idx != dpr->rx_std_prod_idx)
7208 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7209 				     dpr->rx_std_prod_idx);
7210 
7211 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7212 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7213 				     dpr->rx_jmb_prod_idx);
7214 
7215 		if (err)
7216 			tw32_f(HOSTCC_MODE, tp->coal_now);
7217 	}
7218 
7219 	return work_done;
7220 }
7221 
7222 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7223 {
7224 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7225 		schedule_work(&tp->reset_task);
7226 }
7227 
7228 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7229 {
7230 	cancel_work_sync(&tp->reset_task);
7231 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7232 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7233 }
7234 
7235 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7236 {
7237 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7238 	struct tg3 *tp = tnapi->tp;
7239 	int work_done = 0;
7240 	struct tg3_hw_status *sblk = tnapi->hw_status;
7241 
7242 	while (1) {
7243 		work_done = tg3_poll_work(tnapi, work_done, budget);
7244 
7245 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7246 			goto tx_recovery;
7247 
7248 		if (unlikely(work_done >= budget))
7249 			break;
7250 
7251 		/* tp->last_tag is used in tg3_int_reenable() below
7252 		 * to tell the hw how much work has been processed,
7253 		 * so we must read it before checking for more work.
7254 		 */
7255 		tnapi->last_tag = sblk->status_tag;
7256 		tnapi->last_irq_tag = tnapi->last_tag;
7257 		rmb();
7258 
7259 		/* check for RX/TX work to do */
7260 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7261 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7262 
7263 			/* This test here is not race free, but will reduce
7264 			 * the number of interrupts by looping again.
7265 			 */
7266 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7267 				continue;
7268 
7269 			napi_complete_done(napi, work_done);
7270 			/* Reenable interrupts. */
7271 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7272 
7273 			/* This test here is synchronized by napi_schedule()
7274 			 * and napi_complete() to close the race condition.
7275 			 */
7276 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7277 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7278 						  HOSTCC_MODE_ENABLE |
7279 						  tnapi->coal_now);
7280 			}
7281 			break;
7282 		}
7283 	}
7284 
7285 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7286 	return work_done;
7287 
7288 tx_recovery:
7289 	/* work_done is guaranteed to be less than budget. */
7290 	napi_complete(napi);
7291 	tg3_reset_task_schedule(tp);
7292 	return work_done;
7293 }
7294 
7295 static void tg3_process_error(struct tg3 *tp)
7296 {
7297 	u32 val;
7298 	bool real_error = false;
7299 
7300 	if (tg3_flag(tp, ERROR_PROCESSED))
7301 		return;
7302 
7303 	/* Check Flow Attention register */
7304 	val = tr32(HOSTCC_FLOW_ATTN);
7305 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7306 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7307 		real_error = true;
7308 	}
7309 
7310 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7311 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7312 		real_error = true;
7313 	}
7314 
7315 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7316 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7317 		real_error = true;
7318 	}
7319 
7320 	if (!real_error)
7321 		return;
7322 
7323 	tg3_dump_state(tp);
7324 
7325 	tg3_flag_set(tp, ERROR_PROCESSED);
7326 	tg3_reset_task_schedule(tp);
7327 }
7328 
7329 static int tg3_poll(struct napi_struct *napi, int budget)
7330 {
7331 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7332 	struct tg3 *tp = tnapi->tp;
7333 	int work_done = 0;
7334 	struct tg3_hw_status *sblk = tnapi->hw_status;
7335 
7336 	while (1) {
7337 		if (sblk->status & SD_STATUS_ERROR)
7338 			tg3_process_error(tp);
7339 
7340 		tg3_poll_link(tp);
7341 
7342 		work_done = tg3_poll_work(tnapi, work_done, budget);
7343 
7344 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7345 			goto tx_recovery;
7346 
7347 		if (unlikely(work_done >= budget))
7348 			break;
7349 
7350 		if (tg3_flag(tp, TAGGED_STATUS)) {
7351 			/* tp->last_tag is used in tg3_int_reenable() below
7352 			 * to tell the hw how much work has been processed,
7353 			 * so we must read it before checking for more work.
7354 			 */
7355 			tnapi->last_tag = sblk->status_tag;
7356 			tnapi->last_irq_tag = tnapi->last_tag;
7357 			rmb();
7358 		} else
7359 			sblk->status &= ~SD_STATUS_UPDATED;
7360 
7361 		if (likely(!tg3_has_work(tnapi))) {
7362 			napi_complete_done(napi, work_done);
7363 			tg3_int_reenable(tnapi);
7364 			break;
7365 		}
7366 	}
7367 
7368 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7369 	return work_done;
7370 
7371 tx_recovery:
7372 	/* work_done is guaranteed to be less than budget. */
7373 	napi_complete(napi);
7374 	tg3_reset_task_schedule(tp);
7375 	return work_done;
7376 }
7377 
7378 static void tg3_napi_disable(struct tg3 *tp)
7379 {
7380 	int i;
7381 
7382 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7383 		napi_disable(&tp->napi[i].napi);
7384 }
7385 
7386 static void tg3_napi_enable(struct tg3 *tp)
7387 {
7388 	int i;
7389 
7390 	for (i = 0; i < tp->irq_cnt; i++)
7391 		napi_enable(&tp->napi[i].napi);
7392 }
7393 
7394 static void tg3_napi_init(struct tg3 *tp)
7395 {
7396 	int i;
7397 
7398 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7399 	for (i = 1; i < tp->irq_cnt; i++)
7400 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7401 }
7402 
7403 static void tg3_napi_fini(struct tg3 *tp)
7404 {
7405 	int i;
7406 
7407 	for (i = 0; i < tp->irq_cnt; i++)
7408 		netif_napi_del(&tp->napi[i].napi);
7409 }
7410 
7411 static inline void tg3_netif_stop(struct tg3 *tp)
7412 {
7413 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7414 	tg3_napi_disable(tp);
7415 	netif_carrier_off(tp->dev);
7416 	netif_tx_disable(tp->dev);
7417 }
7418 
7419 /* tp->lock must be held */
7420 static inline void tg3_netif_start(struct tg3 *tp)
7421 {
7422 	tg3_ptp_resume(tp);
7423 
7424 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7425 	 * appropriate so long as all callers are assured to
7426 	 * have free tx slots (such as after tg3_init_hw)
7427 	 */
7428 	netif_tx_wake_all_queues(tp->dev);
7429 
7430 	if (tp->link_up)
7431 		netif_carrier_on(tp->dev);
7432 
7433 	tg3_napi_enable(tp);
7434 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7435 	tg3_enable_ints(tp);
7436 }
7437 
7438 static void tg3_irq_quiesce(struct tg3 *tp)
7439 	__releases(tp->lock)
7440 	__acquires(tp->lock)
7441 {
7442 	int i;
7443 
7444 	BUG_ON(tp->irq_sync);
7445 
7446 	tp->irq_sync = 1;
7447 	smp_mb();
7448 
7449 	spin_unlock_bh(&tp->lock);
7450 
7451 	for (i = 0; i < tp->irq_cnt; i++)
7452 		synchronize_irq(tp->napi[i].irq_vec);
7453 
7454 	spin_lock_bh(&tp->lock);
7455 }
7456 
7457 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7458  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7459  * with as well.  Most of the time, this is not necessary except when
7460  * shutting down the device.
7461  */
7462 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7463 {
7464 	spin_lock_bh(&tp->lock);
7465 	if (irq_sync)
7466 		tg3_irq_quiesce(tp);
7467 }
7468 
7469 static inline void tg3_full_unlock(struct tg3 *tp)
7470 {
7471 	spin_unlock_bh(&tp->lock);
7472 }
7473 
7474 /* One-shot MSI handler - Chip automatically disables interrupt
7475  * after sending MSI so driver doesn't have to do it.
7476  */
7477 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7478 {
7479 	struct tg3_napi *tnapi = dev_id;
7480 	struct tg3 *tp = tnapi->tp;
7481 
7482 	prefetch(tnapi->hw_status);
7483 	if (tnapi->rx_rcb)
7484 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7485 
7486 	if (likely(!tg3_irq_sync(tp)))
7487 		napi_schedule(&tnapi->napi);
7488 
7489 	return IRQ_HANDLED;
7490 }
7491 
7492 /* MSI ISR - No need to check for interrupt sharing and no need to
7493  * flush status block and interrupt mailbox. PCI ordering rules
7494  * guarantee that MSI will arrive after the status block.
7495  */
7496 static irqreturn_t tg3_msi(int irq, void *dev_id)
7497 {
7498 	struct tg3_napi *tnapi = dev_id;
7499 	struct tg3 *tp = tnapi->tp;
7500 
7501 	prefetch(tnapi->hw_status);
7502 	if (tnapi->rx_rcb)
7503 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7504 	/*
7505 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7506 	 * chip-internal interrupt pending events.
7507 	 * Writing non-zero to intr-mbox-0 additional tells the
7508 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7509 	 * event coalescing.
7510 	 */
7511 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7512 	if (likely(!tg3_irq_sync(tp)))
7513 		napi_schedule(&tnapi->napi);
7514 
7515 	return IRQ_RETVAL(1);
7516 }
7517 
7518 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7519 {
7520 	struct tg3_napi *tnapi = dev_id;
7521 	struct tg3 *tp = tnapi->tp;
7522 	struct tg3_hw_status *sblk = tnapi->hw_status;
7523 	unsigned int handled = 1;
7524 
7525 	/* In INTx mode, it is possible for the interrupt to arrive at
7526 	 * the CPU before the status block posted prior to the interrupt.
7527 	 * Reading the PCI State register will confirm whether the
7528 	 * interrupt is ours and will flush the status block.
7529 	 */
7530 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7531 		if (tg3_flag(tp, CHIP_RESETTING) ||
7532 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7533 			handled = 0;
7534 			goto out;
7535 		}
7536 	}
7537 
7538 	/*
7539 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7540 	 * chip-internal interrupt pending events.
7541 	 * Writing non-zero to intr-mbox-0 additional tells the
7542 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7543 	 * event coalescing.
7544 	 *
7545 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7546 	 * spurious interrupts.  The flush impacts performance but
7547 	 * excessive spurious interrupts can be worse in some cases.
7548 	 */
7549 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7550 	if (tg3_irq_sync(tp))
7551 		goto out;
7552 	sblk->status &= ~SD_STATUS_UPDATED;
7553 	if (likely(tg3_has_work(tnapi))) {
7554 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7555 		napi_schedule(&tnapi->napi);
7556 	} else {
7557 		/* No work, shared interrupt perhaps?  re-enable
7558 		 * interrupts, and flush that PCI write
7559 		 */
7560 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7561 			       0x00000000);
7562 	}
7563 out:
7564 	return IRQ_RETVAL(handled);
7565 }
7566 
7567 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7568 {
7569 	struct tg3_napi *tnapi = dev_id;
7570 	struct tg3 *tp = tnapi->tp;
7571 	struct tg3_hw_status *sblk = tnapi->hw_status;
7572 	unsigned int handled = 1;
7573 
7574 	/* In INTx mode, it is possible for the interrupt to arrive at
7575 	 * the CPU before the status block posted prior to the interrupt.
7576 	 * Reading the PCI State register will confirm whether the
7577 	 * interrupt is ours and will flush the status block.
7578 	 */
7579 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7580 		if (tg3_flag(tp, CHIP_RESETTING) ||
7581 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7582 			handled = 0;
7583 			goto out;
7584 		}
7585 	}
7586 
7587 	/*
7588 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7589 	 * chip-internal interrupt pending events.
7590 	 * writing non-zero to intr-mbox-0 additional tells the
7591 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7592 	 * event coalescing.
7593 	 *
7594 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7595 	 * spurious interrupts.  The flush impacts performance but
7596 	 * excessive spurious interrupts can be worse in some cases.
7597 	 */
7598 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7599 
7600 	/*
7601 	 * In a shared interrupt configuration, sometimes other devices'
7602 	 * interrupts will scream.  We record the current status tag here
7603 	 * so that the above check can report that the screaming interrupts
7604 	 * are unhandled.  Eventually they will be silenced.
7605 	 */
7606 	tnapi->last_irq_tag = sblk->status_tag;
7607 
7608 	if (tg3_irq_sync(tp))
7609 		goto out;
7610 
7611 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7612 
7613 	napi_schedule(&tnapi->napi);
7614 
7615 out:
7616 	return IRQ_RETVAL(handled);
7617 }
7618 
7619 /* ISR for interrupt test */
7620 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7621 {
7622 	struct tg3_napi *tnapi = dev_id;
7623 	struct tg3 *tp = tnapi->tp;
7624 	struct tg3_hw_status *sblk = tnapi->hw_status;
7625 
7626 	if ((sblk->status & SD_STATUS_UPDATED) ||
7627 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7628 		tg3_disable_ints(tp);
7629 		return IRQ_RETVAL(1);
7630 	}
7631 	return IRQ_RETVAL(0);
7632 }
7633 
7634 #ifdef CONFIG_NET_POLL_CONTROLLER
7635 static void tg3_poll_controller(struct net_device *dev)
7636 {
7637 	int i;
7638 	struct tg3 *tp = netdev_priv(dev);
7639 
7640 	if (tg3_irq_sync(tp))
7641 		return;
7642 
7643 	for (i = 0; i < tp->irq_cnt; i++)
7644 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7645 }
7646 #endif
7647 
7648 static void tg3_tx_timeout(struct net_device *dev)
7649 {
7650 	struct tg3 *tp = netdev_priv(dev);
7651 
7652 	if (netif_msg_tx_err(tp)) {
7653 		netdev_err(dev, "transmit timed out, resetting\n");
7654 		tg3_dump_state(tp);
7655 	}
7656 
7657 	tg3_reset_task_schedule(tp);
7658 }
7659 
7660 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7661 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7662 {
7663 	u32 base = (u32) mapping & 0xffffffff;
7664 
7665 	return base + len + 8 < base;
7666 }
7667 
7668 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7669  * of any 4GB boundaries: 4G, 8G, etc
7670  */
7671 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672 					   u32 len, u32 mss)
7673 {
7674 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7675 		u32 base = (u32) mapping & 0xffffffff;
7676 
7677 		return ((base + len + (mss & 0x3fff)) < base);
7678 	}
7679 	return 0;
7680 }
7681 
7682 /* Test for DMA addresses > 40-bit */
7683 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7684 					  int len)
7685 {
7686 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7687 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7688 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7689 	return 0;
7690 #else
7691 	return 0;
7692 #endif
7693 }
7694 
7695 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7696 				 dma_addr_t mapping, u32 len, u32 flags,
7697 				 u32 mss, u32 vlan)
7698 {
7699 	txbd->addr_hi = ((u64) mapping >> 32);
7700 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7701 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7702 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7703 }
7704 
7705 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7706 			    dma_addr_t map, u32 len, u32 flags,
7707 			    u32 mss, u32 vlan)
7708 {
7709 	struct tg3 *tp = tnapi->tp;
7710 	bool hwbug = false;
7711 
7712 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7713 		hwbug = true;
7714 
7715 	if (tg3_4g_overflow_test(map, len))
7716 		hwbug = true;
7717 
7718 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7719 		hwbug = true;
7720 
7721 	if (tg3_40bit_overflow_test(tp, map, len))
7722 		hwbug = true;
7723 
7724 	if (tp->dma_limit) {
7725 		u32 prvidx = *entry;
7726 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7727 		while (len > tp->dma_limit && *budget) {
7728 			u32 frag_len = tp->dma_limit;
7729 			len -= tp->dma_limit;
7730 
7731 			/* Avoid the 8byte DMA problem */
7732 			if (len <= 8) {
7733 				len += tp->dma_limit / 2;
7734 				frag_len = tp->dma_limit / 2;
7735 			}
7736 
7737 			tnapi->tx_buffers[*entry].fragmented = true;
7738 
7739 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740 				      frag_len, tmp_flag, mss, vlan);
7741 			*budget -= 1;
7742 			prvidx = *entry;
7743 			*entry = NEXT_TX(*entry);
7744 
7745 			map += frag_len;
7746 		}
7747 
7748 		if (len) {
7749 			if (*budget) {
7750 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7751 					      len, flags, mss, vlan);
7752 				*budget -= 1;
7753 				*entry = NEXT_TX(*entry);
7754 			} else {
7755 				hwbug = true;
7756 				tnapi->tx_buffers[prvidx].fragmented = false;
7757 			}
7758 		}
7759 	} else {
7760 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7761 			      len, flags, mss, vlan);
7762 		*entry = NEXT_TX(*entry);
7763 	}
7764 
7765 	return hwbug;
7766 }
7767 
7768 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7769 {
7770 	int i;
7771 	struct sk_buff *skb;
7772 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7773 
7774 	skb = txb->skb;
7775 	txb->skb = NULL;
7776 
7777 	pci_unmap_single(tnapi->tp->pdev,
7778 			 dma_unmap_addr(txb, mapping),
7779 			 skb_headlen(skb),
7780 			 PCI_DMA_TODEVICE);
7781 
7782 	while (txb->fragmented) {
7783 		txb->fragmented = false;
7784 		entry = NEXT_TX(entry);
7785 		txb = &tnapi->tx_buffers[entry];
7786 	}
7787 
7788 	for (i = 0; i <= last; i++) {
7789 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7790 
7791 		entry = NEXT_TX(entry);
7792 		txb = &tnapi->tx_buffers[entry];
7793 
7794 		pci_unmap_page(tnapi->tp->pdev,
7795 			       dma_unmap_addr(txb, mapping),
7796 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7797 
7798 		while (txb->fragmented) {
7799 			txb->fragmented = false;
7800 			entry = NEXT_TX(entry);
7801 			txb = &tnapi->tx_buffers[entry];
7802 		}
7803 	}
7804 }
7805 
7806 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7807 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7808 				       struct sk_buff **pskb,
7809 				       u32 *entry, u32 *budget,
7810 				       u32 base_flags, u32 mss, u32 vlan)
7811 {
7812 	struct tg3 *tp = tnapi->tp;
7813 	struct sk_buff *new_skb, *skb = *pskb;
7814 	dma_addr_t new_addr = 0;
7815 	int ret = 0;
7816 
7817 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7818 		new_skb = skb_copy(skb, GFP_ATOMIC);
7819 	else {
7820 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7821 
7822 		new_skb = skb_copy_expand(skb,
7823 					  skb_headroom(skb) + more_headroom,
7824 					  skb_tailroom(skb), GFP_ATOMIC);
7825 	}
7826 
7827 	if (!new_skb) {
7828 		ret = -1;
7829 	} else {
7830 		/* New SKB is guaranteed to be linear. */
7831 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7832 					  PCI_DMA_TODEVICE);
7833 		/* Make sure the mapping succeeded */
7834 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7835 			dev_kfree_skb_any(new_skb);
7836 			ret = -1;
7837 		} else {
7838 			u32 save_entry = *entry;
7839 
7840 			base_flags |= TXD_FLAG_END;
7841 
7842 			tnapi->tx_buffers[*entry].skb = new_skb;
7843 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7844 					   mapping, new_addr);
7845 
7846 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7847 					    new_skb->len, base_flags,
7848 					    mss, vlan)) {
7849 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7850 				dev_kfree_skb_any(new_skb);
7851 				ret = -1;
7852 			}
7853 		}
7854 	}
7855 
7856 	dev_consume_skb_any(skb);
7857 	*pskb = new_skb;
7858 	return ret;
7859 }
7860 
7861 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7862 {
7863 	/* Check if we will never have enough descriptors,
7864 	 * as gso_segs can be more than current ring size
7865 	 */
7866 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7867 }
7868 
7869 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7870 
7871 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7872  * indicated in tg3_tx_frag_set()
7873  */
7874 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7875 		       struct netdev_queue *txq, struct sk_buff *skb)
7876 {
7877 	struct sk_buff *segs, *nskb;
7878 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7879 
7880 	/* Estimate the number of fragments in the worst case */
7881 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7882 		netif_tx_stop_queue(txq);
7883 
7884 		/* netif_tx_stop_queue() must be done before checking
7885 		 * checking tx index in tg3_tx_avail() below, because in
7886 		 * tg3_tx(), we update tx index before checking for
7887 		 * netif_tx_queue_stopped().
7888 		 */
7889 		smp_mb();
7890 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7891 			return NETDEV_TX_BUSY;
7892 
7893 		netif_tx_wake_queue(txq);
7894 	}
7895 
7896 	segs = skb_gso_segment(skb, tp->dev->features &
7897 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7898 	if (IS_ERR(segs) || !segs)
7899 		goto tg3_tso_bug_end;
7900 
7901 	do {
7902 		nskb = segs;
7903 		segs = segs->next;
7904 		nskb->next = NULL;
7905 		tg3_start_xmit(nskb, tp->dev);
7906 	} while (segs);
7907 
7908 tg3_tso_bug_end:
7909 	dev_consume_skb_any(skb);
7910 
7911 	return NETDEV_TX_OK;
7912 }
7913 
7914 /* hard_start_xmit for all devices */
7915 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7916 {
7917 	struct tg3 *tp = netdev_priv(dev);
7918 	u32 len, entry, base_flags, mss, vlan = 0;
7919 	u32 budget;
7920 	int i = -1, would_hit_hwbug;
7921 	dma_addr_t mapping;
7922 	struct tg3_napi *tnapi;
7923 	struct netdev_queue *txq;
7924 	unsigned int last;
7925 	struct iphdr *iph = NULL;
7926 	struct tcphdr *tcph = NULL;
7927 	__sum16 tcp_csum = 0, ip_csum = 0;
7928 	__be16 ip_tot_len = 0;
7929 
7930 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7931 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7932 	if (tg3_flag(tp, ENABLE_TSS))
7933 		tnapi++;
7934 
7935 	budget = tg3_tx_avail(tnapi);
7936 
7937 	/* We are running in BH disabled context with netif_tx_lock
7938 	 * and TX reclaim runs via tp->napi.poll inside of a software
7939 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7940 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7941 	 */
7942 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7943 		if (!netif_tx_queue_stopped(txq)) {
7944 			netif_tx_stop_queue(txq);
7945 
7946 			/* This is a hard error, log it. */
7947 			netdev_err(dev,
7948 				   "BUG! Tx Ring full when queue awake!\n");
7949 		}
7950 		return NETDEV_TX_BUSY;
7951 	}
7952 
7953 	entry = tnapi->tx_prod;
7954 	base_flags = 0;
7955 
7956 	mss = skb_shinfo(skb)->gso_size;
7957 	if (mss) {
7958 		u32 tcp_opt_len, hdr_len;
7959 
7960 		if (skb_cow_head(skb, 0))
7961 			goto drop;
7962 
7963 		iph = ip_hdr(skb);
7964 		tcp_opt_len = tcp_optlen(skb);
7965 
7966 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7967 
7968 		/* HW/FW can not correctly segment packets that have been
7969 		 * vlan encapsulated.
7970 		 */
7971 		if (skb->protocol == htons(ETH_P_8021Q) ||
7972 		    skb->protocol == htons(ETH_P_8021AD)) {
7973 			if (tg3_tso_bug_gso_check(tnapi, skb))
7974 				return tg3_tso_bug(tp, tnapi, txq, skb);
7975 			goto drop;
7976 		}
7977 
7978 		if (!skb_is_gso_v6(skb)) {
7979 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7980 			    tg3_flag(tp, TSO_BUG)) {
7981 				if (tg3_tso_bug_gso_check(tnapi, skb))
7982 					return tg3_tso_bug(tp, tnapi, txq, skb);
7983 				goto drop;
7984 			}
7985 			ip_csum = iph->check;
7986 			ip_tot_len = iph->tot_len;
7987 			iph->check = 0;
7988 			iph->tot_len = htons(mss + hdr_len);
7989 		}
7990 
7991 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7992 			       TXD_FLAG_CPU_POST_DMA);
7993 
7994 		tcph = tcp_hdr(skb);
7995 		tcp_csum = tcph->check;
7996 
7997 		if (tg3_flag(tp, HW_TSO_1) ||
7998 		    tg3_flag(tp, HW_TSO_2) ||
7999 		    tg3_flag(tp, HW_TSO_3)) {
8000 			tcph->check = 0;
8001 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8002 		} else {
8003 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8004 							 0, IPPROTO_TCP, 0);
8005 		}
8006 
8007 		if (tg3_flag(tp, HW_TSO_3)) {
8008 			mss |= (hdr_len & 0xc) << 12;
8009 			if (hdr_len & 0x10)
8010 				base_flags |= 0x00000010;
8011 			base_flags |= (hdr_len & 0x3e0) << 5;
8012 		} else if (tg3_flag(tp, HW_TSO_2))
8013 			mss |= hdr_len << 9;
8014 		else if (tg3_flag(tp, HW_TSO_1) ||
8015 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8016 			if (tcp_opt_len || iph->ihl > 5) {
8017 				int tsflags;
8018 
8019 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8020 				mss |= (tsflags << 11);
8021 			}
8022 		} else {
8023 			if (tcp_opt_len || iph->ihl > 5) {
8024 				int tsflags;
8025 
8026 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8027 				base_flags |= tsflags << 12;
8028 			}
8029 		}
8030 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8031 		/* HW/FW can not correctly checksum packets that have been
8032 		 * vlan encapsulated.
8033 		 */
8034 		if (skb->protocol == htons(ETH_P_8021Q) ||
8035 		    skb->protocol == htons(ETH_P_8021AD)) {
8036 			if (skb_checksum_help(skb))
8037 				goto drop;
8038 		} else  {
8039 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8040 		}
8041 	}
8042 
8043 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8044 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8045 		base_flags |= TXD_FLAG_JMB_PKT;
8046 
8047 	if (skb_vlan_tag_present(skb)) {
8048 		base_flags |= TXD_FLAG_VLAN;
8049 		vlan = skb_vlan_tag_get(skb);
8050 	}
8051 
8052 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8053 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8054 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8055 		base_flags |= TXD_FLAG_HWTSTAMP;
8056 	}
8057 
8058 	len = skb_headlen(skb);
8059 
8060 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8061 	if (pci_dma_mapping_error(tp->pdev, mapping))
8062 		goto drop;
8063 
8064 
8065 	tnapi->tx_buffers[entry].skb = skb;
8066 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8067 
8068 	would_hit_hwbug = 0;
8069 
8070 	if (tg3_flag(tp, 5701_DMA_BUG))
8071 		would_hit_hwbug = 1;
8072 
8073 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8074 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8075 			    mss, vlan)) {
8076 		would_hit_hwbug = 1;
8077 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8078 		u32 tmp_mss = mss;
8079 
8080 		if (!tg3_flag(tp, HW_TSO_1) &&
8081 		    !tg3_flag(tp, HW_TSO_2) &&
8082 		    !tg3_flag(tp, HW_TSO_3))
8083 			tmp_mss = 0;
8084 
8085 		/* Now loop through additional data
8086 		 * fragments, and queue them.
8087 		 */
8088 		last = skb_shinfo(skb)->nr_frags - 1;
8089 		for (i = 0; i <= last; i++) {
8090 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8091 
8092 			len = skb_frag_size(frag);
8093 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8094 						   len, DMA_TO_DEVICE);
8095 
8096 			tnapi->tx_buffers[entry].skb = NULL;
8097 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8098 					   mapping);
8099 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8100 				goto dma_error;
8101 
8102 			if (!budget ||
8103 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8104 					    len, base_flags |
8105 					    ((i == last) ? TXD_FLAG_END : 0),
8106 					    tmp_mss, vlan)) {
8107 				would_hit_hwbug = 1;
8108 				break;
8109 			}
8110 		}
8111 	}
8112 
8113 	if (would_hit_hwbug) {
8114 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8115 
8116 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8117 			/* If it's a TSO packet, do GSO instead of
8118 			 * allocating and copying to a large linear SKB
8119 			 */
8120 			if (ip_tot_len) {
8121 				iph->check = ip_csum;
8122 				iph->tot_len = ip_tot_len;
8123 			}
8124 			tcph->check = tcp_csum;
8125 			return tg3_tso_bug(tp, tnapi, txq, skb);
8126 		}
8127 
8128 		/* If the workaround fails due to memory/mapping
8129 		 * failure, silently drop this packet.
8130 		 */
8131 		entry = tnapi->tx_prod;
8132 		budget = tg3_tx_avail(tnapi);
8133 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8134 						base_flags, mss, vlan))
8135 			goto drop_nofree;
8136 	}
8137 
8138 	skb_tx_timestamp(skb);
8139 	netdev_tx_sent_queue(txq, skb->len);
8140 
8141 	/* Sync BD data before updating mailbox */
8142 	wmb();
8143 
8144 	tnapi->tx_prod = entry;
8145 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8146 		netif_tx_stop_queue(txq);
8147 
8148 		/* netif_tx_stop_queue() must be done before checking
8149 		 * checking tx index in tg3_tx_avail() below, because in
8150 		 * tg3_tx(), we update tx index before checking for
8151 		 * netif_tx_queue_stopped().
8152 		 */
8153 		smp_mb();
8154 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8155 			netif_tx_wake_queue(txq);
8156 	}
8157 
8158 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8159 		/* Packets are ready, update Tx producer idx on card. */
8160 		tw32_tx_mbox(tnapi->prodmbox, entry);
8161 	}
8162 
8163 	return NETDEV_TX_OK;
8164 
8165 dma_error:
8166 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8167 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8168 drop:
8169 	dev_kfree_skb_any(skb);
8170 drop_nofree:
8171 	tp->tx_dropped++;
8172 	return NETDEV_TX_OK;
8173 }
8174 
8175 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8176 {
8177 	if (enable) {
8178 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8179 				  MAC_MODE_PORT_MODE_MASK);
8180 
8181 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8182 
8183 		if (!tg3_flag(tp, 5705_PLUS))
8184 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8185 
8186 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8187 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8188 		else
8189 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8190 	} else {
8191 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8192 
8193 		if (tg3_flag(tp, 5705_PLUS) ||
8194 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8195 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8196 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8197 	}
8198 
8199 	tw32(MAC_MODE, tp->mac_mode);
8200 	udelay(40);
8201 }
8202 
8203 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8204 {
8205 	u32 val, bmcr, mac_mode, ptest = 0;
8206 
8207 	tg3_phy_toggle_apd(tp, false);
8208 	tg3_phy_toggle_automdix(tp, false);
8209 
8210 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8211 		return -EIO;
8212 
8213 	bmcr = BMCR_FULLDPLX;
8214 	switch (speed) {
8215 	case SPEED_10:
8216 		break;
8217 	case SPEED_100:
8218 		bmcr |= BMCR_SPEED100;
8219 		break;
8220 	case SPEED_1000:
8221 	default:
8222 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8223 			speed = SPEED_100;
8224 			bmcr |= BMCR_SPEED100;
8225 		} else {
8226 			speed = SPEED_1000;
8227 			bmcr |= BMCR_SPEED1000;
8228 		}
8229 	}
8230 
8231 	if (extlpbk) {
8232 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8233 			tg3_readphy(tp, MII_CTRL1000, &val);
8234 			val |= CTL1000_AS_MASTER |
8235 			       CTL1000_ENABLE_MASTER;
8236 			tg3_writephy(tp, MII_CTRL1000, val);
8237 		} else {
8238 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8239 				MII_TG3_FET_PTEST_TRIM_2;
8240 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8241 		}
8242 	} else
8243 		bmcr |= BMCR_LOOPBACK;
8244 
8245 	tg3_writephy(tp, MII_BMCR, bmcr);
8246 
8247 	/* The write needs to be flushed for the FETs */
8248 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8249 		tg3_readphy(tp, MII_BMCR, &bmcr);
8250 
8251 	udelay(40);
8252 
8253 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8254 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8255 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8256 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8257 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8258 
8259 		/* The write needs to be flushed for the AC131 */
8260 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8261 	}
8262 
8263 	/* Reset to prevent losing 1st rx packet intermittently */
8264 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8265 	    tg3_flag(tp, 5780_CLASS)) {
8266 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8267 		udelay(10);
8268 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8269 	}
8270 
8271 	mac_mode = tp->mac_mode &
8272 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8273 	if (speed == SPEED_1000)
8274 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8275 	else
8276 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8277 
8278 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8279 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8280 
8281 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8282 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8283 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8284 			mac_mode |= MAC_MODE_LINK_POLARITY;
8285 
8286 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8287 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8288 	}
8289 
8290 	tw32(MAC_MODE, mac_mode);
8291 	udelay(40);
8292 
8293 	return 0;
8294 }
8295 
8296 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8297 {
8298 	struct tg3 *tp = netdev_priv(dev);
8299 
8300 	if (features & NETIF_F_LOOPBACK) {
8301 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8302 			return;
8303 
8304 		spin_lock_bh(&tp->lock);
8305 		tg3_mac_loopback(tp, true);
8306 		netif_carrier_on(tp->dev);
8307 		spin_unlock_bh(&tp->lock);
8308 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8309 	} else {
8310 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8311 			return;
8312 
8313 		spin_lock_bh(&tp->lock);
8314 		tg3_mac_loopback(tp, false);
8315 		/* Force link status check */
8316 		tg3_setup_phy(tp, true);
8317 		spin_unlock_bh(&tp->lock);
8318 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8319 	}
8320 }
8321 
8322 static netdev_features_t tg3_fix_features(struct net_device *dev,
8323 	netdev_features_t features)
8324 {
8325 	struct tg3 *tp = netdev_priv(dev);
8326 
8327 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8328 		features &= ~NETIF_F_ALL_TSO;
8329 
8330 	return features;
8331 }
8332 
8333 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8334 {
8335 	netdev_features_t changed = dev->features ^ features;
8336 
8337 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8338 		tg3_set_loopback(dev, features);
8339 
8340 	return 0;
8341 }
8342 
8343 static void tg3_rx_prodring_free(struct tg3 *tp,
8344 				 struct tg3_rx_prodring_set *tpr)
8345 {
8346 	int i;
8347 
8348 	if (tpr != &tp->napi[0].prodring) {
8349 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8350 		     i = (i + 1) & tp->rx_std_ring_mask)
8351 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8352 					tp->rx_pkt_map_sz);
8353 
8354 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8355 			for (i = tpr->rx_jmb_cons_idx;
8356 			     i != tpr->rx_jmb_prod_idx;
8357 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8358 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8359 						TG3_RX_JMB_MAP_SZ);
8360 			}
8361 		}
8362 
8363 		return;
8364 	}
8365 
8366 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8367 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8368 				tp->rx_pkt_map_sz);
8369 
8370 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8371 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8372 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8373 					TG3_RX_JMB_MAP_SZ);
8374 	}
8375 }
8376 
8377 /* Initialize rx rings for packet processing.
8378  *
8379  * The chip has been shut down and the driver detached from
8380  * the networking, so no interrupts or new tx packets will
8381  * end up in the driver.  tp->{tx,}lock are held and thus
8382  * we may not sleep.
8383  */
8384 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8385 				 struct tg3_rx_prodring_set *tpr)
8386 {
8387 	u32 i, rx_pkt_dma_sz;
8388 
8389 	tpr->rx_std_cons_idx = 0;
8390 	tpr->rx_std_prod_idx = 0;
8391 	tpr->rx_jmb_cons_idx = 0;
8392 	tpr->rx_jmb_prod_idx = 0;
8393 
8394 	if (tpr != &tp->napi[0].prodring) {
8395 		memset(&tpr->rx_std_buffers[0], 0,
8396 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8397 		if (tpr->rx_jmb_buffers)
8398 			memset(&tpr->rx_jmb_buffers[0], 0,
8399 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8400 		goto done;
8401 	}
8402 
8403 	/* Zero out all descriptors. */
8404 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8405 
8406 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8407 	if (tg3_flag(tp, 5780_CLASS) &&
8408 	    tp->dev->mtu > ETH_DATA_LEN)
8409 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8410 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8411 
8412 	/* Initialize invariants of the rings, we only set this
8413 	 * stuff once.  This works because the card does not
8414 	 * write into the rx buffer posting rings.
8415 	 */
8416 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8417 		struct tg3_rx_buffer_desc *rxd;
8418 
8419 		rxd = &tpr->rx_std[i];
8420 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8421 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8422 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8423 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8424 	}
8425 
8426 	/* Now allocate fresh SKBs for each rx ring. */
8427 	for (i = 0; i < tp->rx_pending; i++) {
8428 		unsigned int frag_size;
8429 
8430 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8431 				      &frag_size) < 0) {
8432 			netdev_warn(tp->dev,
8433 				    "Using a smaller RX standard ring. Only "
8434 				    "%d out of %d buffers were allocated "
8435 				    "successfully\n", i, tp->rx_pending);
8436 			if (i == 0)
8437 				goto initfail;
8438 			tp->rx_pending = i;
8439 			break;
8440 		}
8441 	}
8442 
8443 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8444 		goto done;
8445 
8446 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8447 
8448 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8449 		goto done;
8450 
8451 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8452 		struct tg3_rx_buffer_desc *rxd;
8453 
8454 		rxd = &tpr->rx_jmb[i].std;
8455 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8456 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8457 				  RXD_FLAG_JUMBO;
8458 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8459 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8460 	}
8461 
8462 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8463 		unsigned int frag_size;
8464 
8465 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8466 				      &frag_size) < 0) {
8467 			netdev_warn(tp->dev,
8468 				    "Using a smaller RX jumbo ring. Only %d "
8469 				    "out of %d buffers were allocated "
8470 				    "successfully\n", i, tp->rx_jumbo_pending);
8471 			if (i == 0)
8472 				goto initfail;
8473 			tp->rx_jumbo_pending = i;
8474 			break;
8475 		}
8476 	}
8477 
8478 done:
8479 	return 0;
8480 
8481 initfail:
8482 	tg3_rx_prodring_free(tp, tpr);
8483 	return -ENOMEM;
8484 }
8485 
8486 static void tg3_rx_prodring_fini(struct tg3 *tp,
8487 				 struct tg3_rx_prodring_set *tpr)
8488 {
8489 	kfree(tpr->rx_std_buffers);
8490 	tpr->rx_std_buffers = NULL;
8491 	kfree(tpr->rx_jmb_buffers);
8492 	tpr->rx_jmb_buffers = NULL;
8493 	if (tpr->rx_std) {
8494 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8495 				  tpr->rx_std, tpr->rx_std_mapping);
8496 		tpr->rx_std = NULL;
8497 	}
8498 	if (tpr->rx_jmb) {
8499 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8500 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8501 		tpr->rx_jmb = NULL;
8502 	}
8503 }
8504 
8505 static int tg3_rx_prodring_init(struct tg3 *tp,
8506 				struct tg3_rx_prodring_set *tpr)
8507 {
8508 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8509 				      GFP_KERNEL);
8510 	if (!tpr->rx_std_buffers)
8511 		return -ENOMEM;
8512 
8513 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8514 					 TG3_RX_STD_RING_BYTES(tp),
8515 					 &tpr->rx_std_mapping,
8516 					 GFP_KERNEL);
8517 	if (!tpr->rx_std)
8518 		goto err_out;
8519 
8520 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8521 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8522 					      GFP_KERNEL);
8523 		if (!tpr->rx_jmb_buffers)
8524 			goto err_out;
8525 
8526 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8527 						 TG3_RX_JMB_RING_BYTES(tp),
8528 						 &tpr->rx_jmb_mapping,
8529 						 GFP_KERNEL);
8530 		if (!tpr->rx_jmb)
8531 			goto err_out;
8532 	}
8533 
8534 	return 0;
8535 
8536 err_out:
8537 	tg3_rx_prodring_fini(tp, tpr);
8538 	return -ENOMEM;
8539 }
8540 
8541 /* Free up pending packets in all rx/tx rings.
8542  *
8543  * The chip has been shut down and the driver detached from
8544  * the networking, so no interrupts or new tx packets will
8545  * end up in the driver.  tp->{tx,}lock is not held and we are not
8546  * in an interrupt context and thus may sleep.
8547  */
8548 static void tg3_free_rings(struct tg3 *tp)
8549 {
8550 	int i, j;
8551 
8552 	for (j = 0; j < tp->irq_cnt; j++) {
8553 		struct tg3_napi *tnapi = &tp->napi[j];
8554 
8555 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8556 
8557 		if (!tnapi->tx_buffers)
8558 			continue;
8559 
8560 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8561 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8562 
8563 			if (!skb)
8564 				continue;
8565 
8566 			tg3_tx_skb_unmap(tnapi, i,
8567 					 skb_shinfo(skb)->nr_frags - 1);
8568 
8569 			dev_consume_skb_any(skb);
8570 		}
8571 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8572 	}
8573 }
8574 
8575 /* Initialize tx/rx rings for packet processing.
8576  *
8577  * The chip has been shut down and the driver detached from
8578  * the networking, so no interrupts or new tx packets will
8579  * end up in the driver.  tp->{tx,}lock are held and thus
8580  * we may not sleep.
8581  */
8582 static int tg3_init_rings(struct tg3 *tp)
8583 {
8584 	int i;
8585 
8586 	/* Free up all the SKBs. */
8587 	tg3_free_rings(tp);
8588 
8589 	for (i = 0; i < tp->irq_cnt; i++) {
8590 		struct tg3_napi *tnapi = &tp->napi[i];
8591 
8592 		tnapi->last_tag = 0;
8593 		tnapi->last_irq_tag = 0;
8594 		tnapi->hw_status->status = 0;
8595 		tnapi->hw_status->status_tag = 0;
8596 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8597 
8598 		tnapi->tx_prod = 0;
8599 		tnapi->tx_cons = 0;
8600 		if (tnapi->tx_ring)
8601 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8602 
8603 		tnapi->rx_rcb_ptr = 0;
8604 		if (tnapi->rx_rcb)
8605 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8606 
8607 		if (tnapi->prodring.rx_std &&
8608 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8609 			tg3_free_rings(tp);
8610 			return -ENOMEM;
8611 		}
8612 	}
8613 
8614 	return 0;
8615 }
8616 
8617 static void tg3_mem_tx_release(struct tg3 *tp)
8618 {
8619 	int i;
8620 
8621 	for (i = 0; i < tp->irq_max; i++) {
8622 		struct tg3_napi *tnapi = &tp->napi[i];
8623 
8624 		if (tnapi->tx_ring) {
8625 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8626 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8627 			tnapi->tx_ring = NULL;
8628 		}
8629 
8630 		kfree(tnapi->tx_buffers);
8631 		tnapi->tx_buffers = NULL;
8632 	}
8633 }
8634 
8635 static int tg3_mem_tx_acquire(struct tg3 *tp)
8636 {
8637 	int i;
8638 	struct tg3_napi *tnapi = &tp->napi[0];
8639 
8640 	/* If multivector TSS is enabled, vector 0 does not handle
8641 	 * tx interrupts.  Don't allocate any resources for it.
8642 	 */
8643 	if (tg3_flag(tp, ENABLE_TSS))
8644 		tnapi++;
8645 
8646 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8647 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8648 					    sizeof(struct tg3_tx_ring_info),
8649 					    GFP_KERNEL);
8650 		if (!tnapi->tx_buffers)
8651 			goto err_out;
8652 
8653 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8654 						    TG3_TX_RING_BYTES,
8655 						    &tnapi->tx_desc_mapping,
8656 						    GFP_KERNEL);
8657 		if (!tnapi->tx_ring)
8658 			goto err_out;
8659 	}
8660 
8661 	return 0;
8662 
8663 err_out:
8664 	tg3_mem_tx_release(tp);
8665 	return -ENOMEM;
8666 }
8667 
8668 static void tg3_mem_rx_release(struct tg3 *tp)
8669 {
8670 	int i;
8671 
8672 	for (i = 0; i < tp->irq_max; i++) {
8673 		struct tg3_napi *tnapi = &tp->napi[i];
8674 
8675 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8676 
8677 		if (!tnapi->rx_rcb)
8678 			continue;
8679 
8680 		dma_free_coherent(&tp->pdev->dev,
8681 				  TG3_RX_RCB_RING_BYTES(tp),
8682 				  tnapi->rx_rcb,
8683 				  tnapi->rx_rcb_mapping);
8684 		tnapi->rx_rcb = NULL;
8685 	}
8686 }
8687 
8688 static int tg3_mem_rx_acquire(struct tg3 *tp)
8689 {
8690 	unsigned int i, limit;
8691 
8692 	limit = tp->rxq_cnt;
8693 
8694 	/* If RSS is enabled, we need a (dummy) producer ring
8695 	 * set on vector zero.  This is the true hw prodring.
8696 	 */
8697 	if (tg3_flag(tp, ENABLE_RSS))
8698 		limit++;
8699 
8700 	for (i = 0; i < limit; i++) {
8701 		struct tg3_napi *tnapi = &tp->napi[i];
8702 
8703 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8704 			goto err_out;
8705 
8706 		/* If multivector RSS is enabled, vector 0
8707 		 * does not handle rx or tx interrupts.
8708 		 * Don't allocate any resources for it.
8709 		 */
8710 		if (!i && tg3_flag(tp, ENABLE_RSS))
8711 			continue;
8712 
8713 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8714 						   TG3_RX_RCB_RING_BYTES(tp),
8715 						   &tnapi->rx_rcb_mapping,
8716 						   GFP_KERNEL);
8717 		if (!tnapi->rx_rcb)
8718 			goto err_out;
8719 	}
8720 
8721 	return 0;
8722 
8723 err_out:
8724 	tg3_mem_rx_release(tp);
8725 	return -ENOMEM;
8726 }
8727 
8728 /*
8729  * Must not be invoked with interrupt sources disabled and
8730  * the hardware shutdown down.
8731  */
8732 static void tg3_free_consistent(struct tg3 *tp)
8733 {
8734 	int i;
8735 
8736 	for (i = 0; i < tp->irq_cnt; i++) {
8737 		struct tg3_napi *tnapi = &tp->napi[i];
8738 
8739 		if (tnapi->hw_status) {
8740 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8741 					  tnapi->hw_status,
8742 					  tnapi->status_mapping);
8743 			tnapi->hw_status = NULL;
8744 		}
8745 	}
8746 
8747 	tg3_mem_rx_release(tp);
8748 	tg3_mem_tx_release(tp);
8749 
8750 	/* tp->hw_stats can be referenced safely:
8751 	 *     1. under rtnl_lock
8752 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8753 	 */
8754 	if (tp->hw_stats) {
8755 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8756 				  tp->hw_stats, tp->stats_mapping);
8757 		tp->hw_stats = NULL;
8758 	}
8759 }
8760 
8761 /*
8762  * Must not be invoked with interrupt sources disabled and
8763  * the hardware shutdown down.  Can sleep.
8764  */
8765 static int tg3_alloc_consistent(struct tg3 *tp)
8766 {
8767 	int i;
8768 
8769 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8770 					  sizeof(struct tg3_hw_stats),
8771 					  &tp->stats_mapping, GFP_KERNEL);
8772 	if (!tp->hw_stats)
8773 		goto err_out;
8774 
8775 	for (i = 0; i < tp->irq_cnt; i++) {
8776 		struct tg3_napi *tnapi = &tp->napi[i];
8777 		struct tg3_hw_status *sblk;
8778 
8779 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8780 						      TG3_HW_STATUS_SIZE,
8781 						      &tnapi->status_mapping,
8782 						      GFP_KERNEL);
8783 		if (!tnapi->hw_status)
8784 			goto err_out;
8785 
8786 		sblk = tnapi->hw_status;
8787 
8788 		if (tg3_flag(tp, ENABLE_RSS)) {
8789 			u16 *prodptr = NULL;
8790 
8791 			/*
8792 			 * When RSS is enabled, the status block format changes
8793 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8794 			 * and "rx_mini_consumer" members get mapped to the
8795 			 * other three rx return ring producer indexes.
8796 			 */
8797 			switch (i) {
8798 			case 1:
8799 				prodptr = &sblk->idx[0].rx_producer;
8800 				break;
8801 			case 2:
8802 				prodptr = &sblk->rx_jumbo_consumer;
8803 				break;
8804 			case 3:
8805 				prodptr = &sblk->reserved;
8806 				break;
8807 			case 4:
8808 				prodptr = &sblk->rx_mini_consumer;
8809 				break;
8810 			}
8811 			tnapi->rx_rcb_prod_idx = prodptr;
8812 		} else {
8813 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8814 		}
8815 	}
8816 
8817 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8818 		goto err_out;
8819 
8820 	return 0;
8821 
8822 err_out:
8823 	tg3_free_consistent(tp);
8824 	return -ENOMEM;
8825 }
8826 
8827 #define MAX_WAIT_CNT 1000
8828 
8829 /* To stop a block, clear the enable bit and poll till it
8830  * clears.  tp->lock is held.
8831  */
8832 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8833 {
8834 	unsigned int i;
8835 	u32 val;
8836 
8837 	if (tg3_flag(tp, 5705_PLUS)) {
8838 		switch (ofs) {
8839 		case RCVLSC_MODE:
8840 		case DMAC_MODE:
8841 		case MBFREE_MODE:
8842 		case BUFMGR_MODE:
8843 		case MEMARB_MODE:
8844 			/* We can't enable/disable these bits of the
8845 			 * 5705/5750, just say success.
8846 			 */
8847 			return 0;
8848 
8849 		default:
8850 			break;
8851 		}
8852 	}
8853 
8854 	val = tr32(ofs);
8855 	val &= ~enable_bit;
8856 	tw32_f(ofs, val);
8857 
8858 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8859 		if (pci_channel_offline(tp->pdev)) {
8860 			dev_err(&tp->pdev->dev,
8861 				"tg3_stop_block device offline, "
8862 				"ofs=%lx enable_bit=%x\n",
8863 				ofs, enable_bit);
8864 			return -ENODEV;
8865 		}
8866 
8867 		udelay(100);
8868 		val = tr32(ofs);
8869 		if ((val & enable_bit) == 0)
8870 			break;
8871 	}
8872 
8873 	if (i == MAX_WAIT_CNT && !silent) {
8874 		dev_err(&tp->pdev->dev,
8875 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8876 			ofs, enable_bit);
8877 		return -ENODEV;
8878 	}
8879 
8880 	return 0;
8881 }
8882 
8883 /* tp->lock is held. */
8884 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8885 {
8886 	int i, err;
8887 
8888 	tg3_disable_ints(tp);
8889 
8890 	if (pci_channel_offline(tp->pdev)) {
8891 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8892 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8893 		err = -ENODEV;
8894 		goto err_no_dev;
8895 	}
8896 
8897 	tp->rx_mode &= ~RX_MODE_ENABLE;
8898 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8899 	udelay(10);
8900 
8901 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8903 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8904 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8905 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8906 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8907 
8908 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8909 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8910 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8911 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8912 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8913 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8914 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8915 
8916 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8917 	tw32_f(MAC_MODE, tp->mac_mode);
8918 	udelay(40);
8919 
8920 	tp->tx_mode &= ~TX_MODE_ENABLE;
8921 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8922 
8923 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8924 		udelay(100);
8925 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8926 			break;
8927 	}
8928 	if (i >= MAX_WAIT_CNT) {
8929 		dev_err(&tp->pdev->dev,
8930 			"%s timed out, TX_MODE_ENABLE will not clear "
8931 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8932 		err |= -ENODEV;
8933 	}
8934 
8935 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8936 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8937 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8938 
8939 	tw32(FTQ_RESET, 0xffffffff);
8940 	tw32(FTQ_RESET, 0x00000000);
8941 
8942 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8943 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8944 
8945 err_no_dev:
8946 	for (i = 0; i < tp->irq_cnt; i++) {
8947 		struct tg3_napi *tnapi = &tp->napi[i];
8948 		if (tnapi->hw_status)
8949 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8950 	}
8951 
8952 	return err;
8953 }
8954 
8955 /* Save PCI command register before chip reset */
8956 static void tg3_save_pci_state(struct tg3 *tp)
8957 {
8958 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8959 }
8960 
8961 /* Restore PCI state after chip reset */
8962 static void tg3_restore_pci_state(struct tg3 *tp)
8963 {
8964 	u32 val;
8965 
8966 	/* Re-enable indirect register accesses. */
8967 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8968 			       tp->misc_host_ctrl);
8969 
8970 	/* Set MAX PCI retry to zero. */
8971 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8972 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8973 	    tg3_flag(tp, PCIX_MODE))
8974 		val |= PCISTATE_RETRY_SAME_DMA;
8975 	/* Allow reads and writes to the APE register and memory space. */
8976 	if (tg3_flag(tp, ENABLE_APE))
8977 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8978 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8979 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8980 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8981 
8982 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8983 
8984 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8985 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8986 				      tp->pci_cacheline_sz);
8987 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8988 				      tp->pci_lat_timer);
8989 	}
8990 
8991 	/* Make sure PCI-X relaxed ordering bit is clear. */
8992 	if (tg3_flag(tp, PCIX_MODE)) {
8993 		u16 pcix_cmd;
8994 
8995 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8996 				     &pcix_cmd);
8997 		pcix_cmd &= ~PCI_X_CMD_ERO;
8998 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8999 				      pcix_cmd);
9000 	}
9001 
9002 	if (tg3_flag(tp, 5780_CLASS)) {
9003 
9004 		/* Chip reset on 5780 will reset MSI enable bit,
9005 		 * so need to restore it.
9006 		 */
9007 		if (tg3_flag(tp, USING_MSI)) {
9008 			u16 ctrl;
9009 
9010 			pci_read_config_word(tp->pdev,
9011 					     tp->msi_cap + PCI_MSI_FLAGS,
9012 					     &ctrl);
9013 			pci_write_config_word(tp->pdev,
9014 					      tp->msi_cap + PCI_MSI_FLAGS,
9015 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9016 			val = tr32(MSGINT_MODE);
9017 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9018 		}
9019 	}
9020 }
9021 
9022 static void tg3_override_clk(struct tg3 *tp)
9023 {
9024 	u32 val;
9025 
9026 	switch (tg3_asic_rev(tp)) {
9027 	case ASIC_REV_5717:
9028 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9029 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9030 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9031 		break;
9032 
9033 	case ASIC_REV_5719:
9034 	case ASIC_REV_5720:
9035 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9036 		break;
9037 
9038 	default:
9039 		return;
9040 	}
9041 }
9042 
9043 static void tg3_restore_clk(struct tg3 *tp)
9044 {
9045 	u32 val;
9046 
9047 	switch (tg3_asic_rev(tp)) {
9048 	case ASIC_REV_5717:
9049 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9050 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9051 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9052 		break;
9053 
9054 	case ASIC_REV_5719:
9055 	case ASIC_REV_5720:
9056 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9057 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9058 		break;
9059 
9060 	default:
9061 		return;
9062 	}
9063 }
9064 
9065 /* tp->lock is held. */
9066 static int tg3_chip_reset(struct tg3 *tp)
9067 	__releases(tp->lock)
9068 	__acquires(tp->lock)
9069 {
9070 	u32 val;
9071 	void (*write_op)(struct tg3 *, u32, u32);
9072 	int i, err;
9073 
9074 	if (!pci_device_is_present(tp->pdev))
9075 		return -ENODEV;
9076 
9077 	tg3_nvram_lock(tp);
9078 
9079 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9080 
9081 	/* No matching tg3_nvram_unlock() after this because
9082 	 * chip reset below will undo the nvram lock.
9083 	 */
9084 	tp->nvram_lock_cnt = 0;
9085 
9086 	/* GRC_MISC_CFG core clock reset will clear the memory
9087 	 * enable bit in PCI register 4 and the MSI enable bit
9088 	 * on some chips, so we save relevant registers here.
9089 	 */
9090 	tg3_save_pci_state(tp);
9091 
9092 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9093 	    tg3_flag(tp, 5755_PLUS))
9094 		tw32(GRC_FASTBOOT_PC, 0);
9095 
9096 	/*
9097 	 * We must avoid the readl() that normally takes place.
9098 	 * It locks machines, causes machine checks, and other
9099 	 * fun things.  So, temporarily disable the 5701
9100 	 * hardware workaround, while we do the reset.
9101 	 */
9102 	write_op = tp->write32;
9103 	if (write_op == tg3_write_flush_reg32)
9104 		tp->write32 = tg3_write32;
9105 
9106 	/* Prevent the irq handler from reading or writing PCI registers
9107 	 * during chip reset when the memory enable bit in the PCI command
9108 	 * register may be cleared.  The chip does not generate interrupt
9109 	 * at this time, but the irq handler may still be called due to irq
9110 	 * sharing or irqpoll.
9111 	 */
9112 	tg3_flag_set(tp, CHIP_RESETTING);
9113 	for (i = 0; i < tp->irq_cnt; i++) {
9114 		struct tg3_napi *tnapi = &tp->napi[i];
9115 		if (tnapi->hw_status) {
9116 			tnapi->hw_status->status = 0;
9117 			tnapi->hw_status->status_tag = 0;
9118 		}
9119 		tnapi->last_tag = 0;
9120 		tnapi->last_irq_tag = 0;
9121 	}
9122 	smp_mb();
9123 
9124 	tg3_full_unlock(tp);
9125 
9126 	for (i = 0; i < tp->irq_cnt; i++)
9127 		synchronize_irq(tp->napi[i].irq_vec);
9128 
9129 	tg3_full_lock(tp, 0);
9130 
9131 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9132 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9133 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9134 	}
9135 
9136 	/* do the reset */
9137 	val = GRC_MISC_CFG_CORECLK_RESET;
9138 
9139 	if (tg3_flag(tp, PCI_EXPRESS)) {
9140 		/* Force PCIe 1.0a mode */
9141 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9142 		    !tg3_flag(tp, 57765_PLUS) &&
9143 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9144 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9145 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9146 
9147 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9148 			tw32(GRC_MISC_CFG, (1 << 29));
9149 			val |= (1 << 29);
9150 		}
9151 	}
9152 
9153 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9154 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9155 		tw32(GRC_VCPU_EXT_CTRL,
9156 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9157 	}
9158 
9159 	/* Set the clock to the highest frequency to avoid timeouts. With link
9160 	 * aware mode, the clock speed could be slow and bootcode does not
9161 	 * complete within the expected time. Override the clock to allow the
9162 	 * bootcode to finish sooner and then restore it.
9163 	 */
9164 	tg3_override_clk(tp);
9165 
9166 	/* Manage gphy power for all CPMU absent PCIe devices. */
9167 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9168 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9169 
9170 	tw32(GRC_MISC_CFG, val);
9171 
9172 	/* restore 5701 hardware bug workaround write method */
9173 	tp->write32 = write_op;
9174 
9175 	/* Unfortunately, we have to delay before the PCI read back.
9176 	 * Some 575X chips even will not respond to a PCI cfg access
9177 	 * when the reset command is given to the chip.
9178 	 *
9179 	 * How do these hardware designers expect things to work
9180 	 * properly if the PCI write is posted for a long period
9181 	 * of time?  It is always necessary to have some method by
9182 	 * which a register read back can occur to push the write
9183 	 * out which does the reset.
9184 	 *
9185 	 * For most tg3 variants the trick below was working.
9186 	 * Ho hum...
9187 	 */
9188 	udelay(120);
9189 
9190 	/* Flush PCI posted writes.  The normal MMIO registers
9191 	 * are inaccessible at this time so this is the only
9192 	 * way to make this reliably (actually, this is no longer
9193 	 * the case, see above).  I tried to use indirect
9194 	 * register read/write but this upset some 5701 variants.
9195 	 */
9196 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9197 
9198 	udelay(120);
9199 
9200 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9201 		u16 val16;
9202 
9203 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9204 			int j;
9205 			u32 cfg_val;
9206 
9207 			/* Wait for link training to complete.  */
9208 			for (j = 0; j < 5000; j++)
9209 				udelay(100);
9210 
9211 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9212 			pci_write_config_dword(tp->pdev, 0xc4,
9213 					       cfg_val | (1 << 15));
9214 		}
9215 
9216 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9217 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9218 		/*
9219 		 * Older PCIe devices only support the 128 byte
9220 		 * MPS setting.  Enforce the restriction.
9221 		 */
9222 		if (!tg3_flag(tp, CPMU_PRESENT))
9223 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9224 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9225 
9226 		/* Clear error status */
9227 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9228 				      PCI_EXP_DEVSTA_CED |
9229 				      PCI_EXP_DEVSTA_NFED |
9230 				      PCI_EXP_DEVSTA_FED |
9231 				      PCI_EXP_DEVSTA_URD);
9232 	}
9233 
9234 	tg3_restore_pci_state(tp);
9235 
9236 	tg3_flag_clear(tp, CHIP_RESETTING);
9237 	tg3_flag_clear(tp, ERROR_PROCESSED);
9238 
9239 	val = 0;
9240 	if (tg3_flag(tp, 5780_CLASS))
9241 		val = tr32(MEMARB_MODE);
9242 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9243 
9244 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9245 		tg3_stop_fw(tp);
9246 		tw32(0x5000, 0x400);
9247 	}
9248 
9249 	if (tg3_flag(tp, IS_SSB_CORE)) {
9250 		/*
9251 		 * BCM4785: In order to avoid repercussions from using
9252 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9253 		 * which is not required.
9254 		 */
9255 		tg3_stop_fw(tp);
9256 		tg3_halt_cpu(tp, RX_CPU_BASE);
9257 	}
9258 
9259 	err = tg3_poll_fw(tp);
9260 	if (err)
9261 		return err;
9262 
9263 	tw32(GRC_MODE, tp->grc_mode);
9264 
9265 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9266 		val = tr32(0xc4);
9267 
9268 		tw32(0xc4, val | (1 << 15));
9269 	}
9270 
9271 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9272 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9273 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9274 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9275 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9276 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9277 	}
9278 
9279 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9280 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9281 		val = tp->mac_mode;
9282 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9283 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9284 		val = tp->mac_mode;
9285 	} else
9286 		val = 0;
9287 
9288 	tw32_f(MAC_MODE, val);
9289 	udelay(40);
9290 
9291 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9292 
9293 	tg3_mdio_start(tp);
9294 
9295 	if (tg3_flag(tp, PCI_EXPRESS) &&
9296 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9297 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9298 	    !tg3_flag(tp, 57765_PLUS)) {
9299 		val = tr32(0x7c00);
9300 
9301 		tw32(0x7c00, val | (1 << 25));
9302 	}
9303 
9304 	tg3_restore_clk(tp);
9305 
9306 	/* Increase the core clock speed to fix tx timeout issue for 5762
9307 	 * with 100Mbps link speed.
9308 	 */
9309 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9310 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9311 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9312 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9313 	}
9314 
9315 	/* Reprobe ASF enable state.  */
9316 	tg3_flag_clear(tp, ENABLE_ASF);
9317 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9318 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9319 
9320 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9321 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9322 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9323 		u32 nic_cfg;
9324 
9325 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9326 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9327 			tg3_flag_set(tp, ENABLE_ASF);
9328 			tp->last_event_jiffies = jiffies;
9329 			if (tg3_flag(tp, 5750_PLUS))
9330 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9331 
9332 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9333 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9334 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9335 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9336 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9337 		}
9338 	}
9339 
9340 	return 0;
9341 }
9342 
9343 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9344 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9345 static void __tg3_set_rx_mode(struct net_device *);
9346 
9347 /* tp->lock is held. */
9348 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9349 {
9350 	int err;
9351 
9352 	tg3_stop_fw(tp);
9353 
9354 	tg3_write_sig_pre_reset(tp, kind);
9355 
9356 	tg3_abort_hw(tp, silent);
9357 	err = tg3_chip_reset(tp);
9358 
9359 	__tg3_set_mac_addr(tp, false);
9360 
9361 	tg3_write_sig_legacy(tp, kind);
9362 	tg3_write_sig_post_reset(tp, kind);
9363 
9364 	if (tp->hw_stats) {
9365 		/* Save the stats across chip resets... */
9366 		tg3_get_nstats(tp, &tp->net_stats_prev);
9367 		tg3_get_estats(tp, &tp->estats_prev);
9368 
9369 		/* And make sure the next sample is new data */
9370 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9371 	}
9372 
9373 	return err;
9374 }
9375 
9376 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9377 {
9378 	struct tg3 *tp = netdev_priv(dev);
9379 	struct sockaddr *addr = p;
9380 	int err = 0;
9381 	bool skip_mac_1 = false;
9382 
9383 	if (!is_valid_ether_addr(addr->sa_data))
9384 		return -EADDRNOTAVAIL;
9385 
9386 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9387 
9388 	if (!netif_running(dev))
9389 		return 0;
9390 
9391 	if (tg3_flag(tp, ENABLE_ASF)) {
9392 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9393 
9394 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9395 		addr0_low = tr32(MAC_ADDR_0_LOW);
9396 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9397 		addr1_low = tr32(MAC_ADDR_1_LOW);
9398 
9399 		/* Skip MAC addr 1 if ASF is using it. */
9400 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9401 		    !(addr1_high == 0 && addr1_low == 0))
9402 			skip_mac_1 = true;
9403 	}
9404 	spin_lock_bh(&tp->lock);
9405 	__tg3_set_mac_addr(tp, skip_mac_1);
9406 	__tg3_set_rx_mode(dev);
9407 	spin_unlock_bh(&tp->lock);
9408 
9409 	return err;
9410 }
9411 
9412 /* tp->lock is held. */
9413 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9414 			   dma_addr_t mapping, u32 maxlen_flags,
9415 			   u32 nic_addr)
9416 {
9417 	tg3_write_mem(tp,
9418 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9419 		      ((u64) mapping >> 32));
9420 	tg3_write_mem(tp,
9421 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9422 		      ((u64) mapping & 0xffffffff));
9423 	tg3_write_mem(tp,
9424 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9425 		       maxlen_flags);
9426 
9427 	if (!tg3_flag(tp, 5705_PLUS))
9428 		tg3_write_mem(tp,
9429 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9430 			      nic_addr);
9431 }
9432 
9433 
9434 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9435 {
9436 	int i = 0;
9437 
9438 	if (!tg3_flag(tp, ENABLE_TSS)) {
9439 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9440 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9441 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9442 	} else {
9443 		tw32(HOSTCC_TXCOL_TICKS, 0);
9444 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9445 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9446 
9447 		for (; i < tp->txq_cnt; i++) {
9448 			u32 reg;
9449 
9450 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9451 			tw32(reg, ec->tx_coalesce_usecs);
9452 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9453 			tw32(reg, ec->tx_max_coalesced_frames);
9454 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9455 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9456 		}
9457 	}
9458 
9459 	for (; i < tp->irq_max - 1; i++) {
9460 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9461 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9462 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9463 	}
9464 }
9465 
9466 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9467 {
9468 	int i = 0;
9469 	u32 limit = tp->rxq_cnt;
9470 
9471 	if (!tg3_flag(tp, ENABLE_RSS)) {
9472 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9473 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9474 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9475 		limit--;
9476 	} else {
9477 		tw32(HOSTCC_RXCOL_TICKS, 0);
9478 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9479 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9480 	}
9481 
9482 	for (; i < limit; i++) {
9483 		u32 reg;
9484 
9485 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9486 		tw32(reg, ec->rx_coalesce_usecs);
9487 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9488 		tw32(reg, ec->rx_max_coalesced_frames);
9489 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9490 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9491 	}
9492 
9493 	for (; i < tp->irq_max - 1; i++) {
9494 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9495 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9496 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9497 	}
9498 }
9499 
9500 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9501 {
9502 	tg3_coal_tx_init(tp, ec);
9503 	tg3_coal_rx_init(tp, ec);
9504 
9505 	if (!tg3_flag(tp, 5705_PLUS)) {
9506 		u32 val = ec->stats_block_coalesce_usecs;
9507 
9508 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9509 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9510 
9511 		if (!tp->link_up)
9512 			val = 0;
9513 
9514 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9515 	}
9516 }
9517 
9518 /* tp->lock is held. */
9519 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9520 {
9521 	u32 txrcb, limit;
9522 
9523 	/* Disable all transmit rings but the first. */
9524 	if (!tg3_flag(tp, 5705_PLUS))
9525 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9526 	else if (tg3_flag(tp, 5717_PLUS))
9527 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9528 	else if (tg3_flag(tp, 57765_CLASS) ||
9529 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9530 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9531 	else
9532 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9533 
9534 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9535 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9536 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9537 			      BDINFO_FLAGS_DISABLED);
9538 }
9539 
9540 /* tp->lock is held. */
9541 static void tg3_tx_rcbs_init(struct tg3 *tp)
9542 {
9543 	int i = 0;
9544 	u32 txrcb = NIC_SRAM_SEND_RCB;
9545 
9546 	if (tg3_flag(tp, ENABLE_TSS))
9547 		i++;
9548 
9549 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9550 		struct tg3_napi *tnapi = &tp->napi[i];
9551 
9552 		if (!tnapi->tx_ring)
9553 			continue;
9554 
9555 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9556 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9557 			       NIC_SRAM_TX_BUFFER_DESC);
9558 	}
9559 }
9560 
9561 /* tp->lock is held. */
9562 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9563 {
9564 	u32 rxrcb, limit;
9565 
9566 	/* Disable all receive return rings but the first. */
9567 	if (tg3_flag(tp, 5717_PLUS))
9568 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9569 	else if (!tg3_flag(tp, 5705_PLUS))
9570 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9571 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9572 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9573 		 tg3_flag(tp, 57765_CLASS))
9574 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9575 	else
9576 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9577 
9578 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9579 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9580 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9581 			      BDINFO_FLAGS_DISABLED);
9582 }
9583 
9584 /* tp->lock is held. */
9585 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9586 {
9587 	int i = 0;
9588 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9589 
9590 	if (tg3_flag(tp, ENABLE_RSS))
9591 		i++;
9592 
9593 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9594 		struct tg3_napi *tnapi = &tp->napi[i];
9595 
9596 		if (!tnapi->rx_rcb)
9597 			continue;
9598 
9599 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9600 			       (tp->rx_ret_ring_mask + 1) <<
9601 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9602 	}
9603 }
9604 
9605 /* tp->lock is held. */
9606 static void tg3_rings_reset(struct tg3 *tp)
9607 {
9608 	int i;
9609 	u32 stblk;
9610 	struct tg3_napi *tnapi = &tp->napi[0];
9611 
9612 	tg3_tx_rcbs_disable(tp);
9613 
9614 	tg3_rx_ret_rcbs_disable(tp);
9615 
9616 	/* Disable interrupts */
9617 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9618 	tp->napi[0].chk_msi_cnt = 0;
9619 	tp->napi[0].last_rx_cons = 0;
9620 	tp->napi[0].last_tx_cons = 0;
9621 
9622 	/* Zero mailbox registers. */
9623 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9624 		for (i = 1; i < tp->irq_max; i++) {
9625 			tp->napi[i].tx_prod = 0;
9626 			tp->napi[i].tx_cons = 0;
9627 			if (tg3_flag(tp, ENABLE_TSS))
9628 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9629 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9630 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9631 			tp->napi[i].chk_msi_cnt = 0;
9632 			tp->napi[i].last_rx_cons = 0;
9633 			tp->napi[i].last_tx_cons = 0;
9634 		}
9635 		if (!tg3_flag(tp, ENABLE_TSS))
9636 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9637 	} else {
9638 		tp->napi[0].tx_prod = 0;
9639 		tp->napi[0].tx_cons = 0;
9640 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9641 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9642 	}
9643 
9644 	/* Make sure the NIC-based send BD rings are disabled. */
9645 	if (!tg3_flag(tp, 5705_PLUS)) {
9646 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9647 		for (i = 0; i < 16; i++)
9648 			tw32_tx_mbox(mbox + i * 8, 0);
9649 	}
9650 
9651 	/* Clear status block in ram. */
9652 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9653 
9654 	/* Set status block DMA address */
9655 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9656 	     ((u64) tnapi->status_mapping >> 32));
9657 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9658 	     ((u64) tnapi->status_mapping & 0xffffffff));
9659 
9660 	stblk = HOSTCC_STATBLCK_RING1;
9661 
9662 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9663 		u64 mapping = (u64)tnapi->status_mapping;
9664 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9665 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9666 		stblk += 8;
9667 
9668 		/* Clear status block in ram. */
9669 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9670 	}
9671 
9672 	tg3_tx_rcbs_init(tp);
9673 	tg3_rx_ret_rcbs_init(tp);
9674 }
9675 
9676 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9677 {
9678 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9679 
9680 	if (!tg3_flag(tp, 5750_PLUS) ||
9681 	    tg3_flag(tp, 5780_CLASS) ||
9682 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9683 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9684 	    tg3_flag(tp, 57765_PLUS))
9685 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9686 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9687 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9688 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9689 	else
9690 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9691 
9692 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9693 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9694 
9695 	val = min(nic_rep_thresh, host_rep_thresh);
9696 	tw32(RCVBDI_STD_THRESH, val);
9697 
9698 	if (tg3_flag(tp, 57765_PLUS))
9699 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9700 
9701 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9702 		return;
9703 
9704 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9705 
9706 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9707 
9708 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9709 	tw32(RCVBDI_JUMBO_THRESH, val);
9710 
9711 	if (tg3_flag(tp, 57765_PLUS))
9712 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9713 }
9714 
9715 static inline u32 calc_crc(unsigned char *buf, int len)
9716 {
9717 	u32 reg;
9718 	u32 tmp;
9719 	int j, k;
9720 
9721 	reg = 0xffffffff;
9722 
9723 	for (j = 0; j < len; j++) {
9724 		reg ^= buf[j];
9725 
9726 		for (k = 0; k < 8; k++) {
9727 			tmp = reg & 0x01;
9728 
9729 			reg >>= 1;
9730 
9731 			if (tmp)
9732 				reg ^= CRC32_POLY_LE;
9733 		}
9734 	}
9735 
9736 	return ~reg;
9737 }
9738 
9739 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9740 {
9741 	/* accept or reject all multicast frames */
9742 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9743 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9744 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9745 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9746 }
9747 
9748 static void __tg3_set_rx_mode(struct net_device *dev)
9749 {
9750 	struct tg3 *tp = netdev_priv(dev);
9751 	u32 rx_mode;
9752 
9753 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9754 				  RX_MODE_KEEP_VLAN_TAG);
9755 
9756 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9757 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9758 	 * flag clear.
9759 	 */
9760 	if (!tg3_flag(tp, ENABLE_ASF))
9761 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9762 #endif
9763 
9764 	if (dev->flags & IFF_PROMISC) {
9765 		/* Promiscuous mode. */
9766 		rx_mode |= RX_MODE_PROMISC;
9767 	} else if (dev->flags & IFF_ALLMULTI) {
9768 		/* Accept all multicast. */
9769 		tg3_set_multi(tp, 1);
9770 	} else if (netdev_mc_empty(dev)) {
9771 		/* Reject all multicast. */
9772 		tg3_set_multi(tp, 0);
9773 	} else {
9774 		/* Accept one or more multicast(s). */
9775 		struct netdev_hw_addr *ha;
9776 		u32 mc_filter[4] = { 0, };
9777 		u32 regidx;
9778 		u32 bit;
9779 		u32 crc;
9780 
9781 		netdev_for_each_mc_addr(ha, dev) {
9782 			crc = calc_crc(ha->addr, ETH_ALEN);
9783 			bit = ~crc & 0x7f;
9784 			regidx = (bit & 0x60) >> 5;
9785 			bit &= 0x1f;
9786 			mc_filter[regidx] |= (1 << bit);
9787 		}
9788 
9789 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9790 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9791 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9792 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9793 	}
9794 
9795 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9796 		rx_mode |= RX_MODE_PROMISC;
9797 	} else if (!(dev->flags & IFF_PROMISC)) {
9798 		/* Add all entries into to the mac addr filter list */
9799 		int i = 0;
9800 		struct netdev_hw_addr *ha;
9801 
9802 		netdev_for_each_uc_addr(ha, dev) {
9803 			__tg3_set_one_mac_addr(tp, ha->addr,
9804 					       i + TG3_UCAST_ADDR_IDX(tp));
9805 			i++;
9806 		}
9807 	}
9808 
9809 	if (rx_mode != tp->rx_mode) {
9810 		tp->rx_mode = rx_mode;
9811 		tw32_f(MAC_RX_MODE, rx_mode);
9812 		udelay(10);
9813 	}
9814 }
9815 
9816 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9817 {
9818 	int i;
9819 
9820 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9821 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9822 }
9823 
9824 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9825 {
9826 	int i;
9827 
9828 	if (!tg3_flag(tp, SUPPORT_MSIX))
9829 		return;
9830 
9831 	if (tp->rxq_cnt == 1) {
9832 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9833 		return;
9834 	}
9835 
9836 	/* Validate table against current IRQ count */
9837 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9838 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9839 			break;
9840 	}
9841 
9842 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9843 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9844 }
9845 
9846 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9847 {
9848 	int i = 0;
9849 	u32 reg = MAC_RSS_INDIR_TBL_0;
9850 
9851 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9852 		u32 val = tp->rss_ind_tbl[i];
9853 		i++;
9854 		for (; i % 8; i++) {
9855 			val <<= 4;
9856 			val |= tp->rss_ind_tbl[i];
9857 		}
9858 		tw32(reg, val);
9859 		reg += 4;
9860 	}
9861 }
9862 
9863 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9864 {
9865 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9866 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9867 	else
9868 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9869 }
9870 
9871 /* tp->lock is held. */
9872 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9873 {
9874 	u32 val, rdmac_mode;
9875 	int i, err, limit;
9876 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9877 
9878 	tg3_disable_ints(tp);
9879 
9880 	tg3_stop_fw(tp);
9881 
9882 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9883 
9884 	if (tg3_flag(tp, INIT_COMPLETE))
9885 		tg3_abort_hw(tp, 1);
9886 
9887 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9888 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9889 		tg3_phy_pull_config(tp);
9890 		tg3_eee_pull_config(tp, NULL);
9891 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9892 	}
9893 
9894 	/* Enable MAC control of LPI */
9895 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9896 		tg3_setup_eee(tp);
9897 
9898 	if (reset_phy)
9899 		tg3_phy_reset(tp);
9900 
9901 	err = tg3_chip_reset(tp);
9902 	if (err)
9903 		return err;
9904 
9905 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9906 
9907 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9908 		val = tr32(TG3_CPMU_CTRL);
9909 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9910 		tw32(TG3_CPMU_CTRL, val);
9911 
9912 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9913 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9914 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9915 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9916 
9917 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9918 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9919 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9920 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9921 
9922 		val = tr32(TG3_CPMU_HST_ACC);
9923 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9924 		val |= CPMU_HST_ACC_MACCLK_6_25;
9925 		tw32(TG3_CPMU_HST_ACC, val);
9926 	}
9927 
9928 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9929 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9930 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9931 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9932 		tw32(PCIE_PWR_MGMT_THRESH, val);
9933 
9934 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9935 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9936 
9937 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9938 
9939 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9940 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9941 	}
9942 
9943 	if (tg3_flag(tp, L1PLLPD_EN)) {
9944 		u32 grc_mode = tr32(GRC_MODE);
9945 
9946 		/* Access the lower 1K of PL PCIE block registers. */
9947 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9948 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9949 
9950 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9951 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9952 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9953 
9954 		tw32(GRC_MODE, grc_mode);
9955 	}
9956 
9957 	if (tg3_flag(tp, 57765_CLASS)) {
9958 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9959 			u32 grc_mode = tr32(GRC_MODE);
9960 
9961 			/* Access the lower 1K of PL PCIE block registers. */
9962 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9963 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9964 
9965 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9966 				   TG3_PCIE_PL_LO_PHYCTL5);
9967 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9968 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9969 
9970 			tw32(GRC_MODE, grc_mode);
9971 		}
9972 
9973 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9974 			u32 grc_mode;
9975 
9976 			/* Fix transmit hangs */
9977 			val = tr32(TG3_CPMU_PADRNG_CTL);
9978 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9979 			tw32(TG3_CPMU_PADRNG_CTL, val);
9980 
9981 			grc_mode = tr32(GRC_MODE);
9982 
9983 			/* Access the lower 1K of DL PCIE block registers. */
9984 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9985 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9986 
9987 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9988 				   TG3_PCIE_DL_LO_FTSMAX);
9989 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9990 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9991 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9992 
9993 			tw32(GRC_MODE, grc_mode);
9994 		}
9995 
9996 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9997 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9998 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9999 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10000 	}
10001 
10002 	/* This works around an issue with Athlon chipsets on
10003 	 * B3 tigon3 silicon.  This bit has no effect on any
10004 	 * other revision.  But do not set this on PCI Express
10005 	 * chips and don't even touch the clocks if the CPMU is present.
10006 	 */
10007 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10008 		if (!tg3_flag(tp, PCI_EXPRESS))
10009 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10010 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10011 	}
10012 
10013 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10014 	    tg3_flag(tp, PCIX_MODE)) {
10015 		val = tr32(TG3PCI_PCISTATE);
10016 		val |= PCISTATE_RETRY_SAME_DMA;
10017 		tw32(TG3PCI_PCISTATE, val);
10018 	}
10019 
10020 	if (tg3_flag(tp, ENABLE_APE)) {
10021 		/* Allow reads and writes to the
10022 		 * APE register and memory space.
10023 		 */
10024 		val = tr32(TG3PCI_PCISTATE);
10025 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10026 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10027 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10028 		tw32(TG3PCI_PCISTATE, val);
10029 	}
10030 
10031 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10032 		/* Enable some hw fixes.  */
10033 		val = tr32(TG3PCI_MSI_DATA);
10034 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10035 		tw32(TG3PCI_MSI_DATA, val);
10036 	}
10037 
10038 	/* Descriptor ring init may make accesses to the
10039 	 * NIC SRAM area to setup the TX descriptors, so we
10040 	 * can only do this after the hardware has been
10041 	 * successfully reset.
10042 	 */
10043 	err = tg3_init_rings(tp);
10044 	if (err)
10045 		return err;
10046 
10047 	if (tg3_flag(tp, 57765_PLUS)) {
10048 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10049 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10050 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10051 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10052 		if (!tg3_flag(tp, 57765_CLASS) &&
10053 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10054 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10055 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10056 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10057 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10058 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10059 		/* This value is determined during the probe time DMA
10060 		 * engine test, tg3_test_dma.
10061 		 */
10062 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10063 	}
10064 
10065 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10066 			  GRC_MODE_4X_NIC_SEND_RINGS |
10067 			  GRC_MODE_NO_TX_PHDR_CSUM |
10068 			  GRC_MODE_NO_RX_PHDR_CSUM);
10069 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10070 
10071 	/* Pseudo-header checksum is done by hardware logic and not
10072 	 * the offload processers, so make the chip do the pseudo-
10073 	 * header checksums on receive.  For transmit it is more
10074 	 * convenient to do the pseudo-header checksum in software
10075 	 * as Linux does that on transmit for us in all cases.
10076 	 */
10077 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10078 
10079 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10080 	if (tp->rxptpctl)
10081 		tw32(TG3_RX_PTP_CTL,
10082 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10083 
10084 	if (tg3_flag(tp, PTP_CAPABLE))
10085 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10086 
10087 	tw32(GRC_MODE, tp->grc_mode | val);
10088 
10089 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10090 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10091 	 * to 2048 instead of default 4096.
10092 	 */
10093 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10094 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10095 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10096 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10097 	}
10098 
10099 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10100 	val = tr32(GRC_MISC_CFG);
10101 	val &= ~0xff;
10102 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10103 	tw32(GRC_MISC_CFG, val);
10104 
10105 	/* Initialize MBUF/DESC pool. */
10106 	if (tg3_flag(tp, 5750_PLUS)) {
10107 		/* Do nothing.  */
10108 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10109 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10110 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10111 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10112 		else
10113 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10114 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10115 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10116 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10117 		int fw_len;
10118 
10119 		fw_len = tp->fw_len;
10120 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10121 		tw32(BUFMGR_MB_POOL_ADDR,
10122 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10123 		tw32(BUFMGR_MB_POOL_SIZE,
10124 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10125 	}
10126 
10127 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10128 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10129 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10130 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10131 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10132 		tw32(BUFMGR_MB_HIGH_WATER,
10133 		     tp->bufmgr_config.mbuf_high_water);
10134 	} else {
10135 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10136 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10137 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10138 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10139 		tw32(BUFMGR_MB_HIGH_WATER,
10140 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10141 	}
10142 	tw32(BUFMGR_DMA_LOW_WATER,
10143 	     tp->bufmgr_config.dma_low_water);
10144 	tw32(BUFMGR_DMA_HIGH_WATER,
10145 	     tp->bufmgr_config.dma_high_water);
10146 
10147 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10148 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10149 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10150 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10151 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10152 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10153 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10154 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10155 	tw32(BUFMGR_MODE, val);
10156 	for (i = 0; i < 2000; i++) {
10157 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10158 			break;
10159 		udelay(10);
10160 	}
10161 	if (i >= 2000) {
10162 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10163 		return -ENODEV;
10164 	}
10165 
10166 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10167 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10168 
10169 	tg3_setup_rxbd_thresholds(tp);
10170 
10171 	/* Initialize TG3_BDINFO's at:
10172 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10173 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10174 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10175 	 *
10176 	 * like so:
10177 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10178 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10179 	 *                              ring attribute flags
10180 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10181 	 *
10182 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10183 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10184 	 *
10185 	 * The size of each ring is fixed in the firmware, but the location is
10186 	 * configurable.
10187 	 */
10188 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10189 	     ((u64) tpr->rx_std_mapping >> 32));
10190 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10191 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10192 	if (!tg3_flag(tp, 5717_PLUS))
10193 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10194 		     NIC_SRAM_RX_BUFFER_DESC);
10195 
10196 	/* Disable the mini ring */
10197 	if (!tg3_flag(tp, 5705_PLUS))
10198 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10199 		     BDINFO_FLAGS_DISABLED);
10200 
10201 	/* Program the jumbo buffer descriptor ring control
10202 	 * blocks on those devices that have them.
10203 	 */
10204 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10205 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10206 
10207 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10208 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10209 			     ((u64) tpr->rx_jmb_mapping >> 32));
10210 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10211 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10212 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10213 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10214 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10215 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10216 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10217 			    tg3_flag(tp, 57765_CLASS) ||
10218 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10219 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10220 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10221 		} else {
10222 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10223 			     BDINFO_FLAGS_DISABLED);
10224 		}
10225 
10226 		if (tg3_flag(tp, 57765_PLUS)) {
10227 			val = TG3_RX_STD_RING_SIZE(tp);
10228 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10229 			val |= (TG3_RX_STD_DMA_SZ << 2);
10230 		} else
10231 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10232 	} else
10233 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10234 
10235 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10236 
10237 	tpr->rx_std_prod_idx = tp->rx_pending;
10238 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10239 
10240 	tpr->rx_jmb_prod_idx =
10241 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10242 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10243 
10244 	tg3_rings_reset(tp);
10245 
10246 	/* Initialize MAC address and backoff seed. */
10247 	__tg3_set_mac_addr(tp, false);
10248 
10249 	/* MTU + ethernet header + FCS + optional VLAN tag */
10250 	tw32(MAC_RX_MTU_SIZE,
10251 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10252 
10253 	/* The slot time is changed by tg3_setup_phy if we
10254 	 * run at gigabit with half duplex.
10255 	 */
10256 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10257 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10258 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10259 
10260 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10261 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10262 		val |= tr32(MAC_TX_LENGTHS) &
10263 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10264 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10265 
10266 	tw32(MAC_TX_LENGTHS, val);
10267 
10268 	/* Receive rules. */
10269 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10270 	tw32(RCVLPC_CONFIG, 0x0181);
10271 
10272 	/* Calculate RDMAC_MODE setting early, we need it to determine
10273 	 * the RCVLPC_STATE_ENABLE mask.
10274 	 */
10275 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10276 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10277 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10278 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10279 		      RDMAC_MODE_LNGREAD_ENAB);
10280 
10281 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10282 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10283 
10284 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10285 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10286 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10287 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10288 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10289 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10290 
10291 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10292 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10293 		if (tg3_flag(tp, TSO_CAPABLE) &&
10294 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10295 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10296 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10297 			   !tg3_flag(tp, IS_5788)) {
10298 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10299 		}
10300 	}
10301 
10302 	if (tg3_flag(tp, PCI_EXPRESS))
10303 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10304 
10305 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10306 		tp->dma_limit = 0;
10307 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10308 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10309 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10310 		}
10311 	}
10312 
10313 	if (tg3_flag(tp, HW_TSO_1) ||
10314 	    tg3_flag(tp, HW_TSO_2) ||
10315 	    tg3_flag(tp, HW_TSO_3))
10316 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10317 
10318 	if (tg3_flag(tp, 57765_PLUS) ||
10319 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10320 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10321 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10322 
10323 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10324 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10325 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10326 
10327 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10328 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10329 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10330 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10331 	    tg3_flag(tp, 57765_PLUS)) {
10332 		u32 tgtreg;
10333 
10334 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10335 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10336 		else
10337 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10338 
10339 		val = tr32(tgtreg);
10340 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10341 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10342 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10343 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10344 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10345 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10346 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10347 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10348 		}
10349 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10350 	}
10351 
10352 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10353 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10354 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10355 		u32 tgtreg;
10356 
10357 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10358 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10359 		else
10360 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10361 
10362 		val = tr32(tgtreg);
10363 		tw32(tgtreg, val |
10364 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10365 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10366 	}
10367 
10368 	/* Receive/send statistics. */
10369 	if (tg3_flag(tp, 5750_PLUS)) {
10370 		val = tr32(RCVLPC_STATS_ENABLE);
10371 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10372 		tw32(RCVLPC_STATS_ENABLE, val);
10373 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10374 		   tg3_flag(tp, TSO_CAPABLE)) {
10375 		val = tr32(RCVLPC_STATS_ENABLE);
10376 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10377 		tw32(RCVLPC_STATS_ENABLE, val);
10378 	} else {
10379 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10380 	}
10381 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10382 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10383 	tw32(SNDDATAI_STATSCTRL,
10384 	     (SNDDATAI_SCTRL_ENABLE |
10385 	      SNDDATAI_SCTRL_FASTUPD));
10386 
10387 	/* Setup host coalescing engine. */
10388 	tw32(HOSTCC_MODE, 0);
10389 	for (i = 0; i < 2000; i++) {
10390 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10391 			break;
10392 		udelay(10);
10393 	}
10394 
10395 	__tg3_set_coalesce(tp, &tp->coal);
10396 
10397 	if (!tg3_flag(tp, 5705_PLUS)) {
10398 		/* Status/statistics block address.  See tg3_timer,
10399 		 * the tg3_periodic_fetch_stats call there, and
10400 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10401 		 */
10402 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10403 		     ((u64) tp->stats_mapping >> 32));
10404 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10405 		     ((u64) tp->stats_mapping & 0xffffffff));
10406 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10407 
10408 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10409 
10410 		/* Clear statistics and status block memory areas */
10411 		for (i = NIC_SRAM_STATS_BLK;
10412 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10413 		     i += sizeof(u32)) {
10414 			tg3_write_mem(tp, i, 0);
10415 			udelay(40);
10416 		}
10417 	}
10418 
10419 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10420 
10421 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10422 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10423 	if (!tg3_flag(tp, 5705_PLUS))
10424 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10425 
10426 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10427 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10428 		/* reset to prevent losing 1st rx packet intermittently */
10429 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10430 		udelay(10);
10431 	}
10432 
10433 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10434 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10435 			MAC_MODE_FHDE_ENABLE;
10436 	if (tg3_flag(tp, ENABLE_APE))
10437 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10438 	if (!tg3_flag(tp, 5705_PLUS) &&
10439 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10440 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10441 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10442 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10443 	udelay(40);
10444 
10445 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10446 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10447 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10448 	 * whether used as inputs or outputs, are set by boot code after
10449 	 * reset.
10450 	 */
10451 	if (!tg3_flag(tp, IS_NIC)) {
10452 		u32 gpio_mask;
10453 
10454 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10455 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10456 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10457 
10458 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10459 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10460 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10461 
10462 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10463 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10464 
10465 		tp->grc_local_ctrl &= ~gpio_mask;
10466 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10467 
10468 		/* GPIO1 must be driven high for eeprom write protect */
10469 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10470 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10471 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10472 	}
10473 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10474 	udelay(100);
10475 
10476 	if (tg3_flag(tp, USING_MSIX)) {
10477 		val = tr32(MSGINT_MODE);
10478 		val |= MSGINT_MODE_ENABLE;
10479 		if (tp->irq_cnt > 1)
10480 			val |= MSGINT_MODE_MULTIVEC_EN;
10481 		if (!tg3_flag(tp, 1SHOT_MSI))
10482 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10483 		tw32(MSGINT_MODE, val);
10484 	}
10485 
10486 	if (!tg3_flag(tp, 5705_PLUS)) {
10487 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10488 		udelay(40);
10489 	}
10490 
10491 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10492 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10493 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10494 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10495 	       WDMAC_MODE_LNGREAD_ENAB);
10496 
10497 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10498 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10499 		if (tg3_flag(tp, TSO_CAPABLE) &&
10500 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10501 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10502 			/* nothing */
10503 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10504 			   !tg3_flag(tp, IS_5788)) {
10505 			val |= WDMAC_MODE_RX_ACCEL;
10506 		}
10507 	}
10508 
10509 	/* Enable host coalescing bug fix */
10510 	if (tg3_flag(tp, 5755_PLUS))
10511 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10512 
10513 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10514 		val |= WDMAC_MODE_BURST_ALL_DATA;
10515 
10516 	tw32_f(WDMAC_MODE, val);
10517 	udelay(40);
10518 
10519 	if (tg3_flag(tp, PCIX_MODE)) {
10520 		u16 pcix_cmd;
10521 
10522 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10523 				     &pcix_cmd);
10524 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10525 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10526 			pcix_cmd |= PCI_X_CMD_READ_2K;
10527 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10528 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10529 			pcix_cmd |= PCI_X_CMD_READ_2K;
10530 		}
10531 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10532 				      pcix_cmd);
10533 	}
10534 
10535 	tw32_f(RDMAC_MODE, rdmac_mode);
10536 	udelay(40);
10537 
10538 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10539 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10540 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10541 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10542 				break;
10543 		}
10544 		if (i < TG3_NUM_RDMA_CHANNELS) {
10545 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10546 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10547 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10548 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10549 		}
10550 	}
10551 
10552 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10553 	if (!tg3_flag(tp, 5705_PLUS))
10554 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10555 
10556 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10557 		tw32(SNDDATAC_MODE,
10558 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10559 	else
10560 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10561 
10562 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10563 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10564 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10565 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10566 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10567 	tw32(RCVDBDI_MODE, val);
10568 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10569 	if (tg3_flag(tp, HW_TSO_1) ||
10570 	    tg3_flag(tp, HW_TSO_2) ||
10571 	    tg3_flag(tp, HW_TSO_3))
10572 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10573 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10574 	if (tg3_flag(tp, ENABLE_TSS))
10575 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10576 	tw32(SNDBDI_MODE, val);
10577 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10578 
10579 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10580 		err = tg3_load_5701_a0_firmware_fix(tp);
10581 		if (err)
10582 			return err;
10583 	}
10584 
10585 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10586 		/* Ignore any errors for the firmware download. If download
10587 		 * fails, the device will operate with EEE disabled
10588 		 */
10589 		tg3_load_57766_firmware(tp);
10590 	}
10591 
10592 	if (tg3_flag(tp, TSO_CAPABLE)) {
10593 		err = tg3_load_tso_firmware(tp);
10594 		if (err)
10595 			return err;
10596 	}
10597 
10598 	tp->tx_mode = TX_MODE_ENABLE;
10599 
10600 	if (tg3_flag(tp, 5755_PLUS) ||
10601 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10602 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10603 
10604 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10605 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10606 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10607 		tp->tx_mode &= ~val;
10608 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10609 	}
10610 
10611 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10612 	udelay(100);
10613 
10614 	if (tg3_flag(tp, ENABLE_RSS)) {
10615 		u32 rss_key[10];
10616 
10617 		tg3_rss_write_indir_tbl(tp);
10618 
10619 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10620 
10621 		for (i = 0; i < 10 ; i++)
10622 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10623 	}
10624 
10625 	tp->rx_mode = RX_MODE_ENABLE;
10626 	if (tg3_flag(tp, 5755_PLUS))
10627 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10628 
10629 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10630 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10631 
10632 	if (tg3_flag(tp, ENABLE_RSS))
10633 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10634 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10635 			       RX_MODE_RSS_IPV6_HASH_EN |
10636 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10637 			       RX_MODE_RSS_IPV4_HASH_EN |
10638 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10639 
10640 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10641 	udelay(10);
10642 
10643 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10644 
10645 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10646 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10647 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10648 		udelay(10);
10649 	}
10650 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10651 	udelay(10);
10652 
10653 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10654 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10655 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10656 			/* Set drive transmission level to 1.2V  */
10657 			/* only if the signal pre-emphasis bit is not set  */
10658 			val = tr32(MAC_SERDES_CFG);
10659 			val &= 0xfffff000;
10660 			val |= 0x880;
10661 			tw32(MAC_SERDES_CFG, val);
10662 		}
10663 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10664 			tw32(MAC_SERDES_CFG, 0x616000);
10665 	}
10666 
10667 	/* Prevent chip from dropping frames when flow control
10668 	 * is enabled.
10669 	 */
10670 	if (tg3_flag(tp, 57765_CLASS))
10671 		val = 1;
10672 	else
10673 		val = 2;
10674 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10675 
10676 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10677 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10678 		/* Use hardware link auto-negotiation */
10679 		tg3_flag_set(tp, HW_AUTONEG);
10680 	}
10681 
10682 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10683 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10684 		u32 tmp;
10685 
10686 		tmp = tr32(SERDES_RX_CTRL);
10687 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10688 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10689 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10690 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10691 	}
10692 
10693 	if (!tg3_flag(tp, USE_PHYLIB)) {
10694 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10695 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10696 
10697 		err = tg3_setup_phy(tp, false);
10698 		if (err)
10699 			return err;
10700 
10701 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10702 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10703 			u32 tmp;
10704 
10705 			/* Clear CRC stats. */
10706 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10707 				tg3_writephy(tp, MII_TG3_TEST1,
10708 					     tmp | MII_TG3_TEST1_CRC_EN);
10709 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10710 			}
10711 		}
10712 	}
10713 
10714 	__tg3_set_rx_mode(tp->dev);
10715 
10716 	/* Initialize receive rules. */
10717 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10718 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10719 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10720 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10721 
10722 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10723 		limit = 8;
10724 	else
10725 		limit = 16;
10726 	if (tg3_flag(tp, ENABLE_ASF))
10727 		limit -= 4;
10728 	switch (limit) {
10729 	case 16:
10730 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10731 		/* fall through */
10732 	case 15:
10733 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10734 		/* fall through */
10735 	case 14:
10736 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10737 		/* fall through */
10738 	case 13:
10739 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10740 		/* fall through */
10741 	case 12:
10742 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10743 		/* fall through */
10744 	case 11:
10745 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10746 		/* fall through */
10747 	case 10:
10748 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10749 		/* fall through */
10750 	case 9:
10751 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10752 		/* fall through */
10753 	case 8:
10754 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10755 		/* fall through */
10756 	case 7:
10757 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10758 		/* fall through */
10759 	case 6:
10760 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10761 		/* fall through */
10762 	case 5:
10763 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10764 		/* fall through */
10765 	case 4:
10766 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10767 	case 3:
10768 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10769 	case 2:
10770 	case 1:
10771 
10772 	default:
10773 		break;
10774 	}
10775 
10776 	if (tg3_flag(tp, ENABLE_APE))
10777 		/* Write our heartbeat update interval to APE. */
10778 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10779 				APE_HOST_HEARTBEAT_INT_5SEC);
10780 
10781 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10782 
10783 	return 0;
10784 }
10785 
10786 /* Called at device open time to get the chip ready for
10787  * packet processing.  Invoked with tp->lock held.
10788  */
10789 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10790 {
10791 	/* Chip may have been just powered on. If so, the boot code may still
10792 	 * be running initialization. Wait for it to finish to avoid races in
10793 	 * accessing the hardware.
10794 	 */
10795 	tg3_enable_register_access(tp);
10796 	tg3_poll_fw(tp);
10797 
10798 	tg3_switch_clocks(tp);
10799 
10800 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10801 
10802 	return tg3_reset_hw(tp, reset_phy);
10803 }
10804 
10805 #ifdef CONFIG_TIGON3_HWMON
10806 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10807 {
10808 	int i;
10809 
10810 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10811 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10812 
10813 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10814 		off += len;
10815 
10816 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10817 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10818 			memset(ocir, 0, TG3_OCIR_LEN);
10819 	}
10820 }
10821 
10822 /* sysfs attributes for hwmon */
10823 static ssize_t tg3_show_temp(struct device *dev,
10824 			     struct device_attribute *devattr, char *buf)
10825 {
10826 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10827 	struct tg3 *tp = dev_get_drvdata(dev);
10828 	u32 temperature;
10829 
10830 	spin_lock_bh(&tp->lock);
10831 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10832 				sizeof(temperature));
10833 	spin_unlock_bh(&tp->lock);
10834 	return sprintf(buf, "%u\n", temperature * 1000);
10835 }
10836 
10837 
10838 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10839 			  TG3_TEMP_SENSOR_OFFSET);
10840 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10841 			  TG3_TEMP_CAUTION_OFFSET);
10842 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10843 			  TG3_TEMP_MAX_OFFSET);
10844 
10845 static struct attribute *tg3_attrs[] = {
10846 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10847 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10848 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10849 	NULL
10850 };
10851 ATTRIBUTE_GROUPS(tg3);
10852 
10853 static void tg3_hwmon_close(struct tg3 *tp)
10854 {
10855 	if (tp->hwmon_dev) {
10856 		hwmon_device_unregister(tp->hwmon_dev);
10857 		tp->hwmon_dev = NULL;
10858 	}
10859 }
10860 
10861 static void tg3_hwmon_open(struct tg3 *tp)
10862 {
10863 	int i;
10864 	u32 size = 0;
10865 	struct pci_dev *pdev = tp->pdev;
10866 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10867 
10868 	tg3_sd_scan_scratchpad(tp, ocirs);
10869 
10870 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10871 		if (!ocirs[i].src_data_length)
10872 			continue;
10873 
10874 		size += ocirs[i].src_hdr_length;
10875 		size += ocirs[i].src_data_length;
10876 	}
10877 
10878 	if (!size)
10879 		return;
10880 
10881 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10882 							  tp, tg3_groups);
10883 	if (IS_ERR(tp->hwmon_dev)) {
10884 		tp->hwmon_dev = NULL;
10885 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10886 	}
10887 }
10888 #else
10889 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10890 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10891 #endif /* CONFIG_TIGON3_HWMON */
10892 
10893 
10894 #define TG3_STAT_ADD32(PSTAT, REG) \
10895 do {	u32 __val = tr32(REG); \
10896 	(PSTAT)->low += __val; \
10897 	if ((PSTAT)->low < __val) \
10898 		(PSTAT)->high += 1; \
10899 } while (0)
10900 
10901 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10902 {
10903 	struct tg3_hw_stats *sp = tp->hw_stats;
10904 
10905 	if (!tp->link_up)
10906 		return;
10907 
10908 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10909 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10910 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10911 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10912 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10913 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10914 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10915 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10916 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10917 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10918 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10919 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10920 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10921 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10922 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10923 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10924 		u32 val;
10925 
10926 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10927 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10928 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10929 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10930 	}
10931 
10932 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10933 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10934 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10935 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10936 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10937 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10938 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10939 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10940 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10941 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10942 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10943 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10944 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10945 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10946 
10947 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10948 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10949 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10950 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10951 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10952 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10953 	} else {
10954 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10955 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10956 		if (val) {
10957 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10958 			sp->rx_discards.low += val;
10959 			if (sp->rx_discards.low < val)
10960 				sp->rx_discards.high += 1;
10961 		}
10962 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10963 	}
10964 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10965 }
10966 
10967 static void tg3_chk_missed_msi(struct tg3 *tp)
10968 {
10969 	u32 i;
10970 
10971 	for (i = 0; i < tp->irq_cnt; i++) {
10972 		struct tg3_napi *tnapi = &tp->napi[i];
10973 
10974 		if (tg3_has_work(tnapi)) {
10975 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10976 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10977 				if (tnapi->chk_msi_cnt < 1) {
10978 					tnapi->chk_msi_cnt++;
10979 					return;
10980 				}
10981 				tg3_msi(0, tnapi);
10982 			}
10983 		}
10984 		tnapi->chk_msi_cnt = 0;
10985 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10986 		tnapi->last_tx_cons = tnapi->tx_cons;
10987 	}
10988 }
10989 
10990 static void tg3_timer(struct timer_list *t)
10991 {
10992 	struct tg3 *tp = from_timer(tp, t, timer);
10993 
10994 	spin_lock(&tp->lock);
10995 
10996 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10997 		spin_unlock(&tp->lock);
10998 		goto restart_timer;
10999 	}
11000 
11001 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11002 	    tg3_flag(tp, 57765_CLASS))
11003 		tg3_chk_missed_msi(tp);
11004 
11005 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11006 		/* BCM4785: Flush posted writes from GbE to host memory. */
11007 		tr32(HOSTCC_MODE);
11008 	}
11009 
11010 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11011 		/* All of this garbage is because when using non-tagged
11012 		 * IRQ status the mailbox/status_block protocol the chip
11013 		 * uses with the cpu is race prone.
11014 		 */
11015 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11016 			tw32(GRC_LOCAL_CTRL,
11017 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11018 		} else {
11019 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11020 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11021 		}
11022 
11023 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11024 			spin_unlock(&tp->lock);
11025 			tg3_reset_task_schedule(tp);
11026 			goto restart_timer;
11027 		}
11028 	}
11029 
11030 	/* This part only runs once per second. */
11031 	if (!--tp->timer_counter) {
11032 		if (tg3_flag(tp, 5705_PLUS))
11033 			tg3_periodic_fetch_stats(tp);
11034 
11035 		if (tp->setlpicnt && !--tp->setlpicnt)
11036 			tg3_phy_eee_enable(tp);
11037 
11038 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11039 			u32 mac_stat;
11040 			int phy_event;
11041 
11042 			mac_stat = tr32(MAC_STATUS);
11043 
11044 			phy_event = 0;
11045 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11046 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11047 					phy_event = 1;
11048 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11049 				phy_event = 1;
11050 
11051 			if (phy_event)
11052 				tg3_setup_phy(tp, false);
11053 		} else if (tg3_flag(tp, POLL_SERDES)) {
11054 			u32 mac_stat = tr32(MAC_STATUS);
11055 			int need_setup = 0;
11056 
11057 			if (tp->link_up &&
11058 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11059 				need_setup = 1;
11060 			}
11061 			if (!tp->link_up &&
11062 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11063 					 MAC_STATUS_SIGNAL_DET))) {
11064 				need_setup = 1;
11065 			}
11066 			if (need_setup) {
11067 				if (!tp->serdes_counter) {
11068 					tw32_f(MAC_MODE,
11069 					     (tp->mac_mode &
11070 					      ~MAC_MODE_PORT_MODE_MASK));
11071 					udelay(40);
11072 					tw32_f(MAC_MODE, tp->mac_mode);
11073 					udelay(40);
11074 				}
11075 				tg3_setup_phy(tp, false);
11076 			}
11077 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11078 			   tg3_flag(tp, 5780_CLASS)) {
11079 			tg3_serdes_parallel_detect(tp);
11080 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11081 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11082 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11083 					 TG3_CPMU_STATUS_LINK_MASK);
11084 
11085 			if (link_up != tp->link_up)
11086 				tg3_setup_phy(tp, false);
11087 		}
11088 
11089 		tp->timer_counter = tp->timer_multiplier;
11090 	}
11091 
11092 	/* Heartbeat is only sent once every 2 seconds.
11093 	 *
11094 	 * The heartbeat is to tell the ASF firmware that the host
11095 	 * driver is still alive.  In the event that the OS crashes,
11096 	 * ASF needs to reset the hardware to free up the FIFO space
11097 	 * that may be filled with rx packets destined for the host.
11098 	 * If the FIFO is full, ASF will no longer function properly.
11099 	 *
11100 	 * Unintended resets have been reported on real time kernels
11101 	 * where the timer doesn't run on time.  Netpoll will also have
11102 	 * same problem.
11103 	 *
11104 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11105 	 * to check the ring condition when the heartbeat is expiring
11106 	 * before doing the reset.  This will prevent most unintended
11107 	 * resets.
11108 	 */
11109 	if (!--tp->asf_counter) {
11110 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11111 			tg3_wait_for_event_ack(tp);
11112 
11113 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11114 				      FWCMD_NICDRV_ALIVE3);
11115 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11116 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11117 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11118 
11119 			tg3_generate_fw_event(tp);
11120 		}
11121 		tp->asf_counter = tp->asf_multiplier;
11122 	}
11123 
11124 	/* Update the APE heartbeat every 5 seconds.*/
11125 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11126 
11127 	spin_unlock(&tp->lock);
11128 
11129 restart_timer:
11130 	tp->timer.expires = jiffies + tp->timer_offset;
11131 	add_timer(&tp->timer);
11132 }
11133 
11134 static void tg3_timer_init(struct tg3 *tp)
11135 {
11136 	if (tg3_flag(tp, TAGGED_STATUS) &&
11137 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11138 	    !tg3_flag(tp, 57765_CLASS))
11139 		tp->timer_offset = HZ;
11140 	else
11141 		tp->timer_offset = HZ / 10;
11142 
11143 	BUG_ON(tp->timer_offset > HZ);
11144 
11145 	tp->timer_multiplier = (HZ / tp->timer_offset);
11146 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11147 			     TG3_FW_UPDATE_FREQ_SEC;
11148 
11149 	timer_setup(&tp->timer, tg3_timer, 0);
11150 }
11151 
11152 static void tg3_timer_start(struct tg3 *tp)
11153 {
11154 	tp->asf_counter   = tp->asf_multiplier;
11155 	tp->timer_counter = tp->timer_multiplier;
11156 
11157 	tp->timer.expires = jiffies + tp->timer_offset;
11158 	add_timer(&tp->timer);
11159 }
11160 
11161 static void tg3_timer_stop(struct tg3 *tp)
11162 {
11163 	del_timer_sync(&tp->timer);
11164 }
11165 
11166 /* Restart hardware after configuration changes, self-test, etc.
11167  * Invoked with tp->lock held.
11168  */
11169 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11170 	__releases(tp->lock)
11171 	__acquires(tp->lock)
11172 {
11173 	int err;
11174 
11175 	err = tg3_init_hw(tp, reset_phy);
11176 	if (err) {
11177 		netdev_err(tp->dev,
11178 			   "Failed to re-initialize device, aborting\n");
11179 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11180 		tg3_full_unlock(tp);
11181 		tg3_timer_stop(tp);
11182 		tp->irq_sync = 0;
11183 		tg3_napi_enable(tp);
11184 		dev_close(tp->dev);
11185 		tg3_full_lock(tp, 0);
11186 	}
11187 	return err;
11188 }
11189 
11190 static void tg3_reset_task(struct work_struct *work)
11191 {
11192 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11193 	int err;
11194 
11195 	rtnl_lock();
11196 	tg3_full_lock(tp, 0);
11197 
11198 	if (!netif_running(tp->dev)) {
11199 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11200 		tg3_full_unlock(tp);
11201 		rtnl_unlock();
11202 		return;
11203 	}
11204 
11205 	tg3_full_unlock(tp);
11206 
11207 	tg3_phy_stop(tp);
11208 
11209 	tg3_netif_stop(tp);
11210 
11211 	tg3_full_lock(tp, 1);
11212 
11213 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11214 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11215 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11216 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11217 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11218 	}
11219 
11220 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11221 	err = tg3_init_hw(tp, true);
11222 	if (err)
11223 		goto out;
11224 
11225 	tg3_netif_start(tp);
11226 
11227 out:
11228 	tg3_full_unlock(tp);
11229 
11230 	if (!err)
11231 		tg3_phy_start(tp);
11232 
11233 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11234 	rtnl_unlock();
11235 }
11236 
11237 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11238 {
11239 	irq_handler_t fn;
11240 	unsigned long flags;
11241 	char *name;
11242 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11243 
11244 	if (tp->irq_cnt == 1)
11245 		name = tp->dev->name;
11246 	else {
11247 		name = &tnapi->irq_lbl[0];
11248 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11249 			snprintf(name, IFNAMSIZ,
11250 				 "%s-txrx-%d", tp->dev->name, irq_num);
11251 		else if (tnapi->tx_buffers)
11252 			snprintf(name, IFNAMSIZ,
11253 				 "%s-tx-%d", tp->dev->name, irq_num);
11254 		else if (tnapi->rx_rcb)
11255 			snprintf(name, IFNAMSIZ,
11256 				 "%s-rx-%d", tp->dev->name, irq_num);
11257 		else
11258 			snprintf(name, IFNAMSIZ,
11259 				 "%s-%d", tp->dev->name, irq_num);
11260 		name[IFNAMSIZ-1] = 0;
11261 	}
11262 
11263 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11264 		fn = tg3_msi;
11265 		if (tg3_flag(tp, 1SHOT_MSI))
11266 			fn = tg3_msi_1shot;
11267 		flags = 0;
11268 	} else {
11269 		fn = tg3_interrupt;
11270 		if (tg3_flag(tp, TAGGED_STATUS))
11271 			fn = tg3_interrupt_tagged;
11272 		flags = IRQF_SHARED;
11273 	}
11274 
11275 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11276 }
11277 
11278 static int tg3_test_interrupt(struct tg3 *tp)
11279 {
11280 	struct tg3_napi *tnapi = &tp->napi[0];
11281 	struct net_device *dev = tp->dev;
11282 	int err, i, intr_ok = 0;
11283 	u32 val;
11284 
11285 	if (!netif_running(dev))
11286 		return -ENODEV;
11287 
11288 	tg3_disable_ints(tp);
11289 
11290 	free_irq(tnapi->irq_vec, tnapi);
11291 
11292 	/*
11293 	 * Turn off MSI one shot mode.  Otherwise this test has no
11294 	 * observable way to know whether the interrupt was delivered.
11295 	 */
11296 	if (tg3_flag(tp, 57765_PLUS)) {
11297 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11298 		tw32(MSGINT_MODE, val);
11299 	}
11300 
11301 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11302 			  IRQF_SHARED, dev->name, tnapi);
11303 	if (err)
11304 		return err;
11305 
11306 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11307 	tg3_enable_ints(tp);
11308 
11309 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11310 	       tnapi->coal_now);
11311 
11312 	for (i = 0; i < 5; i++) {
11313 		u32 int_mbox, misc_host_ctrl;
11314 
11315 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11316 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11317 
11318 		if ((int_mbox != 0) ||
11319 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11320 			intr_ok = 1;
11321 			break;
11322 		}
11323 
11324 		if (tg3_flag(tp, 57765_PLUS) &&
11325 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11326 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11327 
11328 		msleep(10);
11329 	}
11330 
11331 	tg3_disable_ints(tp);
11332 
11333 	free_irq(tnapi->irq_vec, tnapi);
11334 
11335 	err = tg3_request_irq(tp, 0);
11336 
11337 	if (err)
11338 		return err;
11339 
11340 	if (intr_ok) {
11341 		/* Reenable MSI one shot mode. */
11342 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11343 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11344 			tw32(MSGINT_MODE, val);
11345 		}
11346 		return 0;
11347 	}
11348 
11349 	return -EIO;
11350 }
11351 
11352 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11353  * successfully restored
11354  */
11355 static int tg3_test_msi(struct tg3 *tp)
11356 {
11357 	int err;
11358 	u16 pci_cmd;
11359 
11360 	if (!tg3_flag(tp, USING_MSI))
11361 		return 0;
11362 
11363 	/* Turn off SERR reporting in case MSI terminates with Master
11364 	 * Abort.
11365 	 */
11366 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11367 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11368 			      pci_cmd & ~PCI_COMMAND_SERR);
11369 
11370 	err = tg3_test_interrupt(tp);
11371 
11372 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11373 
11374 	if (!err)
11375 		return 0;
11376 
11377 	/* other failures */
11378 	if (err != -EIO)
11379 		return err;
11380 
11381 	/* MSI test failed, go back to INTx mode */
11382 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11383 		    "to INTx mode. Please report this failure to the PCI "
11384 		    "maintainer and include system chipset information\n");
11385 
11386 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11387 
11388 	pci_disable_msi(tp->pdev);
11389 
11390 	tg3_flag_clear(tp, USING_MSI);
11391 	tp->napi[0].irq_vec = tp->pdev->irq;
11392 
11393 	err = tg3_request_irq(tp, 0);
11394 	if (err)
11395 		return err;
11396 
11397 	/* Need to reset the chip because the MSI cycle may have terminated
11398 	 * with Master Abort.
11399 	 */
11400 	tg3_full_lock(tp, 1);
11401 
11402 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11403 	err = tg3_init_hw(tp, true);
11404 
11405 	tg3_full_unlock(tp);
11406 
11407 	if (err)
11408 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11409 
11410 	return err;
11411 }
11412 
11413 static int tg3_request_firmware(struct tg3 *tp)
11414 {
11415 	const struct tg3_firmware_hdr *fw_hdr;
11416 
11417 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11418 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11419 			   tp->fw_needed);
11420 		return -ENOENT;
11421 	}
11422 
11423 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11424 
11425 	/* Firmware blob starts with version numbers, followed by
11426 	 * start address and _full_ length including BSS sections
11427 	 * (which must be longer than the actual data, of course
11428 	 */
11429 
11430 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11431 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11432 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11433 			   tp->fw_len, tp->fw_needed);
11434 		release_firmware(tp->fw);
11435 		tp->fw = NULL;
11436 		return -EINVAL;
11437 	}
11438 
11439 	/* We no longer need firmware; we have it. */
11440 	tp->fw_needed = NULL;
11441 	return 0;
11442 }
11443 
11444 static u32 tg3_irq_count(struct tg3 *tp)
11445 {
11446 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11447 
11448 	if (irq_cnt > 1) {
11449 		/* We want as many rx rings enabled as there are cpus.
11450 		 * In multiqueue MSI-X mode, the first MSI-X vector
11451 		 * only deals with link interrupts, etc, so we add
11452 		 * one to the number of vectors we are requesting.
11453 		 */
11454 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11455 	}
11456 
11457 	return irq_cnt;
11458 }
11459 
11460 static bool tg3_enable_msix(struct tg3 *tp)
11461 {
11462 	int i, rc;
11463 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11464 
11465 	tp->txq_cnt = tp->txq_req;
11466 	tp->rxq_cnt = tp->rxq_req;
11467 	if (!tp->rxq_cnt)
11468 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11469 	if (tp->rxq_cnt > tp->rxq_max)
11470 		tp->rxq_cnt = tp->rxq_max;
11471 
11472 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11473 	 * scheduling of the TX rings can cause starvation of rings with
11474 	 * small packets when other rings have TSO or jumbo packets.
11475 	 */
11476 	if (!tp->txq_req)
11477 		tp->txq_cnt = 1;
11478 
11479 	tp->irq_cnt = tg3_irq_count(tp);
11480 
11481 	for (i = 0; i < tp->irq_max; i++) {
11482 		msix_ent[i].entry  = i;
11483 		msix_ent[i].vector = 0;
11484 	}
11485 
11486 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11487 	if (rc < 0) {
11488 		return false;
11489 	} else if (rc < tp->irq_cnt) {
11490 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11491 			      tp->irq_cnt, rc);
11492 		tp->irq_cnt = rc;
11493 		tp->rxq_cnt = max(rc - 1, 1);
11494 		if (tp->txq_cnt)
11495 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11496 	}
11497 
11498 	for (i = 0; i < tp->irq_max; i++)
11499 		tp->napi[i].irq_vec = msix_ent[i].vector;
11500 
11501 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11502 		pci_disable_msix(tp->pdev);
11503 		return false;
11504 	}
11505 
11506 	if (tp->irq_cnt == 1)
11507 		return true;
11508 
11509 	tg3_flag_set(tp, ENABLE_RSS);
11510 
11511 	if (tp->txq_cnt > 1)
11512 		tg3_flag_set(tp, ENABLE_TSS);
11513 
11514 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11515 
11516 	return true;
11517 }
11518 
11519 static void tg3_ints_init(struct tg3 *tp)
11520 {
11521 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11522 	    !tg3_flag(tp, TAGGED_STATUS)) {
11523 		/* All MSI supporting chips should support tagged
11524 		 * status.  Assert that this is the case.
11525 		 */
11526 		netdev_warn(tp->dev,
11527 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11528 		goto defcfg;
11529 	}
11530 
11531 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11532 		tg3_flag_set(tp, USING_MSIX);
11533 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11534 		tg3_flag_set(tp, USING_MSI);
11535 
11536 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11537 		u32 msi_mode = tr32(MSGINT_MODE);
11538 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11539 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11540 		if (!tg3_flag(tp, 1SHOT_MSI))
11541 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11542 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11543 	}
11544 defcfg:
11545 	if (!tg3_flag(tp, USING_MSIX)) {
11546 		tp->irq_cnt = 1;
11547 		tp->napi[0].irq_vec = tp->pdev->irq;
11548 	}
11549 
11550 	if (tp->irq_cnt == 1) {
11551 		tp->txq_cnt = 1;
11552 		tp->rxq_cnt = 1;
11553 		netif_set_real_num_tx_queues(tp->dev, 1);
11554 		netif_set_real_num_rx_queues(tp->dev, 1);
11555 	}
11556 }
11557 
11558 static void tg3_ints_fini(struct tg3 *tp)
11559 {
11560 	if (tg3_flag(tp, USING_MSIX))
11561 		pci_disable_msix(tp->pdev);
11562 	else if (tg3_flag(tp, USING_MSI))
11563 		pci_disable_msi(tp->pdev);
11564 	tg3_flag_clear(tp, USING_MSI);
11565 	tg3_flag_clear(tp, USING_MSIX);
11566 	tg3_flag_clear(tp, ENABLE_RSS);
11567 	tg3_flag_clear(tp, ENABLE_TSS);
11568 }
11569 
11570 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11571 		     bool init)
11572 {
11573 	struct net_device *dev = tp->dev;
11574 	int i, err;
11575 
11576 	/*
11577 	 * Setup interrupts first so we know how
11578 	 * many NAPI resources to allocate
11579 	 */
11580 	tg3_ints_init(tp);
11581 
11582 	tg3_rss_check_indir_tbl(tp);
11583 
11584 	/* The placement of this call is tied
11585 	 * to the setup and use of Host TX descriptors.
11586 	 */
11587 	err = tg3_alloc_consistent(tp);
11588 	if (err)
11589 		goto out_ints_fini;
11590 
11591 	tg3_napi_init(tp);
11592 
11593 	tg3_napi_enable(tp);
11594 
11595 	for (i = 0; i < tp->irq_cnt; i++) {
11596 		err = tg3_request_irq(tp, i);
11597 		if (err) {
11598 			for (i--; i >= 0; i--) {
11599 				struct tg3_napi *tnapi = &tp->napi[i];
11600 
11601 				free_irq(tnapi->irq_vec, tnapi);
11602 			}
11603 			goto out_napi_fini;
11604 		}
11605 	}
11606 
11607 	tg3_full_lock(tp, 0);
11608 
11609 	if (init)
11610 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11611 
11612 	err = tg3_init_hw(tp, reset_phy);
11613 	if (err) {
11614 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11615 		tg3_free_rings(tp);
11616 	}
11617 
11618 	tg3_full_unlock(tp);
11619 
11620 	if (err)
11621 		goto out_free_irq;
11622 
11623 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11624 		err = tg3_test_msi(tp);
11625 
11626 		if (err) {
11627 			tg3_full_lock(tp, 0);
11628 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11629 			tg3_free_rings(tp);
11630 			tg3_full_unlock(tp);
11631 
11632 			goto out_napi_fini;
11633 		}
11634 
11635 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11636 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11637 
11638 			tw32(PCIE_TRANSACTION_CFG,
11639 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11640 		}
11641 	}
11642 
11643 	tg3_phy_start(tp);
11644 
11645 	tg3_hwmon_open(tp);
11646 
11647 	tg3_full_lock(tp, 0);
11648 
11649 	tg3_timer_start(tp);
11650 	tg3_flag_set(tp, INIT_COMPLETE);
11651 	tg3_enable_ints(tp);
11652 
11653 	tg3_ptp_resume(tp);
11654 
11655 	tg3_full_unlock(tp);
11656 
11657 	netif_tx_start_all_queues(dev);
11658 
11659 	/*
11660 	 * Reset loopback feature if it was turned on while the device was down
11661 	 * make sure that it's installed properly now.
11662 	 */
11663 	if (dev->features & NETIF_F_LOOPBACK)
11664 		tg3_set_loopback(dev, dev->features);
11665 
11666 	return 0;
11667 
11668 out_free_irq:
11669 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11670 		struct tg3_napi *tnapi = &tp->napi[i];
11671 		free_irq(tnapi->irq_vec, tnapi);
11672 	}
11673 
11674 out_napi_fini:
11675 	tg3_napi_disable(tp);
11676 	tg3_napi_fini(tp);
11677 	tg3_free_consistent(tp);
11678 
11679 out_ints_fini:
11680 	tg3_ints_fini(tp);
11681 
11682 	return err;
11683 }
11684 
11685 static void tg3_stop(struct tg3 *tp)
11686 {
11687 	int i;
11688 
11689 	tg3_reset_task_cancel(tp);
11690 	tg3_netif_stop(tp);
11691 
11692 	tg3_timer_stop(tp);
11693 
11694 	tg3_hwmon_close(tp);
11695 
11696 	tg3_phy_stop(tp);
11697 
11698 	tg3_full_lock(tp, 1);
11699 
11700 	tg3_disable_ints(tp);
11701 
11702 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11703 	tg3_free_rings(tp);
11704 	tg3_flag_clear(tp, INIT_COMPLETE);
11705 
11706 	tg3_full_unlock(tp);
11707 
11708 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11709 		struct tg3_napi *tnapi = &tp->napi[i];
11710 		free_irq(tnapi->irq_vec, tnapi);
11711 	}
11712 
11713 	tg3_ints_fini(tp);
11714 
11715 	tg3_napi_fini(tp);
11716 
11717 	tg3_free_consistent(tp);
11718 }
11719 
11720 static int tg3_open(struct net_device *dev)
11721 {
11722 	struct tg3 *tp = netdev_priv(dev);
11723 	int err;
11724 
11725 	if (tp->pcierr_recovery) {
11726 		netdev_err(dev, "Failed to open device. PCI error recovery "
11727 			   "in progress\n");
11728 		return -EAGAIN;
11729 	}
11730 
11731 	if (tp->fw_needed) {
11732 		err = tg3_request_firmware(tp);
11733 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11734 			if (err) {
11735 				netdev_warn(tp->dev, "EEE capability disabled\n");
11736 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11737 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11738 				netdev_warn(tp->dev, "EEE capability restored\n");
11739 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11740 			}
11741 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11742 			if (err)
11743 				return err;
11744 		} else if (err) {
11745 			netdev_warn(tp->dev, "TSO capability disabled\n");
11746 			tg3_flag_clear(tp, TSO_CAPABLE);
11747 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11748 			netdev_notice(tp->dev, "TSO capability restored\n");
11749 			tg3_flag_set(tp, TSO_CAPABLE);
11750 		}
11751 	}
11752 
11753 	tg3_carrier_off(tp);
11754 
11755 	err = tg3_power_up(tp);
11756 	if (err)
11757 		return err;
11758 
11759 	tg3_full_lock(tp, 0);
11760 
11761 	tg3_disable_ints(tp);
11762 	tg3_flag_clear(tp, INIT_COMPLETE);
11763 
11764 	tg3_full_unlock(tp);
11765 
11766 	err = tg3_start(tp,
11767 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11768 			true, true);
11769 	if (err) {
11770 		tg3_frob_aux_power(tp, false);
11771 		pci_set_power_state(tp->pdev, PCI_D3hot);
11772 	}
11773 
11774 	return err;
11775 }
11776 
11777 static int tg3_close(struct net_device *dev)
11778 {
11779 	struct tg3 *tp = netdev_priv(dev);
11780 
11781 	if (tp->pcierr_recovery) {
11782 		netdev_err(dev, "Failed to close device. PCI error recovery "
11783 			   "in progress\n");
11784 		return -EAGAIN;
11785 	}
11786 
11787 	tg3_stop(tp);
11788 
11789 	if (pci_device_is_present(tp->pdev)) {
11790 		tg3_power_down_prepare(tp);
11791 
11792 		tg3_carrier_off(tp);
11793 	}
11794 	return 0;
11795 }
11796 
11797 static inline u64 get_stat64(tg3_stat64_t *val)
11798 {
11799        return ((u64)val->high << 32) | ((u64)val->low);
11800 }
11801 
11802 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11803 {
11804 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11805 
11806 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11807 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11808 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11809 		u32 val;
11810 
11811 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11812 			tg3_writephy(tp, MII_TG3_TEST1,
11813 				     val | MII_TG3_TEST1_CRC_EN);
11814 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11815 		} else
11816 			val = 0;
11817 
11818 		tp->phy_crc_errors += val;
11819 
11820 		return tp->phy_crc_errors;
11821 	}
11822 
11823 	return get_stat64(&hw_stats->rx_fcs_errors);
11824 }
11825 
11826 #define ESTAT_ADD(member) \
11827 	estats->member =	old_estats->member + \
11828 				get_stat64(&hw_stats->member)
11829 
11830 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11831 {
11832 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11833 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11834 
11835 	ESTAT_ADD(rx_octets);
11836 	ESTAT_ADD(rx_fragments);
11837 	ESTAT_ADD(rx_ucast_packets);
11838 	ESTAT_ADD(rx_mcast_packets);
11839 	ESTAT_ADD(rx_bcast_packets);
11840 	ESTAT_ADD(rx_fcs_errors);
11841 	ESTAT_ADD(rx_align_errors);
11842 	ESTAT_ADD(rx_xon_pause_rcvd);
11843 	ESTAT_ADD(rx_xoff_pause_rcvd);
11844 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11845 	ESTAT_ADD(rx_xoff_entered);
11846 	ESTAT_ADD(rx_frame_too_long_errors);
11847 	ESTAT_ADD(rx_jabbers);
11848 	ESTAT_ADD(rx_undersize_packets);
11849 	ESTAT_ADD(rx_in_length_errors);
11850 	ESTAT_ADD(rx_out_length_errors);
11851 	ESTAT_ADD(rx_64_or_less_octet_packets);
11852 	ESTAT_ADD(rx_65_to_127_octet_packets);
11853 	ESTAT_ADD(rx_128_to_255_octet_packets);
11854 	ESTAT_ADD(rx_256_to_511_octet_packets);
11855 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11856 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11857 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11858 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11859 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11860 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11861 
11862 	ESTAT_ADD(tx_octets);
11863 	ESTAT_ADD(tx_collisions);
11864 	ESTAT_ADD(tx_xon_sent);
11865 	ESTAT_ADD(tx_xoff_sent);
11866 	ESTAT_ADD(tx_flow_control);
11867 	ESTAT_ADD(tx_mac_errors);
11868 	ESTAT_ADD(tx_single_collisions);
11869 	ESTAT_ADD(tx_mult_collisions);
11870 	ESTAT_ADD(tx_deferred);
11871 	ESTAT_ADD(tx_excessive_collisions);
11872 	ESTAT_ADD(tx_late_collisions);
11873 	ESTAT_ADD(tx_collide_2times);
11874 	ESTAT_ADD(tx_collide_3times);
11875 	ESTAT_ADD(tx_collide_4times);
11876 	ESTAT_ADD(tx_collide_5times);
11877 	ESTAT_ADD(tx_collide_6times);
11878 	ESTAT_ADD(tx_collide_7times);
11879 	ESTAT_ADD(tx_collide_8times);
11880 	ESTAT_ADD(tx_collide_9times);
11881 	ESTAT_ADD(tx_collide_10times);
11882 	ESTAT_ADD(tx_collide_11times);
11883 	ESTAT_ADD(tx_collide_12times);
11884 	ESTAT_ADD(tx_collide_13times);
11885 	ESTAT_ADD(tx_collide_14times);
11886 	ESTAT_ADD(tx_collide_15times);
11887 	ESTAT_ADD(tx_ucast_packets);
11888 	ESTAT_ADD(tx_mcast_packets);
11889 	ESTAT_ADD(tx_bcast_packets);
11890 	ESTAT_ADD(tx_carrier_sense_errors);
11891 	ESTAT_ADD(tx_discards);
11892 	ESTAT_ADD(tx_errors);
11893 
11894 	ESTAT_ADD(dma_writeq_full);
11895 	ESTAT_ADD(dma_write_prioq_full);
11896 	ESTAT_ADD(rxbds_empty);
11897 	ESTAT_ADD(rx_discards);
11898 	ESTAT_ADD(rx_errors);
11899 	ESTAT_ADD(rx_threshold_hit);
11900 
11901 	ESTAT_ADD(dma_readq_full);
11902 	ESTAT_ADD(dma_read_prioq_full);
11903 	ESTAT_ADD(tx_comp_queue_full);
11904 
11905 	ESTAT_ADD(ring_set_send_prod_index);
11906 	ESTAT_ADD(ring_status_update);
11907 	ESTAT_ADD(nic_irqs);
11908 	ESTAT_ADD(nic_avoided_irqs);
11909 	ESTAT_ADD(nic_tx_threshold_hit);
11910 
11911 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11912 }
11913 
11914 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11915 {
11916 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11917 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11918 
11919 	stats->rx_packets = old_stats->rx_packets +
11920 		get_stat64(&hw_stats->rx_ucast_packets) +
11921 		get_stat64(&hw_stats->rx_mcast_packets) +
11922 		get_stat64(&hw_stats->rx_bcast_packets);
11923 
11924 	stats->tx_packets = old_stats->tx_packets +
11925 		get_stat64(&hw_stats->tx_ucast_packets) +
11926 		get_stat64(&hw_stats->tx_mcast_packets) +
11927 		get_stat64(&hw_stats->tx_bcast_packets);
11928 
11929 	stats->rx_bytes = old_stats->rx_bytes +
11930 		get_stat64(&hw_stats->rx_octets);
11931 	stats->tx_bytes = old_stats->tx_bytes +
11932 		get_stat64(&hw_stats->tx_octets);
11933 
11934 	stats->rx_errors = old_stats->rx_errors +
11935 		get_stat64(&hw_stats->rx_errors);
11936 	stats->tx_errors = old_stats->tx_errors +
11937 		get_stat64(&hw_stats->tx_errors) +
11938 		get_stat64(&hw_stats->tx_mac_errors) +
11939 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11940 		get_stat64(&hw_stats->tx_discards);
11941 
11942 	stats->multicast = old_stats->multicast +
11943 		get_stat64(&hw_stats->rx_mcast_packets);
11944 	stats->collisions = old_stats->collisions +
11945 		get_stat64(&hw_stats->tx_collisions);
11946 
11947 	stats->rx_length_errors = old_stats->rx_length_errors +
11948 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11949 		get_stat64(&hw_stats->rx_undersize_packets);
11950 
11951 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11952 		get_stat64(&hw_stats->rx_align_errors);
11953 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11954 		get_stat64(&hw_stats->tx_discards);
11955 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11956 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11957 
11958 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11959 		tg3_calc_crc_errors(tp);
11960 
11961 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11962 		get_stat64(&hw_stats->rx_discards);
11963 
11964 	stats->rx_dropped = tp->rx_dropped;
11965 	stats->tx_dropped = tp->tx_dropped;
11966 }
11967 
11968 static int tg3_get_regs_len(struct net_device *dev)
11969 {
11970 	return TG3_REG_BLK_SIZE;
11971 }
11972 
11973 static void tg3_get_regs(struct net_device *dev,
11974 		struct ethtool_regs *regs, void *_p)
11975 {
11976 	struct tg3 *tp = netdev_priv(dev);
11977 
11978 	regs->version = 0;
11979 
11980 	memset(_p, 0, TG3_REG_BLK_SIZE);
11981 
11982 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11983 		return;
11984 
11985 	tg3_full_lock(tp, 0);
11986 
11987 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11988 
11989 	tg3_full_unlock(tp);
11990 }
11991 
11992 static int tg3_get_eeprom_len(struct net_device *dev)
11993 {
11994 	struct tg3 *tp = netdev_priv(dev);
11995 
11996 	return tp->nvram_size;
11997 }
11998 
11999 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12000 {
12001 	struct tg3 *tp = netdev_priv(dev);
12002 	int ret, cpmu_restore = 0;
12003 	u8  *pd;
12004 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12005 	__be32 val;
12006 
12007 	if (tg3_flag(tp, NO_NVRAM))
12008 		return -EINVAL;
12009 
12010 	offset = eeprom->offset;
12011 	len = eeprom->len;
12012 	eeprom->len = 0;
12013 
12014 	eeprom->magic = TG3_EEPROM_MAGIC;
12015 
12016 	/* Override clock, link aware and link idle modes */
12017 	if (tg3_flag(tp, CPMU_PRESENT)) {
12018 		cpmu_val = tr32(TG3_CPMU_CTRL);
12019 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12020 				CPMU_CTRL_LINK_IDLE_MODE)) {
12021 			tw32(TG3_CPMU_CTRL, cpmu_val &
12022 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12023 					     CPMU_CTRL_LINK_IDLE_MODE));
12024 			cpmu_restore = 1;
12025 		}
12026 	}
12027 	tg3_override_clk(tp);
12028 
12029 	if (offset & 3) {
12030 		/* adjustments to start on required 4 byte boundary */
12031 		b_offset = offset & 3;
12032 		b_count = 4 - b_offset;
12033 		if (b_count > len) {
12034 			/* i.e. offset=1 len=2 */
12035 			b_count = len;
12036 		}
12037 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12038 		if (ret)
12039 			goto eeprom_done;
12040 		memcpy(data, ((char *)&val) + b_offset, b_count);
12041 		len -= b_count;
12042 		offset += b_count;
12043 		eeprom->len += b_count;
12044 	}
12045 
12046 	/* read bytes up to the last 4 byte boundary */
12047 	pd = &data[eeprom->len];
12048 	for (i = 0; i < (len - (len & 3)); i += 4) {
12049 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12050 		if (ret) {
12051 			if (i)
12052 				i -= 4;
12053 			eeprom->len += i;
12054 			goto eeprom_done;
12055 		}
12056 		memcpy(pd + i, &val, 4);
12057 		if (need_resched()) {
12058 			if (signal_pending(current)) {
12059 				eeprom->len += i;
12060 				ret = -EINTR;
12061 				goto eeprom_done;
12062 			}
12063 			cond_resched();
12064 		}
12065 	}
12066 	eeprom->len += i;
12067 
12068 	if (len & 3) {
12069 		/* read last bytes not ending on 4 byte boundary */
12070 		pd = &data[eeprom->len];
12071 		b_count = len & 3;
12072 		b_offset = offset + len - b_count;
12073 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12074 		if (ret)
12075 			goto eeprom_done;
12076 		memcpy(pd, &val, b_count);
12077 		eeprom->len += b_count;
12078 	}
12079 	ret = 0;
12080 
12081 eeprom_done:
12082 	/* Restore clock, link aware and link idle modes */
12083 	tg3_restore_clk(tp);
12084 	if (cpmu_restore)
12085 		tw32(TG3_CPMU_CTRL, cpmu_val);
12086 
12087 	return ret;
12088 }
12089 
12090 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12091 {
12092 	struct tg3 *tp = netdev_priv(dev);
12093 	int ret;
12094 	u32 offset, len, b_offset, odd_len;
12095 	u8 *buf;
12096 	__be32 start = 0, end;
12097 
12098 	if (tg3_flag(tp, NO_NVRAM) ||
12099 	    eeprom->magic != TG3_EEPROM_MAGIC)
12100 		return -EINVAL;
12101 
12102 	offset = eeprom->offset;
12103 	len = eeprom->len;
12104 
12105 	if ((b_offset = (offset & 3))) {
12106 		/* adjustments to start on required 4 byte boundary */
12107 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12108 		if (ret)
12109 			return ret;
12110 		len += b_offset;
12111 		offset &= ~3;
12112 		if (len < 4)
12113 			len = 4;
12114 	}
12115 
12116 	odd_len = 0;
12117 	if (len & 3) {
12118 		/* adjustments to end on required 4 byte boundary */
12119 		odd_len = 1;
12120 		len = (len + 3) & ~3;
12121 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12122 		if (ret)
12123 			return ret;
12124 	}
12125 
12126 	buf = data;
12127 	if (b_offset || odd_len) {
12128 		buf = kmalloc(len, GFP_KERNEL);
12129 		if (!buf)
12130 			return -ENOMEM;
12131 		if (b_offset)
12132 			memcpy(buf, &start, 4);
12133 		if (odd_len)
12134 			memcpy(buf+len-4, &end, 4);
12135 		memcpy(buf + b_offset, data, eeprom->len);
12136 	}
12137 
12138 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12139 
12140 	if (buf != data)
12141 		kfree(buf);
12142 
12143 	return ret;
12144 }
12145 
12146 static int tg3_get_link_ksettings(struct net_device *dev,
12147 				  struct ethtool_link_ksettings *cmd)
12148 {
12149 	struct tg3 *tp = netdev_priv(dev);
12150 	u32 supported, advertising;
12151 
12152 	if (tg3_flag(tp, USE_PHYLIB)) {
12153 		struct phy_device *phydev;
12154 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12155 			return -EAGAIN;
12156 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12157 		phy_ethtool_ksettings_get(phydev, cmd);
12158 
12159 		return 0;
12160 	}
12161 
12162 	supported = (SUPPORTED_Autoneg);
12163 
12164 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12165 		supported |= (SUPPORTED_1000baseT_Half |
12166 			      SUPPORTED_1000baseT_Full);
12167 
12168 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12169 		supported |= (SUPPORTED_100baseT_Half |
12170 			      SUPPORTED_100baseT_Full |
12171 			      SUPPORTED_10baseT_Half |
12172 			      SUPPORTED_10baseT_Full |
12173 			      SUPPORTED_TP);
12174 		cmd->base.port = PORT_TP;
12175 	} else {
12176 		supported |= SUPPORTED_FIBRE;
12177 		cmd->base.port = PORT_FIBRE;
12178 	}
12179 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12180 						supported);
12181 
12182 	advertising = tp->link_config.advertising;
12183 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12184 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12185 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12186 				advertising |= ADVERTISED_Pause;
12187 			} else {
12188 				advertising |= ADVERTISED_Pause |
12189 					ADVERTISED_Asym_Pause;
12190 			}
12191 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12192 			advertising |= ADVERTISED_Asym_Pause;
12193 		}
12194 	}
12195 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12196 						advertising);
12197 
12198 	if (netif_running(dev) && tp->link_up) {
12199 		cmd->base.speed = tp->link_config.active_speed;
12200 		cmd->base.duplex = tp->link_config.active_duplex;
12201 		ethtool_convert_legacy_u32_to_link_mode(
12202 			cmd->link_modes.lp_advertising,
12203 			tp->link_config.rmt_adv);
12204 
12205 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12206 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12207 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12208 			else
12209 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12210 		}
12211 	} else {
12212 		cmd->base.speed = SPEED_UNKNOWN;
12213 		cmd->base.duplex = DUPLEX_UNKNOWN;
12214 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12215 	}
12216 	cmd->base.phy_address = tp->phy_addr;
12217 	cmd->base.autoneg = tp->link_config.autoneg;
12218 	return 0;
12219 }
12220 
12221 static int tg3_set_link_ksettings(struct net_device *dev,
12222 				  const struct ethtool_link_ksettings *cmd)
12223 {
12224 	struct tg3 *tp = netdev_priv(dev);
12225 	u32 speed = cmd->base.speed;
12226 	u32 advertising;
12227 
12228 	if (tg3_flag(tp, USE_PHYLIB)) {
12229 		struct phy_device *phydev;
12230 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12231 			return -EAGAIN;
12232 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12233 		return phy_ethtool_ksettings_set(phydev, cmd);
12234 	}
12235 
12236 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12237 	    cmd->base.autoneg != AUTONEG_DISABLE)
12238 		return -EINVAL;
12239 
12240 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12241 	    cmd->base.duplex != DUPLEX_FULL &&
12242 	    cmd->base.duplex != DUPLEX_HALF)
12243 		return -EINVAL;
12244 
12245 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12246 						cmd->link_modes.advertising);
12247 
12248 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12249 		u32 mask = ADVERTISED_Autoneg |
12250 			   ADVERTISED_Pause |
12251 			   ADVERTISED_Asym_Pause;
12252 
12253 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12254 			mask |= ADVERTISED_1000baseT_Half |
12255 				ADVERTISED_1000baseT_Full;
12256 
12257 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12258 			mask |= ADVERTISED_100baseT_Half |
12259 				ADVERTISED_100baseT_Full |
12260 				ADVERTISED_10baseT_Half |
12261 				ADVERTISED_10baseT_Full |
12262 				ADVERTISED_TP;
12263 		else
12264 			mask |= ADVERTISED_FIBRE;
12265 
12266 		if (advertising & ~mask)
12267 			return -EINVAL;
12268 
12269 		mask &= (ADVERTISED_1000baseT_Half |
12270 			 ADVERTISED_1000baseT_Full |
12271 			 ADVERTISED_100baseT_Half |
12272 			 ADVERTISED_100baseT_Full |
12273 			 ADVERTISED_10baseT_Half |
12274 			 ADVERTISED_10baseT_Full);
12275 
12276 		advertising &= mask;
12277 	} else {
12278 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12279 			if (speed != SPEED_1000)
12280 				return -EINVAL;
12281 
12282 			if (cmd->base.duplex != DUPLEX_FULL)
12283 				return -EINVAL;
12284 		} else {
12285 			if (speed != SPEED_100 &&
12286 			    speed != SPEED_10)
12287 				return -EINVAL;
12288 		}
12289 	}
12290 
12291 	tg3_full_lock(tp, 0);
12292 
12293 	tp->link_config.autoneg = cmd->base.autoneg;
12294 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12295 		tp->link_config.advertising = (advertising |
12296 					      ADVERTISED_Autoneg);
12297 		tp->link_config.speed = SPEED_UNKNOWN;
12298 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12299 	} else {
12300 		tp->link_config.advertising = 0;
12301 		tp->link_config.speed = speed;
12302 		tp->link_config.duplex = cmd->base.duplex;
12303 	}
12304 
12305 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12306 
12307 	tg3_warn_mgmt_link_flap(tp);
12308 
12309 	if (netif_running(dev))
12310 		tg3_setup_phy(tp, true);
12311 
12312 	tg3_full_unlock(tp);
12313 
12314 	return 0;
12315 }
12316 
12317 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12318 {
12319 	struct tg3 *tp = netdev_priv(dev);
12320 
12321 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12322 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12323 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12324 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12325 }
12326 
12327 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12328 {
12329 	struct tg3 *tp = netdev_priv(dev);
12330 
12331 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12332 		wol->supported = WAKE_MAGIC;
12333 	else
12334 		wol->supported = 0;
12335 	wol->wolopts = 0;
12336 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12337 		wol->wolopts = WAKE_MAGIC;
12338 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12339 }
12340 
12341 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12342 {
12343 	struct tg3 *tp = netdev_priv(dev);
12344 	struct device *dp = &tp->pdev->dev;
12345 
12346 	if (wol->wolopts & ~WAKE_MAGIC)
12347 		return -EINVAL;
12348 	if ((wol->wolopts & WAKE_MAGIC) &&
12349 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12350 		return -EINVAL;
12351 
12352 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12353 
12354 	if (device_may_wakeup(dp))
12355 		tg3_flag_set(tp, WOL_ENABLE);
12356 	else
12357 		tg3_flag_clear(tp, WOL_ENABLE);
12358 
12359 	return 0;
12360 }
12361 
12362 static u32 tg3_get_msglevel(struct net_device *dev)
12363 {
12364 	struct tg3 *tp = netdev_priv(dev);
12365 	return tp->msg_enable;
12366 }
12367 
12368 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12369 {
12370 	struct tg3 *tp = netdev_priv(dev);
12371 	tp->msg_enable = value;
12372 }
12373 
12374 static int tg3_nway_reset(struct net_device *dev)
12375 {
12376 	struct tg3 *tp = netdev_priv(dev);
12377 	int r;
12378 
12379 	if (!netif_running(dev))
12380 		return -EAGAIN;
12381 
12382 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12383 		return -EINVAL;
12384 
12385 	tg3_warn_mgmt_link_flap(tp);
12386 
12387 	if (tg3_flag(tp, USE_PHYLIB)) {
12388 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12389 			return -EAGAIN;
12390 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12391 	} else {
12392 		u32 bmcr;
12393 
12394 		spin_lock_bh(&tp->lock);
12395 		r = -EINVAL;
12396 		tg3_readphy(tp, MII_BMCR, &bmcr);
12397 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12398 		    ((bmcr & BMCR_ANENABLE) ||
12399 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12400 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12401 						   BMCR_ANENABLE);
12402 			r = 0;
12403 		}
12404 		spin_unlock_bh(&tp->lock);
12405 	}
12406 
12407 	return r;
12408 }
12409 
12410 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12411 {
12412 	struct tg3 *tp = netdev_priv(dev);
12413 
12414 	ering->rx_max_pending = tp->rx_std_ring_mask;
12415 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12416 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12417 	else
12418 		ering->rx_jumbo_max_pending = 0;
12419 
12420 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12421 
12422 	ering->rx_pending = tp->rx_pending;
12423 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12424 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12425 	else
12426 		ering->rx_jumbo_pending = 0;
12427 
12428 	ering->tx_pending = tp->napi[0].tx_pending;
12429 }
12430 
12431 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12432 {
12433 	struct tg3 *tp = netdev_priv(dev);
12434 	int i, irq_sync = 0, err = 0;
12435 	bool reset_phy = false;
12436 
12437 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12438 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12439 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12440 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12441 	    (tg3_flag(tp, TSO_BUG) &&
12442 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12443 		return -EINVAL;
12444 
12445 	if (netif_running(dev)) {
12446 		tg3_phy_stop(tp);
12447 		tg3_netif_stop(tp);
12448 		irq_sync = 1;
12449 	}
12450 
12451 	tg3_full_lock(tp, irq_sync);
12452 
12453 	tp->rx_pending = ering->rx_pending;
12454 
12455 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12456 	    tp->rx_pending > 63)
12457 		tp->rx_pending = 63;
12458 
12459 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12460 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12461 
12462 	for (i = 0; i < tp->irq_max; i++)
12463 		tp->napi[i].tx_pending = ering->tx_pending;
12464 
12465 	if (netif_running(dev)) {
12466 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12467 		/* Reset PHY to avoid PHY lock up */
12468 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12469 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12470 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12471 			reset_phy = true;
12472 
12473 		err = tg3_restart_hw(tp, reset_phy);
12474 		if (!err)
12475 			tg3_netif_start(tp);
12476 	}
12477 
12478 	tg3_full_unlock(tp);
12479 
12480 	if (irq_sync && !err)
12481 		tg3_phy_start(tp);
12482 
12483 	return err;
12484 }
12485 
12486 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12487 {
12488 	struct tg3 *tp = netdev_priv(dev);
12489 
12490 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12491 
12492 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12493 		epause->rx_pause = 1;
12494 	else
12495 		epause->rx_pause = 0;
12496 
12497 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12498 		epause->tx_pause = 1;
12499 	else
12500 		epause->tx_pause = 0;
12501 }
12502 
12503 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12504 {
12505 	struct tg3 *tp = netdev_priv(dev);
12506 	int err = 0;
12507 	bool reset_phy = false;
12508 
12509 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12510 		tg3_warn_mgmt_link_flap(tp);
12511 
12512 	if (tg3_flag(tp, USE_PHYLIB)) {
12513 		struct phy_device *phydev;
12514 
12515 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12516 
12517 		if (!phy_validate_pause(phydev, epause))
12518 			return -EINVAL;
12519 
12520 		tp->link_config.flowctrl = 0;
12521 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12522 		if (epause->rx_pause) {
12523 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12524 
12525 			if (epause->tx_pause) {
12526 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12527 			}
12528 		} else if (epause->tx_pause) {
12529 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12530 		}
12531 
12532 		if (epause->autoneg)
12533 			tg3_flag_set(tp, PAUSE_AUTONEG);
12534 		else
12535 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12536 
12537 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12538 			if (phydev->autoneg) {
12539 				/* phy_set_asym_pause() will
12540 				 * renegotiate the link to inform our
12541 				 * link partner of our flow control
12542 				 * settings, even if the flow control
12543 				 * is forced.  Let tg3_adjust_link()
12544 				 * do the final flow control setup.
12545 				 */
12546 				return 0;
12547 			}
12548 
12549 			if (!epause->autoneg)
12550 				tg3_setup_flow_control(tp, 0, 0);
12551 		}
12552 	} else {
12553 		int irq_sync = 0;
12554 
12555 		if (netif_running(dev)) {
12556 			tg3_netif_stop(tp);
12557 			irq_sync = 1;
12558 		}
12559 
12560 		tg3_full_lock(tp, irq_sync);
12561 
12562 		if (epause->autoneg)
12563 			tg3_flag_set(tp, PAUSE_AUTONEG);
12564 		else
12565 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12566 		if (epause->rx_pause)
12567 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12568 		else
12569 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12570 		if (epause->tx_pause)
12571 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12572 		else
12573 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12574 
12575 		if (netif_running(dev)) {
12576 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12577 			/* Reset PHY to avoid PHY lock up */
12578 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12579 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12580 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12581 				reset_phy = true;
12582 
12583 			err = tg3_restart_hw(tp, reset_phy);
12584 			if (!err)
12585 				tg3_netif_start(tp);
12586 		}
12587 
12588 		tg3_full_unlock(tp);
12589 	}
12590 
12591 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12592 
12593 	return err;
12594 }
12595 
12596 static int tg3_get_sset_count(struct net_device *dev, int sset)
12597 {
12598 	switch (sset) {
12599 	case ETH_SS_TEST:
12600 		return TG3_NUM_TEST;
12601 	case ETH_SS_STATS:
12602 		return TG3_NUM_STATS;
12603 	default:
12604 		return -EOPNOTSUPP;
12605 	}
12606 }
12607 
12608 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12609 			 u32 *rules __always_unused)
12610 {
12611 	struct tg3 *tp = netdev_priv(dev);
12612 
12613 	if (!tg3_flag(tp, SUPPORT_MSIX))
12614 		return -EOPNOTSUPP;
12615 
12616 	switch (info->cmd) {
12617 	case ETHTOOL_GRXRINGS:
12618 		if (netif_running(tp->dev))
12619 			info->data = tp->rxq_cnt;
12620 		else {
12621 			info->data = num_online_cpus();
12622 			if (info->data > TG3_RSS_MAX_NUM_QS)
12623 				info->data = TG3_RSS_MAX_NUM_QS;
12624 		}
12625 
12626 		return 0;
12627 
12628 	default:
12629 		return -EOPNOTSUPP;
12630 	}
12631 }
12632 
12633 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12634 {
12635 	u32 size = 0;
12636 	struct tg3 *tp = netdev_priv(dev);
12637 
12638 	if (tg3_flag(tp, SUPPORT_MSIX))
12639 		size = TG3_RSS_INDIR_TBL_SIZE;
12640 
12641 	return size;
12642 }
12643 
12644 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12645 {
12646 	struct tg3 *tp = netdev_priv(dev);
12647 	int i;
12648 
12649 	if (hfunc)
12650 		*hfunc = ETH_RSS_HASH_TOP;
12651 	if (!indir)
12652 		return 0;
12653 
12654 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12655 		indir[i] = tp->rss_ind_tbl[i];
12656 
12657 	return 0;
12658 }
12659 
12660 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12661 			const u8 hfunc)
12662 {
12663 	struct tg3 *tp = netdev_priv(dev);
12664 	size_t i;
12665 
12666 	/* We require at least one supported parameter to be changed and no
12667 	 * change in any of the unsupported parameters
12668 	 */
12669 	if (key ||
12670 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12671 		return -EOPNOTSUPP;
12672 
12673 	if (!indir)
12674 		return 0;
12675 
12676 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12677 		tp->rss_ind_tbl[i] = indir[i];
12678 
12679 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12680 		return 0;
12681 
12682 	/* It is legal to write the indirection
12683 	 * table while the device is running.
12684 	 */
12685 	tg3_full_lock(tp, 0);
12686 	tg3_rss_write_indir_tbl(tp);
12687 	tg3_full_unlock(tp);
12688 
12689 	return 0;
12690 }
12691 
12692 static void tg3_get_channels(struct net_device *dev,
12693 			     struct ethtool_channels *channel)
12694 {
12695 	struct tg3 *tp = netdev_priv(dev);
12696 	u32 deflt_qs = netif_get_num_default_rss_queues();
12697 
12698 	channel->max_rx = tp->rxq_max;
12699 	channel->max_tx = tp->txq_max;
12700 
12701 	if (netif_running(dev)) {
12702 		channel->rx_count = tp->rxq_cnt;
12703 		channel->tx_count = tp->txq_cnt;
12704 	} else {
12705 		if (tp->rxq_req)
12706 			channel->rx_count = tp->rxq_req;
12707 		else
12708 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12709 
12710 		if (tp->txq_req)
12711 			channel->tx_count = tp->txq_req;
12712 		else
12713 			channel->tx_count = min(deflt_qs, tp->txq_max);
12714 	}
12715 }
12716 
12717 static int tg3_set_channels(struct net_device *dev,
12718 			    struct ethtool_channels *channel)
12719 {
12720 	struct tg3 *tp = netdev_priv(dev);
12721 
12722 	if (!tg3_flag(tp, SUPPORT_MSIX))
12723 		return -EOPNOTSUPP;
12724 
12725 	if (channel->rx_count > tp->rxq_max ||
12726 	    channel->tx_count > tp->txq_max)
12727 		return -EINVAL;
12728 
12729 	tp->rxq_req = channel->rx_count;
12730 	tp->txq_req = channel->tx_count;
12731 
12732 	if (!netif_running(dev))
12733 		return 0;
12734 
12735 	tg3_stop(tp);
12736 
12737 	tg3_carrier_off(tp);
12738 
12739 	tg3_start(tp, true, false, false);
12740 
12741 	return 0;
12742 }
12743 
12744 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12745 {
12746 	switch (stringset) {
12747 	case ETH_SS_STATS:
12748 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12749 		break;
12750 	case ETH_SS_TEST:
12751 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12752 		break;
12753 	default:
12754 		WARN_ON(1);	/* we need a WARN() */
12755 		break;
12756 	}
12757 }
12758 
12759 static int tg3_set_phys_id(struct net_device *dev,
12760 			    enum ethtool_phys_id_state state)
12761 {
12762 	struct tg3 *tp = netdev_priv(dev);
12763 
12764 	switch (state) {
12765 	case ETHTOOL_ID_ACTIVE:
12766 		return 1;	/* cycle on/off once per second */
12767 
12768 	case ETHTOOL_ID_ON:
12769 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12770 		     LED_CTRL_1000MBPS_ON |
12771 		     LED_CTRL_100MBPS_ON |
12772 		     LED_CTRL_10MBPS_ON |
12773 		     LED_CTRL_TRAFFIC_OVERRIDE |
12774 		     LED_CTRL_TRAFFIC_BLINK |
12775 		     LED_CTRL_TRAFFIC_LED);
12776 		break;
12777 
12778 	case ETHTOOL_ID_OFF:
12779 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12780 		     LED_CTRL_TRAFFIC_OVERRIDE);
12781 		break;
12782 
12783 	case ETHTOOL_ID_INACTIVE:
12784 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12785 		break;
12786 	}
12787 
12788 	return 0;
12789 }
12790 
12791 static void tg3_get_ethtool_stats(struct net_device *dev,
12792 				   struct ethtool_stats *estats, u64 *tmp_stats)
12793 {
12794 	struct tg3 *tp = netdev_priv(dev);
12795 
12796 	if (tp->hw_stats)
12797 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12798 	else
12799 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12800 }
12801 
12802 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12803 {
12804 	int i;
12805 	__be32 *buf;
12806 	u32 offset = 0, len = 0;
12807 	u32 magic, val;
12808 
12809 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12810 		return NULL;
12811 
12812 	if (magic == TG3_EEPROM_MAGIC) {
12813 		for (offset = TG3_NVM_DIR_START;
12814 		     offset < TG3_NVM_DIR_END;
12815 		     offset += TG3_NVM_DIRENT_SIZE) {
12816 			if (tg3_nvram_read(tp, offset, &val))
12817 				return NULL;
12818 
12819 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12820 			    TG3_NVM_DIRTYPE_EXTVPD)
12821 				break;
12822 		}
12823 
12824 		if (offset != TG3_NVM_DIR_END) {
12825 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12826 			if (tg3_nvram_read(tp, offset + 4, &offset))
12827 				return NULL;
12828 
12829 			offset = tg3_nvram_logical_addr(tp, offset);
12830 		}
12831 	}
12832 
12833 	if (!offset || !len) {
12834 		offset = TG3_NVM_VPD_OFF;
12835 		len = TG3_NVM_VPD_LEN;
12836 	}
12837 
12838 	buf = kmalloc(len, GFP_KERNEL);
12839 	if (buf == NULL)
12840 		return NULL;
12841 
12842 	if (magic == TG3_EEPROM_MAGIC) {
12843 		for (i = 0; i < len; i += 4) {
12844 			/* The data is in little-endian format in NVRAM.
12845 			 * Use the big-endian read routines to preserve
12846 			 * the byte order as it exists in NVRAM.
12847 			 */
12848 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12849 				goto error;
12850 		}
12851 	} else {
12852 		u8 *ptr;
12853 		ssize_t cnt;
12854 		unsigned int pos = 0;
12855 
12856 		ptr = (u8 *)&buf[0];
12857 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12858 			cnt = pci_read_vpd(tp->pdev, pos,
12859 					   len - pos, ptr);
12860 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12861 				cnt = 0;
12862 			else if (cnt < 0)
12863 				goto error;
12864 		}
12865 		if (pos != len)
12866 			goto error;
12867 	}
12868 
12869 	*vpdlen = len;
12870 
12871 	return buf;
12872 
12873 error:
12874 	kfree(buf);
12875 	return NULL;
12876 }
12877 
12878 #define NVRAM_TEST_SIZE 0x100
12879 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12880 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12881 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12882 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12883 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12884 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12885 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12886 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12887 
12888 static int tg3_test_nvram(struct tg3 *tp)
12889 {
12890 	u32 csum, magic, len;
12891 	__be32 *buf;
12892 	int i, j, k, err = 0, size;
12893 
12894 	if (tg3_flag(tp, NO_NVRAM))
12895 		return 0;
12896 
12897 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12898 		return -EIO;
12899 
12900 	if (magic == TG3_EEPROM_MAGIC)
12901 		size = NVRAM_TEST_SIZE;
12902 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12903 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12904 		    TG3_EEPROM_SB_FORMAT_1) {
12905 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12906 			case TG3_EEPROM_SB_REVISION_0:
12907 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12908 				break;
12909 			case TG3_EEPROM_SB_REVISION_2:
12910 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12911 				break;
12912 			case TG3_EEPROM_SB_REVISION_3:
12913 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12914 				break;
12915 			case TG3_EEPROM_SB_REVISION_4:
12916 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12917 				break;
12918 			case TG3_EEPROM_SB_REVISION_5:
12919 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12920 				break;
12921 			case TG3_EEPROM_SB_REVISION_6:
12922 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12923 				break;
12924 			default:
12925 				return -EIO;
12926 			}
12927 		} else
12928 			return 0;
12929 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12930 		size = NVRAM_SELFBOOT_HW_SIZE;
12931 	else
12932 		return -EIO;
12933 
12934 	buf = kmalloc(size, GFP_KERNEL);
12935 	if (buf == NULL)
12936 		return -ENOMEM;
12937 
12938 	err = -EIO;
12939 	for (i = 0, j = 0; i < size; i += 4, j++) {
12940 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12941 		if (err)
12942 			break;
12943 	}
12944 	if (i < size)
12945 		goto out;
12946 
12947 	/* Selfboot format */
12948 	magic = be32_to_cpu(buf[0]);
12949 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12950 	    TG3_EEPROM_MAGIC_FW) {
12951 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12952 
12953 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12954 		    TG3_EEPROM_SB_REVISION_2) {
12955 			/* For rev 2, the csum doesn't include the MBA. */
12956 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12957 				csum8 += buf8[i];
12958 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12959 				csum8 += buf8[i];
12960 		} else {
12961 			for (i = 0; i < size; i++)
12962 				csum8 += buf8[i];
12963 		}
12964 
12965 		if (csum8 == 0) {
12966 			err = 0;
12967 			goto out;
12968 		}
12969 
12970 		err = -EIO;
12971 		goto out;
12972 	}
12973 
12974 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12975 	    TG3_EEPROM_MAGIC_HW) {
12976 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12977 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12978 		u8 *buf8 = (u8 *) buf;
12979 
12980 		/* Separate the parity bits and the data bytes.  */
12981 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12982 			if ((i == 0) || (i == 8)) {
12983 				int l;
12984 				u8 msk;
12985 
12986 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12987 					parity[k++] = buf8[i] & msk;
12988 				i++;
12989 			} else if (i == 16) {
12990 				int l;
12991 				u8 msk;
12992 
12993 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12994 					parity[k++] = buf8[i] & msk;
12995 				i++;
12996 
12997 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12998 					parity[k++] = buf8[i] & msk;
12999 				i++;
13000 			}
13001 			data[j++] = buf8[i];
13002 		}
13003 
13004 		err = -EIO;
13005 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13006 			u8 hw8 = hweight8(data[i]);
13007 
13008 			if ((hw8 & 0x1) && parity[i])
13009 				goto out;
13010 			else if (!(hw8 & 0x1) && !parity[i])
13011 				goto out;
13012 		}
13013 		err = 0;
13014 		goto out;
13015 	}
13016 
13017 	err = -EIO;
13018 
13019 	/* Bootstrap checksum at offset 0x10 */
13020 	csum = calc_crc((unsigned char *) buf, 0x10);
13021 	if (csum != le32_to_cpu(buf[0x10/4]))
13022 		goto out;
13023 
13024 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13025 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13026 	if (csum != le32_to_cpu(buf[0xfc/4]))
13027 		goto out;
13028 
13029 	kfree(buf);
13030 
13031 	buf = tg3_vpd_readblock(tp, &len);
13032 	if (!buf)
13033 		return -ENOMEM;
13034 
13035 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13036 	if (i > 0) {
13037 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13038 		if (j < 0)
13039 			goto out;
13040 
13041 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13042 			goto out;
13043 
13044 		i += PCI_VPD_LRDT_TAG_SIZE;
13045 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13046 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13047 		if (j > 0) {
13048 			u8 csum8 = 0;
13049 
13050 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13051 
13052 			for (i = 0; i <= j; i++)
13053 				csum8 += ((u8 *)buf)[i];
13054 
13055 			if (csum8)
13056 				goto out;
13057 		}
13058 	}
13059 
13060 	err = 0;
13061 
13062 out:
13063 	kfree(buf);
13064 	return err;
13065 }
13066 
13067 #define TG3_SERDES_TIMEOUT_SEC	2
13068 #define TG3_COPPER_TIMEOUT_SEC	6
13069 
13070 static int tg3_test_link(struct tg3 *tp)
13071 {
13072 	int i, max;
13073 
13074 	if (!netif_running(tp->dev))
13075 		return -ENODEV;
13076 
13077 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13078 		max = TG3_SERDES_TIMEOUT_SEC;
13079 	else
13080 		max = TG3_COPPER_TIMEOUT_SEC;
13081 
13082 	for (i = 0; i < max; i++) {
13083 		if (tp->link_up)
13084 			return 0;
13085 
13086 		if (msleep_interruptible(1000))
13087 			break;
13088 	}
13089 
13090 	return -EIO;
13091 }
13092 
13093 /* Only test the commonly used registers */
13094 static int tg3_test_registers(struct tg3 *tp)
13095 {
13096 	int i, is_5705, is_5750;
13097 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13098 	static struct {
13099 		u16 offset;
13100 		u16 flags;
13101 #define TG3_FL_5705	0x1
13102 #define TG3_FL_NOT_5705	0x2
13103 #define TG3_FL_NOT_5788	0x4
13104 #define TG3_FL_NOT_5750	0x8
13105 		u32 read_mask;
13106 		u32 write_mask;
13107 	} reg_tbl[] = {
13108 		/* MAC Control Registers */
13109 		{ MAC_MODE, TG3_FL_NOT_5705,
13110 			0x00000000, 0x00ef6f8c },
13111 		{ MAC_MODE, TG3_FL_5705,
13112 			0x00000000, 0x01ef6b8c },
13113 		{ MAC_STATUS, TG3_FL_NOT_5705,
13114 			0x03800107, 0x00000000 },
13115 		{ MAC_STATUS, TG3_FL_5705,
13116 			0x03800100, 0x00000000 },
13117 		{ MAC_ADDR_0_HIGH, 0x0000,
13118 			0x00000000, 0x0000ffff },
13119 		{ MAC_ADDR_0_LOW, 0x0000,
13120 			0x00000000, 0xffffffff },
13121 		{ MAC_RX_MTU_SIZE, 0x0000,
13122 			0x00000000, 0x0000ffff },
13123 		{ MAC_TX_MODE, 0x0000,
13124 			0x00000000, 0x00000070 },
13125 		{ MAC_TX_LENGTHS, 0x0000,
13126 			0x00000000, 0x00003fff },
13127 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13128 			0x00000000, 0x000007fc },
13129 		{ MAC_RX_MODE, TG3_FL_5705,
13130 			0x00000000, 0x000007dc },
13131 		{ MAC_HASH_REG_0, 0x0000,
13132 			0x00000000, 0xffffffff },
13133 		{ MAC_HASH_REG_1, 0x0000,
13134 			0x00000000, 0xffffffff },
13135 		{ MAC_HASH_REG_2, 0x0000,
13136 			0x00000000, 0xffffffff },
13137 		{ MAC_HASH_REG_3, 0x0000,
13138 			0x00000000, 0xffffffff },
13139 
13140 		/* Receive Data and Receive BD Initiator Control Registers. */
13141 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13142 			0x00000000, 0xffffffff },
13143 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13144 			0x00000000, 0xffffffff },
13145 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13146 			0x00000000, 0x00000003 },
13147 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13148 			0x00000000, 0xffffffff },
13149 		{ RCVDBDI_STD_BD+0, 0x0000,
13150 			0x00000000, 0xffffffff },
13151 		{ RCVDBDI_STD_BD+4, 0x0000,
13152 			0x00000000, 0xffffffff },
13153 		{ RCVDBDI_STD_BD+8, 0x0000,
13154 			0x00000000, 0xffff0002 },
13155 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13156 			0x00000000, 0xffffffff },
13157 
13158 		/* Receive BD Initiator Control Registers. */
13159 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13160 			0x00000000, 0xffffffff },
13161 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13162 			0x00000000, 0x000003ff },
13163 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13164 			0x00000000, 0xffffffff },
13165 
13166 		/* Host Coalescing Control Registers. */
13167 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13168 			0x00000000, 0x00000004 },
13169 		{ HOSTCC_MODE, TG3_FL_5705,
13170 			0x00000000, 0x000000f6 },
13171 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13172 			0x00000000, 0xffffffff },
13173 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13174 			0x00000000, 0x000003ff },
13175 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13176 			0x00000000, 0xffffffff },
13177 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13178 			0x00000000, 0x000003ff },
13179 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13180 			0x00000000, 0xffffffff },
13181 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13182 			0x00000000, 0x000000ff },
13183 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13184 			0x00000000, 0xffffffff },
13185 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13186 			0x00000000, 0x000000ff },
13187 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13188 			0x00000000, 0xffffffff },
13189 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13190 			0x00000000, 0xffffffff },
13191 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13192 			0x00000000, 0xffffffff },
13193 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13194 			0x00000000, 0x000000ff },
13195 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13196 			0x00000000, 0xffffffff },
13197 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13198 			0x00000000, 0x000000ff },
13199 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13200 			0x00000000, 0xffffffff },
13201 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13202 			0x00000000, 0xffffffff },
13203 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13204 			0x00000000, 0xffffffff },
13205 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13206 			0x00000000, 0xffffffff },
13207 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13208 			0x00000000, 0xffffffff },
13209 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13210 			0xffffffff, 0x00000000 },
13211 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13212 			0xffffffff, 0x00000000 },
13213 
13214 		/* Buffer Manager Control Registers. */
13215 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13216 			0x00000000, 0x007fff80 },
13217 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13218 			0x00000000, 0x007fffff },
13219 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13220 			0x00000000, 0x0000003f },
13221 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13222 			0x00000000, 0x000001ff },
13223 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13224 			0x00000000, 0x000001ff },
13225 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13226 			0xffffffff, 0x00000000 },
13227 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13228 			0xffffffff, 0x00000000 },
13229 
13230 		/* Mailbox Registers */
13231 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13232 			0x00000000, 0x000001ff },
13233 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13234 			0x00000000, 0x000001ff },
13235 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13236 			0x00000000, 0x000007ff },
13237 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13238 			0x00000000, 0x000001ff },
13239 
13240 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13241 	};
13242 
13243 	is_5705 = is_5750 = 0;
13244 	if (tg3_flag(tp, 5705_PLUS)) {
13245 		is_5705 = 1;
13246 		if (tg3_flag(tp, 5750_PLUS))
13247 			is_5750 = 1;
13248 	}
13249 
13250 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13251 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13252 			continue;
13253 
13254 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13255 			continue;
13256 
13257 		if (tg3_flag(tp, IS_5788) &&
13258 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13259 			continue;
13260 
13261 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13262 			continue;
13263 
13264 		offset = (u32) reg_tbl[i].offset;
13265 		read_mask = reg_tbl[i].read_mask;
13266 		write_mask = reg_tbl[i].write_mask;
13267 
13268 		/* Save the original register content */
13269 		save_val = tr32(offset);
13270 
13271 		/* Determine the read-only value. */
13272 		read_val = save_val & read_mask;
13273 
13274 		/* Write zero to the register, then make sure the read-only bits
13275 		 * are not changed and the read/write bits are all zeros.
13276 		 */
13277 		tw32(offset, 0);
13278 
13279 		val = tr32(offset);
13280 
13281 		/* Test the read-only and read/write bits. */
13282 		if (((val & read_mask) != read_val) || (val & write_mask))
13283 			goto out;
13284 
13285 		/* Write ones to all the bits defined by RdMask and WrMask, then
13286 		 * make sure the read-only bits are not changed and the
13287 		 * read/write bits are all ones.
13288 		 */
13289 		tw32(offset, read_mask | write_mask);
13290 
13291 		val = tr32(offset);
13292 
13293 		/* Test the read-only bits. */
13294 		if ((val & read_mask) != read_val)
13295 			goto out;
13296 
13297 		/* Test the read/write bits. */
13298 		if ((val & write_mask) != write_mask)
13299 			goto out;
13300 
13301 		tw32(offset, save_val);
13302 	}
13303 
13304 	return 0;
13305 
13306 out:
13307 	if (netif_msg_hw(tp))
13308 		netdev_err(tp->dev,
13309 			   "Register test failed at offset %x\n", offset);
13310 	tw32(offset, save_val);
13311 	return -EIO;
13312 }
13313 
13314 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13315 {
13316 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13317 	int i;
13318 	u32 j;
13319 
13320 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13321 		for (j = 0; j < len; j += 4) {
13322 			u32 val;
13323 
13324 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13325 			tg3_read_mem(tp, offset + j, &val);
13326 			if (val != test_pattern[i])
13327 				return -EIO;
13328 		}
13329 	}
13330 	return 0;
13331 }
13332 
13333 static int tg3_test_memory(struct tg3 *tp)
13334 {
13335 	static struct mem_entry {
13336 		u32 offset;
13337 		u32 len;
13338 	} mem_tbl_570x[] = {
13339 		{ 0x00000000, 0x00b50},
13340 		{ 0x00002000, 0x1c000},
13341 		{ 0xffffffff, 0x00000}
13342 	}, mem_tbl_5705[] = {
13343 		{ 0x00000100, 0x0000c},
13344 		{ 0x00000200, 0x00008},
13345 		{ 0x00004000, 0x00800},
13346 		{ 0x00006000, 0x01000},
13347 		{ 0x00008000, 0x02000},
13348 		{ 0x00010000, 0x0e000},
13349 		{ 0xffffffff, 0x00000}
13350 	}, mem_tbl_5755[] = {
13351 		{ 0x00000200, 0x00008},
13352 		{ 0x00004000, 0x00800},
13353 		{ 0x00006000, 0x00800},
13354 		{ 0x00008000, 0x02000},
13355 		{ 0x00010000, 0x0c000},
13356 		{ 0xffffffff, 0x00000}
13357 	}, mem_tbl_5906[] = {
13358 		{ 0x00000200, 0x00008},
13359 		{ 0x00004000, 0x00400},
13360 		{ 0x00006000, 0x00400},
13361 		{ 0x00008000, 0x01000},
13362 		{ 0x00010000, 0x01000},
13363 		{ 0xffffffff, 0x00000}
13364 	}, mem_tbl_5717[] = {
13365 		{ 0x00000200, 0x00008},
13366 		{ 0x00010000, 0x0a000},
13367 		{ 0x00020000, 0x13c00},
13368 		{ 0xffffffff, 0x00000}
13369 	}, mem_tbl_57765[] = {
13370 		{ 0x00000200, 0x00008},
13371 		{ 0x00004000, 0x00800},
13372 		{ 0x00006000, 0x09800},
13373 		{ 0x00010000, 0x0a000},
13374 		{ 0xffffffff, 0x00000}
13375 	};
13376 	struct mem_entry *mem_tbl;
13377 	int err = 0;
13378 	int i;
13379 
13380 	if (tg3_flag(tp, 5717_PLUS))
13381 		mem_tbl = mem_tbl_5717;
13382 	else if (tg3_flag(tp, 57765_CLASS) ||
13383 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13384 		mem_tbl = mem_tbl_57765;
13385 	else if (tg3_flag(tp, 5755_PLUS))
13386 		mem_tbl = mem_tbl_5755;
13387 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13388 		mem_tbl = mem_tbl_5906;
13389 	else if (tg3_flag(tp, 5705_PLUS))
13390 		mem_tbl = mem_tbl_5705;
13391 	else
13392 		mem_tbl = mem_tbl_570x;
13393 
13394 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13395 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13396 		if (err)
13397 			break;
13398 	}
13399 
13400 	return err;
13401 }
13402 
13403 #define TG3_TSO_MSS		500
13404 
13405 #define TG3_TSO_IP_HDR_LEN	20
13406 #define TG3_TSO_TCP_HDR_LEN	20
13407 #define TG3_TSO_TCP_OPT_LEN	12
13408 
13409 static const u8 tg3_tso_header[] = {
13410 0x08, 0x00,
13411 0x45, 0x00, 0x00, 0x00,
13412 0x00, 0x00, 0x40, 0x00,
13413 0x40, 0x06, 0x00, 0x00,
13414 0x0a, 0x00, 0x00, 0x01,
13415 0x0a, 0x00, 0x00, 0x02,
13416 0x0d, 0x00, 0xe0, 0x00,
13417 0x00, 0x00, 0x01, 0x00,
13418 0x00, 0x00, 0x02, 0x00,
13419 0x80, 0x10, 0x10, 0x00,
13420 0x14, 0x09, 0x00, 0x00,
13421 0x01, 0x01, 0x08, 0x0a,
13422 0x11, 0x11, 0x11, 0x11,
13423 0x11, 0x11, 0x11, 0x11,
13424 };
13425 
13426 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13427 {
13428 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13429 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13430 	u32 budget;
13431 	struct sk_buff *skb;
13432 	u8 *tx_data, *rx_data;
13433 	dma_addr_t map;
13434 	int num_pkts, tx_len, rx_len, i, err;
13435 	struct tg3_rx_buffer_desc *desc;
13436 	struct tg3_napi *tnapi, *rnapi;
13437 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13438 
13439 	tnapi = &tp->napi[0];
13440 	rnapi = &tp->napi[0];
13441 	if (tp->irq_cnt > 1) {
13442 		if (tg3_flag(tp, ENABLE_RSS))
13443 			rnapi = &tp->napi[1];
13444 		if (tg3_flag(tp, ENABLE_TSS))
13445 			tnapi = &tp->napi[1];
13446 	}
13447 	coal_now = tnapi->coal_now | rnapi->coal_now;
13448 
13449 	err = -EIO;
13450 
13451 	tx_len = pktsz;
13452 	skb = netdev_alloc_skb(tp->dev, tx_len);
13453 	if (!skb)
13454 		return -ENOMEM;
13455 
13456 	tx_data = skb_put(skb, tx_len);
13457 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13458 	memset(tx_data + ETH_ALEN, 0x0, 8);
13459 
13460 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13461 
13462 	if (tso_loopback) {
13463 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13464 
13465 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13466 			      TG3_TSO_TCP_OPT_LEN;
13467 
13468 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13469 		       sizeof(tg3_tso_header));
13470 		mss = TG3_TSO_MSS;
13471 
13472 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13473 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13474 
13475 		/* Set the total length field in the IP header */
13476 		iph->tot_len = htons((u16)(mss + hdr_len));
13477 
13478 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13479 			      TXD_FLAG_CPU_POST_DMA);
13480 
13481 		if (tg3_flag(tp, HW_TSO_1) ||
13482 		    tg3_flag(tp, HW_TSO_2) ||
13483 		    tg3_flag(tp, HW_TSO_3)) {
13484 			struct tcphdr *th;
13485 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13486 			th = (struct tcphdr *)&tx_data[val];
13487 			th->check = 0;
13488 		} else
13489 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13490 
13491 		if (tg3_flag(tp, HW_TSO_3)) {
13492 			mss |= (hdr_len & 0xc) << 12;
13493 			if (hdr_len & 0x10)
13494 				base_flags |= 0x00000010;
13495 			base_flags |= (hdr_len & 0x3e0) << 5;
13496 		} else if (tg3_flag(tp, HW_TSO_2))
13497 			mss |= hdr_len << 9;
13498 		else if (tg3_flag(tp, HW_TSO_1) ||
13499 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13500 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13501 		} else {
13502 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13503 		}
13504 
13505 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13506 	} else {
13507 		num_pkts = 1;
13508 		data_off = ETH_HLEN;
13509 
13510 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13511 		    tx_len > VLAN_ETH_FRAME_LEN)
13512 			base_flags |= TXD_FLAG_JMB_PKT;
13513 	}
13514 
13515 	for (i = data_off; i < tx_len; i++)
13516 		tx_data[i] = (u8) (i & 0xff);
13517 
13518 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13519 	if (pci_dma_mapping_error(tp->pdev, map)) {
13520 		dev_kfree_skb(skb);
13521 		return -EIO;
13522 	}
13523 
13524 	val = tnapi->tx_prod;
13525 	tnapi->tx_buffers[val].skb = skb;
13526 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13527 
13528 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13529 	       rnapi->coal_now);
13530 
13531 	udelay(10);
13532 
13533 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13534 
13535 	budget = tg3_tx_avail(tnapi);
13536 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13537 			    base_flags | TXD_FLAG_END, mss, 0)) {
13538 		tnapi->tx_buffers[val].skb = NULL;
13539 		dev_kfree_skb(skb);
13540 		return -EIO;
13541 	}
13542 
13543 	tnapi->tx_prod++;
13544 
13545 	/* Sync BD data before updating mailbox */
13546 	wmb();
13547 
13548 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13549 	tr32_mailbox(tnapi->prodmbox);
13550 
13551 	udelay(10);
13552 
13553 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13554 	for (i = 0; i < 35; i++) {
13555 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13556 		       coal_now);
13557 
13558 		udelay(10);
13559 
13560 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13561 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13562 		if ((tx_idx == tnapi->tx_prod) &&
13563 		    (rx_idx == (rx_start_idx + num_pkts)))
13564 			break;
13565 	}
13566 
13567 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13568 	dev_kfree_skb(skb);
13569 
13570 	if (tx_idx != tnapi->tx_prod)
13571 		goto out;
13572 
13573 	if (rx_idx != rx_start_idx + num_pkts)
13574 		goto out;
13575 
13576 	val = data_off;
13577 	while (rx_idx != rx_start_idx) {
13578 		desc = &rnapi->rx_rcb[rx_start_idx++];
13579 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13580 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13581 
13582 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13583 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13584 			goto out;
13585 
13586 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13587 			 - ETH_FCS_LEN;
13588 
13589 		if (!tso_loopback) {
13590 			if (rx_len != tx_len)
13591 				goto out;
13592 
13593 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13594 				if (opaque_key != RXD_OPAQUE_RING_STD)
13595 					goto out;
13596 			} else {
13597 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13598 					goto out;
13599 			}
13600 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13601 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13602 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13603 			goto out;
13604 		}
13605 
13606 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13607 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13608 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13609 					     mapping);
13610 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13611 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13612 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13613 					     mapping);
13614 		} else
13615 			goto out;
13616 
13617 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13618 					    PCI_DMA_FROMDEVICE);
13619 
13620 		rx_data += TG3_RX_OFFSET(tp);
13621 		for (i = data_off; i < rx_len; i++, val++) {
13622 			if (*(rx_data + i) != (u8) (val & 0xff))
13623 				goto out;
13624 		}
13625 	}
13626 
13627 	err = 0;
13628 
13629 	/* tg3_free_rings will unmap and free the rx_data */
13630 out:
13631 	return err;
13632 }
13633 
13634 #define TG3_STD_LOOPBACK_FAILED		1
13635 #define TG3_JMB_LOOPBACK_FAILED		2
13636 #define TG3_TSO_LOOPBACK_FAILED		4
13637 #define TG3_LOOPBACK_FAILED \
13638 	(TG3_STD_LOOPBACK_FAILED | \
13639 	 TG3_JMB_LOOPBACK_FAILED | \
13640 	 TG3_TSO_LOOPBACK_FAILED)
13641 
13642 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13643 {
13644 	int err = -EIO;
13645 	u32 eee_cap;
13646 	u32 jmb_pkt_sz = 9000;
13647 
13648 	if (tp->dma_limit)
13649 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13650 
13651 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13652 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13653 
13654 	if (!netif_running(tp->dev)) {
13655 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13656 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13657 		if (do_extlpbk)
13658 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13659 		goto done;
13660 	}
13661 
13662 	err = tg3_reset_hw(tp, true);
13663 	if (err) {
13664 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13665 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13666 		if (do_extlpbk)
13667 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13668 		goto done;
13669 	}
13670 
13671 	if (tg3_flag(tp, ENABLE_RSS)) {
13672 		int i;
13673 
13674 		/* Reroute all rx packets to the 1st queue */
13675 		for (i = MAC_RSS_INDIR_TBL_0;
13676 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13677 			tw32(i, 0x0);
13678 	}
13679 
13680 	/* HW errata - mac loopback fails in some cases on 5780.
13681 	 * Normal traffic and PHY loopback are not affected by
13682 	 * errata.  Also, the MAC loopback test is deprecated for
13683 	 * all newer ASIC revisions.
13684 	 */
13685 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13686 	    !tg3_flag(tp, CPMU_PRESENT)) {
13687 		tg3_mac_loopback(tp, true);
13688 
13689 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13690 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13691 
13692 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13693 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13694 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13695 
13696 		tg3_mac_loopback(tp, false);
13697 	}
13698 
13699 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13700 	    !tg3_flag(tp, USE_PHYLIB)) {
13701 		int i;
13702 
13703 		tg3_phy_lpbk_set(tp, 0, false);
13704 
13705 		/* Wait for link */
13706 		for (i = 0; i < 100; i++) {
13707 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13708 				break;
13709 			mdelay(1);
13710 		}
13711 
13712 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13713 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13714 		if (tg3_flag(tp, TSO_CAPABLE) &&
13715 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13716 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13717 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13718 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13719 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13720 
13721 		if (do_extlpbk) {
13722 			tg3_phy_lpbk_set(tp, 0, true);
13723 
13724 			/* All link indications report up, but the hardware
13725 			 * isn't really ready for about 20 msec.  Double it
13726 			 * to be sure.
13727 			 */
13728 			mdelay(40);
13729 
13730 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13731 				data[TG3_EXT_LOOPB_TEST] |=
13732 							TG3_STD_LOOPBACK_FAILED;
13733 			if (tg3_flag(tp, TSO_CAPABLE) &&
13734 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13735 				data[TG3_EXT_LOOPB_TEST] |=
13736 							TG3_TSO_LOOPBACK_FAILED;
13737 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13738 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13739 				data[TG3_EXT_LOOPB_TEST] |=
13740 							TG3_JMB_LOOPBACK_FAILED;
13741 		}
13742 
13743 		/* Re-enable gphy autopowerdown. */
13744 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13745 			tg3_phy_toggle_apd(tp, true);
13746 	}
13747 
13748 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13749 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13750 
13751 done:
13752 	tp->phy_flags |= eee_cap;
13753 
13754 	return err;
13755 }
13756 
13757 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13758 			  u64 *data)
13759 {
13760 	struct tg3 *tp = netdev_priv(dev);
13761 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13762 
13763 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13764 		if (tg3_power_up(tp)) {
13765 			etest->flags |= ETH_TEST_FL_FAILED;
13766 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13767 			return;
13768 		}
13769 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13770 	}
13771 
13772 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13773 
13774 	if (tg3_test_nvram(tp) != 0) {
13775 		etest->flags |= ETH_TEST_FL_FAILED;
13776 		data[TG3_NVRAM_TEST] = 1;
13777 	}
13778 	if (!doextlpbk && tg3_test_link(tp)) {
13779 		etest->flags |= ETH_TEST_FL_FAILED;
13780 		data[TG3_LINK_TEST] = 1;
13781 	}
13782 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13783 		int err, err2 = 0, irq_sync = 0;
13784 
13785 		if (netif_running(dev)) {
13786 			tg3_phy_stop(tp);
13787 			tg3_netif_stop(tp);
13788 			irq_sync = 1;
13789 		}
13790 
13791 		tg3_full_lock(tp, irq_sync);
13792 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13793 		err = tg3_nvram_lock(tp);
13794 		tg3_halt_cpu(tp, RX_CPU_BASE);
13795 		if (!tg3_flag(tp, 5705_PLUS))
13796 			tg3_halt_cpu(tp, TX_CPU_BASE);
13797 		if (!err)
13798 			tg3_nvram_unlock(tp);
13799 
13800 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13801 			tg3_phy_reset(tp);
13802 
13803 		if (tg3_test_registers(tp) != 0) {
13804 			etest->flags |= ETH_TEST_FL_FAILED;
13805 			data[TG3_REGISTER_TEST] = 1;
13806 		}
13807 
13808 		if (tg3_test_memory(tp) != 0) {
13809 			etest->flags |= ETH_TEST_FL_FAILED;
13810 			data[TG3_MEMORY_TEST] = 1;
13811 		}
13812 
13813 		if (doextlpbk)
13814 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13815 
13816 		if (tg3_test_loopback(tp, data, doextlpbk))
13817 			etest->flags |= ETH_TEST_FL_FAILED;
13818 
13819 		tg3_full_unlock(tp);
13820 
13821 		if (tg3_test_interrupt(tp) != 0) {
13822 			etest->flags |= ETH_TEST_FL_FAILED;
13823 			data[TG3_INTERRUPT_TEST] = 1;
13824 		}
13825 
13826 		tg3_full_lock(tp, 0);
13827 
13828 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13829 		if (netif_running(dev)) {
13830 			tg3_flag_set(tp, INIT_COMPLETE);
13831 			err2 = tg3_restart_hw(tp, true);
13832 			if (!err2)
13833 				tg3_netif_start(tp);
13834 		}
13835 
13836 		tg3_full_unlock(tp);
13837 
13838 		if (irq_sync && !err2)
13839 			tg3_phy_start(tp);
13840 	}
13841 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13842 		tg3_power_down_prepare(tp);
13843 
13844 }
13845 
13846 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13847 {
13848 	struct tg3 *tp = netdev_priv(dev);
13849 	struct hwtstamp_config stmpconf;
13850 
13851 	if (!tg3_flag(tp, PTP_CAPABLE))
13852 		return -EOPNOTSUPP;
13853 
13854 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13855 		return -EFAULT;
13856 
13857 	if (stmpconf.flags)
13858 		return -EINVAL;
13859 
13860 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13861 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13862 		return -ERANGE;
13863 
13864 	switch (stmpconf.rx_filter) {
13865 	case HWTSTAMP_FILTER_NONE:
13866 		tp->rxptpctl = 0;
13867 		break;
13868 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13869 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13870 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13871 		break;
13872 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13873 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13874 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13875 		break;
13876 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13877 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13878 			       TG3_RX_PTP_CTL_DELAY_REQ;
13879 		break;
13880 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13881 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13882 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13883 		break;
13884 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13885 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13886 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13887 		break;
13888 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13889 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13890 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13891 		break;
13892 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13893 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13894 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13895 		break;
13896 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13897 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13898 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13899 		break;
13900 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13901 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13902 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13903 		break;
13904 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13905 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13906 			       TG3_RX_PTP_CTL_DELAY_REQ;
13907 		break;
13908 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13909 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13910 			       TG3_RX_PTP_CTL_DELAY_REQ;
13911 		break;
13912 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13913 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13914 			       TG3_RX_PTP_CTL_DELAY_REQ;
13915 		break;
13916 	default:
13917 		return -ERANGE;
13918 	}
13919 
13920 	if (netif_running(dev) && tp->rxptpctl)
13921 		tw32(TG3_RX_PTP_CTL,
13922 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13923 
13924 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13925 		tg3_flag_set(tp, TX_TSTAMP_EN);
13926 	else
13927 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13928 
13929 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13930 		-EFAULT : 0;
13931 }
13932 
13933 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13934 {
13935 	struct tg3 *tp = netdev_priv(dev);
13936 	struct hwtstamp_config stmpconf;
13937 
13938 	if (!tg3_flag(tp, PTP_CAPABLE))
13939 		return -EOPNOTSUPP;
13940 
13941 	stmpconf.flags = 0;
13942 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13943 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13944 
13945 	switch (tp->rxptpctl) {
13946 	case 0:
13947 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13948 		break;
13949 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13950 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13951 		break;
13952 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13953 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13954 		break;
13955 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13956 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13957 		break;
13958 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13959 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13960 		break;
13961 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13962 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13963 		break;
13964 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13965 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13966 		break;
13967 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13968 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13969 		break;
13970 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13971 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13972 		break;
13973 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13974 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13975 		break;
13976 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13977 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13978 		break;
13979 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13980 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13981 		break;
13982 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13983 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13984 		break;
13985 	default:
13986 		WARN_ON_ONCE(1);
13987 		return -ERANGE;
13988 	}
13989 
13990 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13991 		-EFAULT : 0;
13992 }
13993 
13994 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13995 {
13996 	struct mii_ioctl_data *data = if_mii(ifr);
13997 	struct tg3 *tp = netdev_priv(dev);
13998 	int err;
13999 
14000 	if (tg3_flag(tp, USE_PHYLIB)) {
14001 		struct phy_device *phydev;
14002 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14003 			return -EAGAIN;
14004 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14005 		return phy_mii_ioctl(phydev, ifr, cmd);
14006 	}
14007 
14008 	switch (cmd) {
14009 	case SIOCGMIIPHY:
14010 		data->phy_id = tp->phy_addr;
14011 
14012 		/* fall through */
14013 	case SIOCGMIIREG: {
14014 		u32 mii_regval;
14015 
14016 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14017 			break;			/* We have no PHY */
14018 
14019 		if (!netif_running(dev))
14020 			return -EAGAIN;
14021 
14022 		spin_lock_bh(&tp->lock);
14023 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14024 				    data->reg_num & 0x1f, &mii_regval);
14025 		spin_unlock_bh(&tp->lock);
14026 
14027 		data->val_out = mii_regval;
14028 
14029 		return err;
14030 	}
14031 
14032 	case SIOCSMIIREG:
14033 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14034 			break;			/* We have no PHY */
14035 
14036 		if (!netif_running(dev))
14037 			return -EAGAIN;
14038 
14039 		spin_lock_bh(&tp->lock);
14040 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14041 				     data->reg_num & 0x1f, data->val_in);
14042 		spin_unlock_bh(&tp->lock);
14043 
14044 		return err;
14045 
14046 	case SIOCSHWTSTAMP:
14047 		return tg3_hwtstamp_set(dev, ifr);
14048 
14049 	case SIOCGHWTSTAMP:
14050 		return tg3_hwtstamp_get(dev, ifr);
14051 
14052 	default:
14053 		/* do nothing */
14054 		break;
14055 	}
14056 	return -EOPNOTSUPP;
14057 }
14058 
14059 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14060 {
14061 	struct tg3 *tp = netdev_priv(dev);
14062 
14063 	memcpy(ec, &tp->coal, sizeof(*ec));
14064 	return 0;
14065 }
14066 
14067 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14068 {
14069 	struct tg3 *tp = netdev_priv(dev);
14070 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14071 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14072 
14073 	if (!tg3_flag(tp, 5705_PLUS)) {
14074 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14075 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14076 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14077 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14078 	}
14079 
14080 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14081 	    (!ec->rx_coalesce_usecs) ||
14082 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14083 	    (!ec->tx_coalesce_usecs) ||
14084 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14085 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14086 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14087 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14088 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14089 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14090 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14091 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14092 		return -EINVAL;
14093 
14094 	/* Only copy relevant parameters, ignore all others. */
14095 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14096 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14097 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14098 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14099 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14100 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14101 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14102 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14103 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14104 
14105 	if (netif_running(dev)) {
14106 		tg3_full_lock(tp, 0);
14107 		__tg3_set_coalesce(tp, &tp->coal);
14108 		tg3_full_unlock(tp);
14109 	}
14110 	return 0;
14111 }
14112 
14113 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14114 {
14115 	struct tg3 *tp = netdev_priv(dev);
14116 
14117 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14118 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14119 		return -EOPNOTSUPP;
14120 	}
14121 
14122 	if (edata->advertised != tp->eee.advertised) {
14123 		netdev_warn(tp->dev,
14124 			    "Direct manipulation of EEE advertisement is not supported\n");
14125 		return -EINVAL;
14126 	}
14127 
14128 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14129 		netdev_warn(tp->dev,
14130 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14131 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14132 		return -EINVAL;
14133 	}
14134 
14135 	tp->eee = *edata;
14136 
14137 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14138 	tg3_warn_mgmt_link_flap(tp);
14139 
14140 	if (netif_running(tp->dev)) {
14141 		tg3_full_lock(tp, 0);
14142 		tg3_setup_eee(tp);
14143 		tg3_phy_reset(tp);
14144 		tg3_full_unlock(tp);
14145 	}
14146 
14147 	return 0;
14148 }
14149 
14150 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14151 {
14152 	struct tg3 *tp = netdev_priv(dev);
14153 
14154 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14155 		netdev_warn(tp->dev,
14156 			    "Board does not support EEE!\n");
14157 		return -EOPNOTSUPP;
14158 	}
14159 
14160 	*edata = tp->eee;
14161 	return 0;
14162 }
14163 
14164 static const struct ethtool_ops tg3_ethtool_ops = {
14165 	.get_drvinfo		= tg3_get_drvinfo,
14166 	.get_regs_len		= tg3_get_regs_len,
14167 	.get_regs		= tg3_get_regs,
14168 	.get_wol		= tg3_get_wol,
14169 	.set_wol		= tg3_set_wol,
14170 	.get_msglevel		= tg3_get_msglevel,
14171 	.set_msglevel		= tg3_set_msglevel,
14172 	.nway_reset		= tg3_nway_reset,
14173 	.get_link		= ethtool_op_get_link,
14174 	.get_eeprom_len		= tg3_get_eeprom_len,
14175 	.get_eeprom		= tg3_get_eeprom,
14176 	.set_eeprom		= tg3_set_eeprom,
14177 	.get_ringparam		= tg3_get_ringparam,
14178 	.set_ringparam		= tg3_set_ringparam,
14179 	.get_pauseparam		= tg3_get_pauseparam,
14180 	.set_pauseparam		= tg3_set_pauseparam,
14181 	.self_test		= tg3_self_test,
14182 	.get_strings		= tg3_get_strings,
14183 	.set_phys_id		= tg3_set_phys_id,
14184 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14185 	.get_coalesce		= tg3_get_coalesce,
14186 	.set_coalesce		= tg3_set_coalesce,
14187 	.get_sset_count		= tg3_get_sset_count,
14188 	.get_rxnfc		= tg3_get_rxnfc,
14189 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14190 	.get_rxfh		= tg3_get_rxfh,
14191 	.set_rxfh		= tg3_set_rxfh,
14192 	.get_channels		= tg3_get_channels,
14193 	.set_channels		= tg3_set_channels,
14194 	.get_ts_info		= tg3_get_ts_info,
14195 	.get_eee		= tg3_get_eee,
14196 	.set_eee		= tg3_set_eee,
14197 	.get_link_ksettings	= tg3_get_link_ksettings,
14198 	.set_link_ksettings	= tg3_set_link_ksettings,
14199 };
14200 
14201 static void tg3_get_stats64(struct net_device *dev,
14202 			    struct rtnl_link_stats64 *stats)
14203 {
14204 	struct tg3 *tp = netdev_priv(dev);
14205 
14206 	spin_lock_bh(&tp->lock);
14207 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14208 		*stats = tp->net_stats_prev;
14209 		spin_unlock_bh(&tp->lock);
14210 		return;
14211 	}
14212 
14213 	tg3_get_nstats(tp, stats);
14214 	spin_unlock_bh(&tp->lock);
14215 }
14216 
14217 static void tg3_set_rx_mode(struct net_device *dev)
14218 {
14219 	struct tg3 *tp = netdev_priv(dev);
14220 
14221 	if (!netif_running(dev))
14222 		return;
14223 
14224 	tg3_full_lock(tp, 0);
14225 	__tg3_set_rx_mode(dev);
14226 	tg3_full_unlock(tp);
14227 }
14228 
14229 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14230 			       int new_mtu)
14231 {
14232 	dev->mtu = new_mtu;
14233 
14234 	if (new_mtu > ETH_DATA_LEN) {
14235 		if (tg3_flag(tp, 5780_CLASS)) {
14236 			netdev_update_features(dev);
14237 			tg3_flag_clear(tp, TSO_CAPABLE);
14238 		} else {
14239 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14240 		}
14241 	} else {
14242 		if (tg3_flag(tp, 5780_CLASS)) {
14243 			tg3_flag_set(tp, TSO_CAPABLE);
14244 			netdev_update_features(dev);
14245 		}
14246 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14247 	}
14248 }
14249 
14250 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14251 {
14252 	struct tg3 *tp = netdev_priv(dev);
14253 	int err;
14254 	bool reset_phy = false;
14255 
14256 	if (!netif_running(dev)) {
14257 		/* We'll just catch it later when the
14258 		 * device is up'd.
14259 		 */
14260 		tg3_set_mtu(dev, tp, new_mtu);
14261 		return 0;
14262 	}
14263 
14264 	tg3_phy_stop(tp);
14265 
14266 	tg3_netif_stop(tp);
14267 
14268 	tg3_set_mtu(dev, tp, new_mtu);
14269 
14270 	tg3_full_lock(tp, 1);
14271 
14272 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14273 
14274 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14275 	 * breaks all requests to 256 bytes.
14276 	 */
14277 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14278 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14279 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14280 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14281 		reset_phy = true;
14282 
14283 	err = tg3_restart_hw(tp, reset_phy);
14284 
14285 	if (!err)
14286 		tg3_netif_start(tp);
14287 
14288 	tg3_full_unlock(tp);
14289 
14290 	if (!err)
14291 		tg3_phy_start(tp);
14292 
14293 	return err;
14294 }
14295 
14296 static const struct net_device_ops tg3_netdev_ops = {
14297 	.ndo_open		= tg3_open,
14298 	.ndo_stop		= tg3_close,
14299 	.ndo_start_xmit		= tg3_start_xmit,
14300 	.ndo_get_stats64	= tg3_get_stats64,
14301 	.ndo_validate_addr	= eth_validate_addr,
14302 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14303 	.ndo_set_mac_address	= tg3_set_mac_addr,
14304 	.ndo_do_ioctl		= tg3_ioctl,
14305 	.ndo_tx_timeout		= tg3_tx_timeout,
14306 	.ndo_change_mtu		= tg3_change_mtu,
14307 	.ndo_fix_features	= tg3_fix_features,
14308 	.ndo_set_features	= tg3_set_features,
14309 #ifdef CONFIG_NET_POLL_CONTROLLER
14310 	.ndo_poll_controller	= tg3_poll_controller,
14311 #endif
14312 };
14313 
14314 static void tg3_get_eeprom_size(struct tg3 *tp)
14315 {
14316 	u32 cursize, val, magic;
14317 
14318 	tp->nvram_size = EEPROM_CHIP_SIZE;
14319 
14320 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14321 		return;
14322 
14323 	if ((magic != TG3_EEPROM_MAGIC) &&
14324 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14325 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14326 		return;
14327 
14328 	/*
14329 	 * Size the chip by reading offsets at increasing powers of two.
14330 	 * When we encounter our validation signature, we know the addressing
14331 	 * has wrapped around, and thus have our chip size.
14332 	 */
14333 	cursize = 0x10;
14334 
14335 	while (cursize < tp->nvram_size) {
14336 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14337 			return;
14338 
14339 		if (val == magic)
14340 			break;
14341 
14342 		cursize <<= 1;
14343 	}
14344 
14345 	tp->nvram_size = cursize;
14346 }
14347 
14348 static void tg3_get_nvram_size(struct tg3 *tp)
14349 {
14350 	u32 val;
14351 
14352 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14353 		return;
14354 
14355 	/* Selfboot format */
14356 	if (val != TG3_EEPROM_MAGIC) {
14357 		tg3_get_eeprom_size(tp);
14358 		return;
14359 	}
14360 
14361 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14362 		if (val != 0) {
14363 			/* This is confusing.  We want to operate on the
14364 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14365 			 * call will read from NVRAM and byteswap the data
14366 			 * according to the byteswapping settings for all
14367 			 * other register accesses.  This ensures the data we
14368 			 * want will always reside in the lower 16-bits.
14369 			 * However, the data in NVRAM is in LE format, which
14370 			 * means the data from the NVRAM read will always be
14371 			 * opposite the endianness of the CPU.  The 16-bit
14372 			 * byteswap then brings the data to CPU endianness.
14373 			 */
14374 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14375 			return;
14376 		}
14377 	}
14378 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14379 }
14380 
14381 static void tg3_get_nvram_info(struct tg3 *tp)
14382 {
14383 	u32 nvcfg1;
14384 
14385 	nvcfg1 = tr32(NVRAM_CFG1);
14386 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14387 		tg3_flag_set(tp, FLASH);
14388 	} else {
14389 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14390 		tw32(NVRAM_CFG1, nvcfg1);
14391 	}
14392 
14393 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14394 	    tg3_flag(tp, 5780_CLASS)) {
14395 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14396 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14397 			tp->nvram_jedecnum = JEDEC_ATMEL;
14398 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14399 			tg3_flag_set(tp, NVRAM_BUFFERED);
14400 			break;
14401 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14402 			tp->nvram_jedecnum = JEDEC_ATMEL;
14403 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14404 			break;
14405 		case FLASH_VENDOR_ATMEL_EEPROM:
14406 			tp->nvram_jedecnum = JEDEC_ATMEL;
14407 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14408 			tg3_flag_set(tp, NVRAM_BUFFERED);
14409 			break;
14410 		case FLASH_VENDOR_ST:
14411 			tp->nvram_jedecnum = JEDEC_ST;
14412 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14413 			tg3_flag_set(tp, NVRAM_BUFFERED);
14414 			break;
14415 		case FLASH_VENDOR_SAIFUN:
14416 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14417 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14418 			break;
14419 		case FLASH_VENDOR_SST_SMALL:
14420 		case FLASH_VENDOR_SST_LARGE:
14421 			tp->nvram_jedecnum = JEDEC_SST;
14422 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14423 			break;
14424 		}
14425 	} else {
14426 		tp->nvram_jedecnum = JEDEC_ATMEL;
14427 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14428 		tg3_flag_set(tp, NVRAM_BUFFERED);
14429 	}
14430 }
14431 
14432 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14433 {
14434 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14435 	case FLASH_5752PAGE_SIZE_256:
14436 		tp->nvram_pagesize = 256;
14437 		break;
14438 	case FLASH_5752PAGE_SIZE_512:
14439 		tp->nvram_pagesize = 512;
14440 		break;
14441 	case FLASH_5752PAGE_SIZE_1K:
14442 		tp->nvram_pagesize = 1024;
14443 		break;
14444 	case FLASH_5752PAGE_SIZE_2K:
14445 		tp->nvram_pagesize = 2048;
14446 		break;
14447 	case FLASH_5752PAGE_SIZE_4K:
14448 		tp->nvram_pagesize = 4096;
14449 		break;
14450 	case FLASH_5752PAGE_SIZE_264:
14451 		tp->nvram_pagesize = 264;
14452 		break;
14453 	case FLASH_5752PAGE_SIZE_528:
14454 		tp->nvram_pagesize = 528;
14455 		break;
14456 	}
14457 }
14458 
14459 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14460 {
14461 	u32 nvcfg1;
14462 
14463 	nvcfg1 = tr32(NVRAM_CFG1);
14464 
14465 	/* NVRAM protection for TPM */
14466 	if (nvcfg1 & (1 << 27))
14467 		tg3_flag_set(tp, PROTECTED_NVRAM);
14468 
14469 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14470 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14471 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14472 		tp->nvram_jedecnum = JEDEC_ATMEL;
14473 		tg3_flag_set(tp, NVRAM_BUFFERED);
14474 		break;
14475 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14476 		tp->nvram_jedecnum = JEDEC_ATMEL;
14477 		tg3_flag_set(tp, NVRAM_BUFFERED);
14478 		tg3_flag_set(tp, FLASH);
14479 		break;
14480 	case FLASH_5752VENDOR_ST_M45PE10:
14481 	case FLASH_5752VENDOR_ST_M45PE20:
14482 	case FLASH_5752VENDOR_ST_M45PE40:
14483 		tp->nvram_jedecnum = JEDEC_ST;
14484 		tg3_flag_set(tp, NVRAM_BUFFERED);
14485 		tg3_flag_set(tp, FLASH);
14486 		break;
14487 	}
14488 
14489 	if (tg3_flag(tp, FLASH)) {
14490 		tg3_nvram_get_pagesize(tp, nvcfg1);
14491 	} else {
14492 		/* For eeprom, set pagesize to maximum eeprom size */
14493 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14494 
14495 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14496 		tw32(NVRAM_CFG1, nvcfg1);
14497 	}
14498 }
14499 
14500 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14501 {
14502 	u32 nvcfg1, protect = 0;
14503 
14504 	nvcfg1 = tr32(NVRAM_CFG1);
14505 
14506 	/* NVRAM protection for TPM */
14507 	if (nvcfg1 & (1 << 27)) {
14508 		tg3_flag_set(tp, PROTECTED_NVRAM);
14509 		protect = 1;
14510 	}
14511 
14512 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14513 	switch (nvcfg1) {
14514 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14515 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14516 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14517 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14518 		tp->nvram_jedecnum = JEDEC_ATMEL;
14519 		tg3_flag_set(tp, NVRAM_BUFFERED);
14520 		tg3_flag_set(tp, FLASH);
14521 		tp->nvram_pagesize = 264;
14522 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14523 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14524 			tp->nvram_size = (protect ? 0x3e200 :
14525 					  TG3_NVRAM_SIZE_512KB);
14526 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14527 			tp->nvram_size = (protect ? 0x1f200 :
14528 					  TG3_NVRAM_SIZE_256KB);
14529 		else
14530 			tp->nvram_size = (protect ? 0x1f200 :
14531 					  TG3_NVRAM_SIZE_128KB);
14532 		break;
14533 	case FLASH_5752VENDOR_ST_M45PE10:
14534 	case FLASH_5752VENDOR_ST_M45PE20:
14535 	case FLASH_5752VENDOR_ST_M45PE40:
14536 		tp->nvram_jedecnum = JEDEC_ST;
14537 		tg3_flag_set(tp, NVRAM_BUFFERED);
14538 		tg3_flag_set(tp, FLASH);
14539 		tp->nvram_pagesize = 256;
14540 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14541 			tp->nvram_size = (protect ?
14542 					  TG3_NVRAM_SIZE_64KB :
14543 					  TG3_NVRAM_SIZE_128KB);
14544 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14545 			tp->nvram_size = (protect ?
14546 					  TG3_NVRAM_SIZE_64KB :
14547 					  TG3_NVRAM_SIZE_256KB);
14548 		else
14549 			tp->nvram_size = (protect ?
14550 					  TG3_NVRAM_SIZE_128KB :
14551 					  TG3_NVRAM_SIZE_512KB);
14552 		break;
14553 	}
14554 }
14555 
14556 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14557 {
14558 	u32 nvcfg1;
14559 
14560 	nvcfg1 = tr32(NVRAM_CFG1);
14561 
14562 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14563 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14564 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14565 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14566 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14567 		tp->nvram_jedecnum = JEDEC_ATMEL;
14568 		tg3_flag_set(tp, NVRAM_BUFFERED);
14569 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14570 
14571 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14572 		tw32(NVRAM_CFG1, nvcfg1);
14573 		break;
14574 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14575 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14576 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14577 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14578 		tp->nvram_jedecnum = JEDEC_ATMEL;
14579 		tg3_flag_set(tp, NVRAM_BUFFERED);
14580 		tg3_flag_set(tp, FLASH);
14581 		tp->nvram_pagesize = 264;
14582 		break;
14583 	case FLASH_5752VENDOR_ST_M45PE10:
14584 	case FLASH_5752VENDOR_ST_M45PE20:
14585 	case FLASH_5752VENDOR_ST_M45PE40:
14586 		tp->nvram_jedecnum = JEDEC_ST;
14587 		tg3_flag_set(tp, NVRAM_BUFFERED);
14588 		tg3_flag_set(tp, FLASH);
14589 		tp->nvram_pagesize = 256;
14590 		break;
14591 	}
14592 }
14593 
14594 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14595 {
14596 	u32 nvcfg1, protect = 0;
14597 
14598 	nvcfg1 = tr32(NVRAM_CFG1);
14599 
14600 	/* NVRAM protection for TPM */
14601 	if (nvcfg1 & (1 << 27)) {
14602 		tg3_flag_set(tp, PROTECTED_NVRAM);
14603 		protect = 1;
14604 	}
14605 
14606 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14607 	switch (nvcfg1) {
14608 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14609 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14610 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14611 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14612 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14613 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14614 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14615 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14616 		tp->nvram_jedecnum = JEDEC_ATMEL;
14617 		tg3_flag_set(tp, NVRAM_BUFFERED);
14618 		tg3_flag_set(tp, FLASH);
14619 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14620 		tp->nvram_pagesize = 256;
14621 		break;
14622 	case FLASH_5761VENDOR_ST_A_M45PE20:
14623 	case FLASH_5761VENDOR_ST_A_M45PE40:
14624 	case FLASH_5761VENDOR_ST_A_M45PE80:
14625 	case FLASH_5761VENDOR_ST_A_M45PE16:
14626 	case FLASH_5761VENDOR_ST_M_M45PE20:
14627 	case FLASH_5761VENDOR_ST_M_M45PE40:
14628 	case FLASH_5761VENDOR_ST_M_M45PE80:
14629 	case FLASH_5761VENDOR_ST_M_M45PE16:
14630 		tp->nvram_jedecnum = JEDEC_ST;
14631 		tg3_flag_set(tp, NVRAM_BUFFERED);
14632 		tg3_flag_set(tp, FLASH);
14633 		tp->nvram_pagesize = 256;
14634 		break;
14635 	}
14636 
14637 	if (protect) {
14638 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14639 	} else {
14640 		switch (nvcfg1) {
14641 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14642 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14643 		case FLASH_5761VENDOR_ST_A_M45PE16:
14644 		case FLASH_5761VENDOR_ST_M_M45PE16:
14645 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14646 			break;
14647 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14648 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14649 		case FLASH_5761VENDOR_ST_A_M45PE80:
14650 		case FLASH_5761VENDOR_ST_M_M45PE80:
14651 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14652 			break;
14653 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14654 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14655 		case FLASH_5761VENDOR_ST_A_M45PE40:
14656 		case FLASH_5761VENDOR_ST_M_M45PE40:
14657 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14658 			break;
14659 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14660 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14661 		case FLASH_5761VENDOR_ST_A_M45PE20:
14662 		case FLASH_5761VENDOR_ST_M_M45PE20:
14663 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14664 			break;
14665 		}
14666 	}
14667 }
14668 
14669 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14670 {
14671 	tp->nvram_jedecnum = JEDEC_ATMEL;
14672 	tg3_flag_set(tp, NVRAM_BUFFERED);
14673 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14674 }
14675 
14676 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14677 {
14678 	u32 nvcfg1;
14679 
14680 	nvcfg1 = tr32(NVRAM_CFG1);
14681 
14682 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14683 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14684 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14685 		tp->nvram_jedecnum = JEDEC_ATMEL;
14686 		tg3_flag_set(tp, NVRAM_BUFFERED);
14687 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14688 
14689 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14690 		tw32(NVRAM_CFG1, nvcfg1);
14691 		return;
14692 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14693 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14694 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14695 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14696 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14697 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14698 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14699 		tp->nvram_jedecnum = JEDEC_ATMEL;
14700 		tg3_flag_set(tp, NVRAM_BUFFERED);
14701 		tg3_flag_set(tp, FLASH);
14702 
14703 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14705 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14706 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14707 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14708 			break;
14709 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14710 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14711 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14712 			break;
14713 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14714 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14715 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14716 			break;
14717 		}
14718 		break;
14719 	case FLASH_5752VENDOR_ST_M45PE10:
14720 	case FLASH_5752VENDOR_ST_M45PE20:
14721 	case FLASH_5752VENDOR_ST_M45PE40:
14722 		tp->nvram_jedecnum = JEDEC_ST;
14723 		tg3_flag_set(tp, NVRAM_BUFFERED);
14724 		tg3_flag_set(tp, FLASH);
14725 
14726 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14727 		case FLASH_5752VENDOR_ST_M45PE10:
14728 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14729 			break;
14730 		case FLASH_5752VENDOR_ST_M45PE20:
14731 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14732 			break;
14733 		case FLASH_5752VENDOR_ST_M45PE40:
14734 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14735 			break;
14736 		}
14737 		break;
14738 	default:
14739 		tg3_flag_set(tp, NO_NVRAM);
14740 		return;
14741 	}
14742 
14743 	tg3_nvram_get_pagesize(tp, nvcfg1);
14744 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14745 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14746 }
14747 
14748 
14749 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14750 {
14751 	u32 nvcfg1;
14752 
14753 	nvcfg1 = tr32(NVRAM_CFG1);
14754 
14755 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14756 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14757 	case FLASH_5717VENDOR_MICRO_EEPROM:
14758 		tp->nvram_jedecnum = JEDEC_ATMEL;
14759 		tg3_flag_set(tp, NVRAM_BUFFERED);
14760 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14761 
14762 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14763 		tw32(NVRAM_CFG1, nvcfg1);
14764 		return;
14765 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14766 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14767 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14768 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14769 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14770 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14771 	case FLASH_5717VENDOR_ATMEL_45USPT:
14772 		tp->nvram_jedecnum = JEDEC_ATMEL;
14773 		tg3_flag_set(tp, NVRAM_BUFFERED);
14774 		tg3_flag_set(tp, FLASH);
14775 
14776 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14777 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14778 			/* Detect size with tg3_nvram_get_size() */
14779 			break;
14780 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14781 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14782 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14783 			break;
14784 		default:
14785 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14786 			break;
14787 		}
14788 		break;
14789 	case FLASH_5717VENDOR_ST_M_M25PE10:
14790 	case FLASH_5717VENDOR_ST_A_M25PE10:
14791 	case FLASH_5717VENDOR_ST_M_M45PE10:
14792 	case FLASH_5717VENDOR_ST_A_M45PE10:
14793 	case FLASH_5717VENDOR_ST_M_M25PE20:
14794 	case FLASH_5717VENDOR_ST_A_M25PE20:
14795 	case FLASH_5717VENDOR_ST_M_M45PE20:
14796 	case FLASH_5717VENDOR_ST_A_M45PE20:
14797 	case FLASH_5717VENDOR_ST_25USPT:
14798 	case FLASH_5717VENDOR_ST_45USPT:
14799 		tp->nvram_jedecnum = JEDEC_ST;
14800 		tg3_flag_set(tp, NVRAM_BUFFERED);
14801 		tg3_flag_set(tp, FLASH);
14802 
14803 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14804 		case FLASH_5717VENDOR_ST_M_M25PE20:
14805 		case FLASH_5717VENDOR_ST_M_M45PE20:
14806 			/* Detect size with tg3_nvram_get_size() */
14807 			break;
14808 		case FLASH_5717VENDOR_ST_A_M25PE20:
14809 		case FLASH_5717VENDOR_ST_A_M45PE20:
14810 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14811 			break;
14812 		default:
14813 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14814 			break;
14815 		}
14816 		break;
14817 	default:
14818 		tg3_flag_set(tp, NO_NVRAM);
14819 		return;
14820 	}
14821 
14822 	tg3_nvram_get_pagesize(tp, nvcfg1);
14823 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14824 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14825 }
14826 
14827 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14828 {
14829 	u32 nvcfg1, nvmpinstrp, nv_status;
14830 
14831 	nvcfg1 = tr32(NVRAM_CFG1);
14832 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14833 
14834 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14835 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14836 			tg3_flag_set(tp, NO_NVRAM);
14837 			return;
14838 		}
14839 
14840 		switch (nvmpinstrp) {
14841 		case FLASH_5762_MX25L_100:
14842 		case FLASH_5762_MX25L_200:
14843 		case FLASH_5762_MX25L_400:
14844 		case FLASH_5762_MX25L_800:
14845 		case FLASH_5762_MX25L_160_320:
14846 			tp->nvram_pagesize = 4096;
14847 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14848 			tg3_flag_set(tp, NVRAM_BUFFERED);
14849 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14850 			tg3_flag_set(tp, FLASH);
14851 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14852 			tp->nvram_size =
14853 				(1 << (nv_status >> AUTOSENSE_DEVID &
14854 						AUTOSENSE_DEVID_MASK)
14855 					<< AUTOSENSE_SIZE_IN_MB);
14856 			return;
14857 
14858 		case FLASH_5762_EEPROM_HD:
14859 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14860 			break;
14861 		case FLASH_5762_EEPROM_LD:
14862 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14863 			break;
14864 		case FLASH_5720VENDOR_M_ST_M45PE20:
14865 			/* This pinstrap supports multiple sizes, so force it
14866 			 * to read the actual size from location 0xf0.
14867 			 */
14868 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14869 			break;
14870 		}
14871 	}
14872 
14873 	switch (nvmpinstrp) {
14874 	case FLASH_5720_EEPROM_HD:
14875 	case FLASH_5720_EEPROM_LD:
14876 		tp->nvram_jedecnum = JEDEC_ATMEL;
14877 		tg3_flag_set(tp, NVRAM_BUFFERED);
14878 
14879 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14880 		tw32(NVRAM_CFG1, nvcfg1);
14881 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14882 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14883 		else
14884 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14885 		return;
14886 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14887 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14888 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14889 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14890 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14891 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14892 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14893 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14894 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14895 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14896 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14897 	case FLASH_5720VENDOR_ATMEL_45USPT:
14898 		tp->nvram_jedecnum = JEDEC_ATMEL;
14899 		tg3_flag_set(tp, NVRAM_BUFFERED);
14900 		tg3_flag_set(tp, FLASH);
14901 
14902 		switch (nvmpinstrp) {
14903 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14904 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14905 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14906 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14907 			break;
14908 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14909 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14910 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14911 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14912 			break;
14913 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14914 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14915 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14916 			break;
14917 		default:
14918 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14919 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14920 			break;
14921 		}
14922 		break;
14923 	case FLASH_5720VENDOR_M_ST_M25PE10:
14924 	case FLASH_5720VENDOR_M_ST_M45PE10:
14925 	case FLASH_5720VENDOR_A_ST_M25PE10:
14926 	case FLASH_5720VENDOR_A_ST_M45PE10:
14927 	case FLASH_5720VENDOR_M_ST_M25PE20:
14928 	case FLASH_5720VENDOR_M_ST_M45PE20:
14929 	case FLASH_5720VENDOR_A_ST_M25PE20:
14930 	case FLASH_5720VENDOR_A_ST_M45PE20:
14931 	case FLASH_5720VENDOR_M_ST_M25PE40:
14932 	case FLASH_5720VENDOR_M_ST_M45PE40:
14933 	case FLASH_5720VENDOR_A_ST_M25PE40:
14934 	case FLASH_5720VENDOR_A_ST_M45PE40:
14935 	case FLASH_5720VENDOR_M_ST_M25PE80:
14936 	case FLASH_5720VENDOR_M_ST_M45PE80:
14937 	case FLASH_5720VENDOR_A_ST_M25PE80:
14938 	case FLASH_5720VENDOR_A_ST_M45PE80:
14939 	case FLASH_5720VENDOR_ST_25USPT:
14940 	case FLASH_5720VENDOR_ST_45USPT:
14941 		tp->nvram_jedecnum = JEDEC_ST;
14942 		tg3_flag_set(tp, NVRAM_BUFFERED);
14943 		tg3_flag_set(tp, FLASH);
14944 
14945 		switch (nvmpinstrp) {
14946 		case FLASH_5720VENDOR_M_ST_M25PE20:
14947 		case FLASH_5720VENDOR_M_ST_M45PE20:
14948 		case FLASH_5720VENDOR_A_ST_M25PE20:
14949 		case FLASH_5720VENDOR_A_ST_M45PE20:
14950 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14951 			break;
14952 		case FLASH_5720VENDOR_M_ST_M25PE40:
14953 		case FLASH_5720VENDOR_M_ST_M45PE40:
14954 		case FLASH_5720VENDOR_A_ST_M25PE40:
14955 		case FLASH_5720VENDOR_A_ST_M45PE40:
14956 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14957 			break;
14958 		case FLASH_5720VENDOR_M_ST_M25PE80:
14959 		case FLASH_5720VENDOR_M_ST_M45PE80:
14960 		case FLASH_5720VENDOR_A_ST_M25PE80:
14961 		case FLASH_5720VENDOR_A_ST_M45PE80:
14962 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14963 			break;
14964 		default:
14965 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14966 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14967 			break;
14968 		}
14969 		break;
14970 	default:
14971 		tg3_flag_set(tp, NO_NVRAM);
14972 		return;
14973 	}
14974 
14975 	tg3_nvram_get_pagesize(tp, nvcfg1);
14976 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14977 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14978 
14979 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14980 		u32 val;
14981 
14982 		if (tg3_nvram_read(tp, 0, &val))
14983 			return;
14984 
14985 		if (val != TG3_EEPROM_MAGIC &&
14986 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14987 			tg3_flag_set(tp, NO_NVRAM);
14988 	}
14989 }
14990 
14991 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14992 static void tg3_nvram_init(struct tg3 *tp)
14993 {
14994 	if (tg3_flag(tp, IS_SSB_CORE)) {
14995 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14996 		tg3_flag_clear(tp, NVRAM);
14997 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14998 		tg3_flag_set(tp, NO_NVRAM);
14999 		return;
15000 	}
15001 
15002 	tw32_f(GRC_EEPROM_ADDR,
15003 	     (EEPROM_ADDR_FSM_RESET |
15004 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15005 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15006 
15007 	msleep(1);
15008 
15009 	/* Enable seeprom accesses. */
15010 	tw32_f(GRC_LOCAL_CTRL,
15011 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15012 	udelay(100);
15013 
15014 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15015 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15016 		tg3_flag_set(tp, NVRAM);
15017 
15018 		if (tg3_nvram_lock(tp)) {
15019 			netdev_warn(tp->dev,
15020 				    "Cannot get nvram lock, %s failed\n",
15021 				    __func__);
15022 			return;
15023 		}
15024 		tg3_enable_nvram_access(tp);
15025 
15026 		tp->nvram_size = 0;
15027 
15028 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15029 			tg3_get_5752_nvram_info(tp);
15030 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15031 			tg3_get_5755_nvram_info(tp);
15032 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15033 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15034 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15035 			tg3_get_5787_nvram_info(tp);
15036 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15037 			tg3_get_5761_nvram_info(tp);
15038 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15039 			tg3_get_5906_nvram_info(tp);
15040 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15041 			 tg3_flag(tp, 57765_CLASS))
15042 			tg3_get_57780_nvram_info(tp);
15043 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15044 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15045 			tg3_get_5717_nvram_info(tp);
15046 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15047 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15048 			tg3_get_5720_nvram_info(tp);
15049 		else
15050 			tg3_get_nvram_info(tp);
15051 
15052 		if (tp->nvram_size == 0)
15053 			tg3_get_nvram_size(tp);
15054 
15055 		tg3_disable_nvram_access(tp);
15056 		tg3_nvram_unlock(tp);
15057 
15058 	} else {
15059 		tg3_flag_clear(tp, NVRAM);
15060 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15061 
15062 		tg3_get_eeprom_size(tp);
15063 	}
15064 }
15065 
15066 struct subsys_tbl_ent {
15067 	u16 subsys_vendor, subsys_devid;
15068 	u32 phy_id;
15069 };
15070 
15071 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15072 	/* Broadcom boards. */
15073 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15074 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15075 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15076 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15077 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15078 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15079 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15080 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15081 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15082 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15083 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15084 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15085 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15086 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15087 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15088 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15089 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15090 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15091 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15092 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15093 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15094 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15095 
15096 	/* 3com boards. */
15097 	{ TG3PCI_SUBVENDOR_ID_3COM,
15098 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15099 	{ TG3PCI_SUBVENDOR_ID_3COM,
15100 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15101 	{ TG3PCI_SUBVENDOR_ID_3COM,
15102 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15103 	{ TG3PCI_SUBVENDOR_ID_3COM,
15104 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15105 	{ TG3PCI_SUBVENDOR_ID_3COM,
15106 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15107 
15108 	/* DELL boards. */
15109 	{ TG3PCI_SUBVENDOR_ID_DELL,
15110 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15111 	{ TG3PCI_SUBVENDOR_ID_DELL,
15112 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15113 	{ TG3PCI_SUBVENDOR_ID_DELL,
15114 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15115 	{ TG3PCI_SUBVENDOR_ID_DELL,
15116 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15117 
15118 	/* Compaq boards. */
15119 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15120 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15121 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15122 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15123 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15124 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15125 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15126 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15127 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15128 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15129 
15130 	/* IBM boards. */
15131 	{ TG3PCI_SUBVENDOR_ID_IBM,
15132 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15133 };
15134 
15135 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15136 {
15137 	int i;
15138 
15139 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15140 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15141 		     tp->pdev->subsystem_vendor) &&
15142 		    (subsys_id_to_phy_id[i].subsys_devid ==
15143 		     tp->pdev->subsystem_device))
15144 			return &subsys_id_to_phy_id[i];
15145 	}
15146 	return NULL;
15147 }
15148 
15149 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15150 {
15151 	u32 val;
15152 
15153 	tp->phy_id = TG3_PHY_ID_INVALID;
15154 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15155 
15156 	/* Assume an onboard device and WOL capable by default.  */
15157 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15158 	tg3_flag_set(tp, WOL_CAP);
15159 
15160 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15161 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15162 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15163 			tg3_flag_set(tp, IS_NIC);
15164 		}
15165 		val = tr32(VCPU_CFGSHDW);
15166 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15167 			tg3_flag_set(tp, ASPM_WORKAROUND);
15168 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15169 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15170 			tg3_flag_set(tp, WOL_ENABLE);
15171 			device_set_wakeup_enable(&tp->pdev->dev, true);
15172 		}
15173 		goto done;
15174 	}
15175 
15176 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15177 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15178 		u32 nic_cfg, led_cfg;
15179 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15180 		u32 nic_phy_id, ver, eeprom_phy_id;
15181 		int eeprom_phy_serdes = 0;
15182 
15183 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15184 		tp->nic_sram_data_cfg = nic_cfg;
15185 
15186 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15187 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15188 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15189 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15190 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15191 		    (ver > 0) && (ver < 0x100))
15192 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15193 
15194 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15195 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15196 
15197 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15198 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15199 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15200 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15201 
15202 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15203 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15204 			eeprom_phy_serdes = 1;
15205 
15206 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15207 		if (nic_phy_id != 0) {
15208 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15209 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15210 
15211 			eeprom_phy_id  = (id1 >> 16) << 10;
15212 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15213 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15214 		} else
15215 			eeprom_phy_id = 0;
15216 
15217 		tp->phy_id = eeprom_phy_id;
15218 		if (eeprom_phy_serdes) {
15219 			if (!tg3_flag(tp, 5705_PLUS))
15220 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15221 			else
15222 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15223 		}
15224 
15225 		if (tg3_flag(tp, 5750_PLUS))
15226 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15227 				    SHASTA_EXT_LED_MODE_MASK);
15228 		else
15229 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15230 
15231 		switch (led_cfg) {
15232 		default:
15233 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15234 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15235 			break;
15236 
15237 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15238 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15239 			break;
15240 
15241 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15242 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15243 
15244 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15245 			 * read on some older 5700/5701 bootcode.
15246 			 */
15247 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15248 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15249 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15250 
15251 			break;
15252 
15253 		case SHASTA_EXT_LED_SHARED:
15254 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15255 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15256 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15257 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15258 						 LED_CTRL_MODE_PHY_2);
15259 
15260 			if (tg3_flag(tp, 5717_PLUS) ||
15261 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15262 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15263 						LED_CTRL_BLINK_RATE_MASK;
15264 
15265 			break;
15266 
15267 		case SHASTA_EXT_LED_MAC:
15268 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15269 			break;
15270 
15271 		case SHASTA_EXT_LED_COMBO:
15272 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15273 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15274 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15275 						 LED_CTRL_MODE_PHY_2);
15276 			break;
15277 
15278 		}
15279 
15280 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15281 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15282 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15283 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15284 
15285 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15286 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15287 
15288 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15289 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15290 			if ((tp->pdev->subsystem_vendor ==
15291 			     PCI_VENDOR_ID_ARIMA) &&
15292 			    (tp->pdev->subsystem_device == 0x205a ||
15293 			     tp->pdev->subsystem_device == 0x2063))
15294 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15295 		} else {
15296 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15297 			tg3_flag_set(tp, IS_NIC);
15298 		}
15299 
15300 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15301 			tg3_flag_set(tp, ENABLE_ASF);
15302 			if (tg3_flag(tp, 5750_PLUS))
15303 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15304 		}
15305 
15306 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15307 		    tg3_flag(tp, 5750_PLUS))
15308 			tg3_flag_set(tp, ENABLE_APE);
15309 
15310 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15311 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15312 			tg3_flag_clear(tp, WOL_CAP);
15313 
15314 		if (tg3_flag(tp, WOL_CAP) &&
15315 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15316 			tg3_flag_set(tp, WOL_ENABLE);
15317 			device_set_wakeup_enable(&tp->pdev->dev, true);
15318 		}
15319 
15320 		if (cfg2 & (1 << 17))
15321 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15322 
15323 		/* serdes signal pre-emphasis in register 0x590 set by */
15324 		/* bootcode if bit 18 is set */
15325 		if (cfg2 & (1 << 18))
15326 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15327 
15328 		if ((tg3_flag(tp, 57765_PLUS) ||
15329 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15330 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15331 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15332 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15333 
15334 		if (tg3_flag(tp, PCI_EXPRESS)) {
15335 			u32 cfg3;
15336 
15337 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15338 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15339 			    !tg3_flag(tp, 57765_PLUS) &&
15340 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15341 				tg3_flag_set(tp, ASPM_WORKAROUND);
15342 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15343 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15344 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15345 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15346 		}
15347 
15348 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15349 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15350 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15351 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15352 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15353 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15354 
15355 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15356 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15357 	}
15358 done:
15359 	if (tg3_flag(tp, WOL_CAP))
15360 		device_set_wakeup_enable(&tp->pdev->dev,
15361 					 tg3_flag(tp, WOL_ENABLE));
15362 	else
15363 		device_set_wakeup_capable(&tp->pdev->dev, false);
15364 }
15365 
15366 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15367 {
15368 	int i, err;
15369 	u32 val2, off = offset * 8;
15370 
15371 	err = tg3_nvram_lock(tp);
15372 	if (err)
15373 		return err;
15374 
15375 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15376 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15377 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15378 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15379 	udelay(10);
15380 
15381 	for (i = 0; i < 100; i++) {
15382 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15383 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15384 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15385 			break;
15386 		}
15387 		udelay(10);
15388 	}
15389 
15390 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15391 
15392 	tg3_nvram_unlock(tp);
15393 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15394 		return 0;
15395 
15396 	return -EBUSY;
15397 }
15398 
15399 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15400 {
15401 	int i;
15402 	u32 val;
15403 
15404 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15405 	tw32(OTP_CTRL, cmd);
15406 
15407 	/* Wait for up to 1 ms for command to execute. */
15408 	for (i = 0; i < 100; i++) {
15409 		val = tr32(OTP_STATUS);
15410 		if (val & OTP_STATUS_CMD_DONE)
15411 			break;
15412 		udelay(10);
15413 	}
15414 
15415 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15416 }
15417 
15418 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15419  * configuration is a 32-bit value that straddles the alignment boundary.
15420  * We do two 32-bit reads and then shift and merge the results.
15421  */
15422 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15423 {
15424 	u32 bhalf_otp, thalf_otp;
15425 
15426 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15427 
15428 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15429 		return 0;
15430 
15431 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15432 
15433 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15434 		return 0;
15435 
15436 	thalf_otp = tr32(OTP_READ_DATA);
15437 
15438 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15439 
15440 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15441 		return 0;
15442 
15443 	bhalf_otp = tr32(OTP_READ_DATA);
15444 
15445 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15446 }
15447 
15448 static void tg3_phy_init_link_config(struct tg3 *tp)
15449 {
15450 	u32 adv = ADVERTISED_Autoneg;
15451 
15452 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15453 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15454 			adv |= ADVERTISED_1000baseT_Half;
15455 		adv |= ADVERTISED_1000baseT_Full;
15456 	}
15457 
15458 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15459 		adv |= ADVERTISED_100baseT_Half |
15460 		       ADVERTISED_100baseT_Full |
15461 		       ADVERTISED_10baseT_Half |
15462 		       ADVERTISED_10baseT_Full |
15463 		       ADVERTISED_TP;
15464 	else
15465 		adv |= ADVERTISED_FIBRE;
15466 
15467 	tp->link_config.advertising = adv;
15468 	tp->link_config.speed = SPEED_UNKNOWN;
15469 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15470 	tp->link_config.autoneg = AUTONEG_ENABLE;
15471 	tp->link_config.active_speed = SPEED_UNKNOWN;
15472 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15473 
15474 	tp->old_link = -1;
15475 }
15476 
15477 static int tg3_phy_probe(struct tg3 *tp)
15478 {
15479 	u32 hw_phy_id_1, hw_phy_id_2;
15480 	u32 hw_phy_id, hw_phy_id_masked;
15481 	int err;
15482 
15483 	/* flow control autonegotiation is default behavior */
15484 	tg3_flag_set(tp, PAUSE_AUTONEG);
15485 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15486 
15487 	if (tg3_flag(tp, ENABLE_APE)) {
15488 		switch (tp->pci_fn) {
15489 		case 0:
15490 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15491 			break;
15492 		case 1:
15493 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15494 			break;
15495 		case 2:
15496 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15497 			break;
15498 		case 3:
15499 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15500 			break;
15501 		}
15502 	}
15503 
15504 	if (!tg3_flag(tp, ENABLE_ASF) &&
15505 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15506 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15507 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15508 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15509 
15510 	if (tg3_flag(tp, USE_PHYLIB))
15511 		return tg3_phy_init(tp);
15512 
15513 	/* Reading the PHY ID register can conflict with ASF
15514 	 * firmware access to the PHY hardware.
15515 	 */
15516 	err = 0;
15517 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15518 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15519 	} else {
15520 		/* Now read the physical PHY_ID from the chip and verify
15521 		 * that it is sane.  If it doesn't look good, we fall back
15522 		 * to either the hard-coded table based PHY_ID and failing
15523 		 * that the value found in the eeprom area.
15524 		 */
15525 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15526 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15527 
15528 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15529 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15530 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15531 
15532 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15533 	}
15534 
15535 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15536 		tp->phy_id = hw_phy_id;
15537 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15538 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15539 		else
15540 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15541 	} else {
15542 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15543 			/* Do nothing, phy ID already set up in
15544 			 * tg3_get_eeprom_hw_cfg().
15545 			 */
15546 		} else {
15547 			struct subsys_tbl_ent *p;
15548 
15549 			/* No eeprom signature?  Try the hardcoded
15550 			 * subsys device table.
15551 			 */
15552 			p = tg3_lookup_by_subsys(tp);
15553 			if (p) {
15554 				tp->phy_id = p->phy_id;
15555 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15556 				/* For now we saw the IDs 0xbc050cd0,
15557 				 * 0xbc050f80 and 0xbc050c30 on devices
15558 				 * connected to an BCM4785 and there are
15559 				 * probably more. Just assume that the phy is
15560 				 * supported when it is connected to a SSB core
15561 				 * for now.
15562 				 */
15563 				return -ENODEV;
15564 			}
15565 
15566 			if (!tp->phy_id ||
15567 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15568 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15569 		}
15570 	}
15571 
15572 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15573 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15574 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15575 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15576 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15577 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15578 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15579 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15580 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15581 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15582 
15583 		tp->eee.supported = SUPPORTED_100baseT_Full |
15584 				    SUPPORTED_1000baseT_Full;
15585 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15586 				     ADVERTISED_1000baseT_Full;
15587 		tp->eee.eee_enabled = 1;
15588 		tp->eee.tx_lpi_enabled = 1;
15589 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15590 	}
15591 
15592 	tg3_phy_init_link_config(tp);
15593 
15594 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15595 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15596 	    !tg3_flag(tp, ENABLE_APE) &&
15597 	    !tg3_flag(tp, ENABLE_ASF)) {
15598 		u32 bmsr, dummy;
15599 
15600 		tg3_readphy(tp, MII_BMSR, &bmsr);
15601 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15602 		    (bmsr & BMSR_LSTATUS))
15603 			goto skip_phy_reset;
15604 
15605 		err = tg3_phy_reset(tp);
15606 		if (err)
15607 			return err;
15608 
15609 		tg3_phy_set_wirespeed(tp);
15610 
15611 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15612 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15613 					    tp->link_config.flowctrl);
15614 
15615 			tg3_writephy(tp, MII_BMCR,
15616 				     BMCR_ANENABLE | BMCR_ANRESTART);
15617 		}
15618 	}
15619 
15620 skip_phy_reset:
15621 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15622 		err = tg3_init_5401phy_dsp(tp);
15623 		if (err)
15624 			return err;
15625 
15626 		err = tg3_init_5401phy_dsp(tp);
15627 	}
15628 
15629 	return err;
15630 }
15631 
15632 static void tg3_read_vpd(struct tg3 *tp)
15633 {
15634 	u8 *vpd_data;
15635 	unsigned int block_end, rosize, len;
15636 	u32 vpdlen;
15637 	int j, i = 0;
15638 
15639 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15640 	if (!vpd_data)
15641 		goto out_no_vpd;
15642 
15643 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15644 	if (i < 0)
15645 		goto out_not_found;
15646 
15647 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15648 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15649 	i += PCI_VPD_LRDT_TAG_SIZE;
15650 
15651 	if (block_end > vpdlen)
15652 		goto out_not_found;
15653 
15654 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15655 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15656 	if (j > 0) {
15657 		len = pci_vpd_info_field_size(&vpd_data[j]);
15658 
15659 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15660 		if (j + len > block_end || len != 4 ||
15661 		    memcmp(&vpd_data[j], "1028", 4))
15662 			goto partno;
15663 
15664 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15665 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15666 		if (j < 0)
15667 			goto partno;
15668 
15669 		len = pci_vpd_info_field_size(&vpd_data[j]);
15670 
15671 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15672 		if (j + len > block_end)
15673 			goto partno;
15674 
15675 		if (len >= sizeof(tp->fw_ver))
15676 			len = sizeof(tp->fw_ver) - 1;
15677 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15678 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15679 			 &vpd_data[j]);
15680 	}
15681 
15682 partno:
15683 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15684 				      PCI_VPD_RO_KEYWORD_PARTNO);
15685 	if (i < 0)
15686 		goto out_not_found;
15687 
15688 	len = pci_vpd_info_field_size(&vpd_data[i]);
15689 
15690 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15691 	if (len > TG3_BPN_SIZE ||
15692 	    (len + i) > vpdlen)
15693 		goto out_not_found;
15694 
15695 	memcpy(tp->board_part_number, &vpd_data[i], len);
15696 
15697 out_not_found:
15698 	kfree(vpd_data);
15699 	if (tp->board_part_number[0])
15700 		return;
15701 
15702 out_no_vpd:
15703 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15704 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15705 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15706 			strcpy(tp->board_part_number, "BCM5717");
15707 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15708 			strcpy(tp->board_part_number, "BCM5718");
15709 		else
15710 			goto nomatch;
15711 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15712 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15713 			strcpy(tp->board_part_number, "BCM57780");
15714 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15715 			strcpy(tp->board_part_number, "BCM57760");
15716 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15717 			strcpy(tp->board_part_number, "BCM57790");
15718 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15719 			strcpy(tp->board_part_number, "BCM57788");
15720 		else
15721 			goto nomatch;
15722 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15723 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15724 			strcpy(tp->board_part_number, "BCM57761");
15725 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15726 			strcpy(tp->board_part_number, "BCM57765");
15727 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15728 			strcpy(tp->board_part_number, "BCM57781");
15729 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15730 			strcpy(tp->board_part_number, "BCM57785");
15731 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15732 			strcpy(tp->board_part_number, "BCM57791");
15733 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15734 			strcpy(tp->board_part_number, "BCM57795");
15735 		else
15736 			goto nomatch;
15737 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15738 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15739 			strcpy(tp->board_part_number, "BCM57762");
15740 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15741 			strcpy(tp->board_part_number, "BCM57766");
15742 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15743 			strcpy(tp->board_part_number, "BCM57782");
15744 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15745 			strcpy(tp->board_part_number, "BCM57786");
15746 		else
15747 			goto nomatch;
15748 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15749 		strcpy(tp->board_part_number, "BCM95906");
15750 	} else {
15751 nomatch:
15752 		strcpy(tp->board_part_number, "none");
15753 	}
15754 }
15755 
15756 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15757 {
15758 	u32 val;
15759 
15760 	if (tg3_nvram_read(tp, offset, &val) ||
15761 	    (val & 0xfc000000) != 0x0c000000 ||
15762 	    tg3_nvram_read(tp, offset + 4, &val) ||
15763 	    val != 0)
15764 		return 0;
15765 
15766 	return 1;
15767 }
15768 
15769 static void tg3_read_bc_ver(struct tg3 *tp)
15770 {
15771 	u32 val, offset, start, ver_offset;
15772 	int i, dst_off;
15773 	bool newver = false;
15774 
15775 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15776 	    tg3_nvram_read(tp, 0x4, &start))
15777 		return;
15778 
15779 	offset = tg3_nvram_logical_addr(tp, offset);
15780 
15781 	if (tg3_nvram_read(tp, offset, &val))
15782 		return;
15783 
15784 	if ((val & 0xfc000000) == 0x0c000000) {
15785 		if (tg3_nvram_read(tp, offset + 4, &val))
15786 			return;
15787 
15788 		if (val == 0)
15789 			newver = true;
15790 	}
15791 
15792 	dst_off = strlen(tp->fw_ver);
15793 
15794 	if (newver) {
15795 		if (TG3_VER_SIZE - dst_off < 16 ||
15796 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15797 			return;
15798 
15799 		offset = offset + ver_offset - start;
15800 		for (i = 0; i < 16; i += 4) {
15801 			__be32 v;
15802 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15803 				return;
15804 
15805 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15806 		}
15807 	} else {
15808 		u32 major, minor;
15809 
15810 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15811 			return;
15812 
15813 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15814 			TG3_NVM_BCVER_MAJSFT;
15815 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15816 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15817 			 "v%d.%02d", major, minor);
15818 	}
15819 }
15820 
15821 static void tg3_read_hwsb_ver(struct tg3 *tp)
15822 {
15823 	u32 val, major, minor;
15824 
15825 	/* Use native endian representation */
15826 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15827 		return;
15828 
15829 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15830 		TG3_NVM_HWSB_CFG1_MAJSFT;
15831 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15832 		TG3_NVM_HWSB_CFG1_MINSFT;
15833 
15834 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15835 }
15836 
15837 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15838 {
15839 	u32 offset, major, minor, build;
15840 
15841 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15842 
15843 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15844 		return;
15845 
15846 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15847 	case TG3_EEPROM_SB_REVISION_0:
15848 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15849 		break;
15850 	case TG3_EEPROM_SB_REVISION_2:
15851 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15852 		break;
15853 	case TG3_EEPROM_SB_REVISION_3:
15854 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15855 		break;
15856 	case TG3_EEPROM_SB_REVISION_4:
15857 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15858 		break;
15859 	case TG3_EEPROM_SB_REVISION_5:
15860 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15861 		break;
15862 	case TG3_EEPROM_SB_REVISION_6:
15863 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15864 		break;
15865 	default:
15866 		return;
15867 	}
15868 
15869 	if (tg3_nvram_read(tp, offset, &val))
15870 		return;
15871 
15872 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15873 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15874 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15875 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15876 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15877 
15878 	if (minor > 99 || build > 26)
15879 		return;
15880 
15881 	offset = strlen(tp->fw_ver);
15882 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15883 		 " v%d.%02d", major, minor);
15884 
15885 	if (build > 0) {
15886 		offset = strlen(tp->fw_ver);
15887 		if (offset < TG3_VER_SIZE - 1)
15888 			tp->fw_ver[offset] = 'a' + build - 1;
15889 	}
15890 }
15891 
15892 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15893 {
15894 	u32 val, offset, start;
15895 	int i, vlen;
15896 
15897 	for (offset = TG3_NVM_DIR_START;
15898 	     offset < TG3_NVM_DIR_END;
15899 	     offset += TG3_NVM_DIRENT_SIZE) {
15900 		if (tg3_nvram_read(tp, offset, &val))
15901 			return;
15902 
15903 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15904 			break;
15905 	}
15906 
15907 	if (offset == TG3_NVM_DIR_END)
15908 		return;
15909 
15910 	if (!tg3_flag(tp, 5705_PLUS))
15911 		start = 0x08000000;
15912 	else if (tg3_nvram_read(tp, offset - 4, &start))
15913 		return;
15914 
15915 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15916 	    !tg3_fw_img_is_valid(tp, offset) ||
15917 	    tg3_nvram_read(tp, offset + 8, &val))
15918 		return;
15919 
15920 	offset += val - start;
15921 
15922 	vlen = strlen(tp->fw_ver);
15923 
15924 	tp->fw_ver[vlen++] = ',';
15925 	tp->fw_ver[vlen++] = ' ';
15926 
15927 	for (i = 0; i < 4; i++) {
15928 		__be32 v;
15929 		if (tg3_nvram_read_be32(tp, offset, &v))
15930 			return;
15931 
15932 		offset += sizeof(v);
15933 
15934 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15935 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15936 			break;
15937 		}
15938 
15939 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15940 		vlen += sizeof(v);
15941 	}
15942 }
15943 
15944 static void tg3_probe_ncsi(struct tg3 *tp)
15945 {
15946 	u32 apedata;
15947 
15948 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15949 	if (apedata != APE_SEG_SIG_MAGIC)
15950 		return;
15951 
15952 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15953 	if (!(apedata & APE_FW_STATUS_READY))
15954 		return;
15955 
15956 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15957 		tg3_flag_set(tp, APE_HAS_NCSI);
15958 }
15959 
15960 static void tg3_read_dash_ver(struct tg3 *tp)
15961 {
15962 	int vlen;
15963 	u32 apedata;
15964 	char *fwtype;
15965 
15966 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15967 
15968 	if (tg3_flag(tp, APE_HAS_NCSI))
15969 		fwtype = "NCSI";
15970 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15971 		fwtype = "SMASH";
15972 	else
15973 		fwtype = "DASH";
15974 
15975 	vlen = strlen(tp->fw_ver);
15976 
15977 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15978 		 fwtype,
15979 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15980 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15981 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15982 		 (apedata & APE_FW_VERSION_BLDMSK));
15983 }
15984 
15985 static void tg3_read_otp_ver(struct tg3 *tp)
15986 {
15987 	u32 val, val2;
15988 
15989 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15990 		return;
15991 
15992 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15993 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15994 	    TG3_OTP_MAGIC0_VALID(val)) {
15995 		u64 val64 = (u64) val << 32 | val2;
15996 		u32 ver = 0;
15997 		int i, vlen;
15998 
15999 		for (i = 0; i < 7; i++) {
16000 			if ((val64 & 0xff) == 0)
16001 				break;
16002 			ver = val64 & 0xff;
16003 			val64 >>= 8;
16004 		}
16005 		vlen = strlen(tp->fw_ver);
16006 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16007 	}
16008 }
16009 
16010 static void tg3_read_fw_ver(struct tg3 *tp)
16011 {
16012 	u32 val;
16013 	bool vpd_vers = false;
16014 
16015 	if (tp->fw_ver[0] != 0)
16016 		vpd_vers = true;
16017 
16018 	if (tg3_flag(tp, NO_NVRAM)) {
16019 		strcat(tp->fw_ver, "sb");
16020 		tg3_read_otp_ver(tp);
16021 		return;
16022 	}
16023 
16024 	if (tg3_nvram_read(tp, 0, &val))
16025 		return;
16026 
16027 	if (val == TG3_EEPROM_MAGIC)
16028 		tg3_read_bc_ver(tp);
16029 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16030 		tg3_read_sb_ver(tp, val);
16031 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16032 		tg3_read_hwsb_ver(tp);
16033 
16034 	if (tg3_flag(tp, ENABLE_ASF)) {
16035 		if (tg3_flag(tp, ENABLE_APE)) {
16036 			tg3_probe_ncsi(tp);
16037 			if (!vpd_vers)
16038 				tg3_read_dash_ver(tp);
16039 		} else if (!vpd_vers) {
16040 			tg3_read_mgmtfw_ver(tp);
16041 		}
16042 	}
16043 
16044 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16045 }
16046 
16047 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16048 {
16049 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16050 		return TG3_RX_RET_MAX_SIZE_5717;
16051 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16052 		return TG3_RX_RET_MAX_SIZE_5700;
16053 	else
16054 		return TG3_RX_RET_MAX_SIZE_5705;
16055 }
16056 
16057 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16058 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16059 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16060 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16061 	{ },
16062 };
16063 
16064 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16065 {
16066 	struct pci_dev *peer;
16067 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16068 
16069 	for (func = 0; func < 8; func++) {
16070 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16071 		if (peer && peer != tp->pdev)
16072 			break;
16073 		pci_dev_put(peer);
16074 	}
16075 	/* 5704 can be configured in single-port mode, set peer to
16076 	 * tp->pdev in that case.
16077 	 */
16078 	if (!peer) {
16079 		peer = tp->pdev;
16080 		return peer;
16081 	}
16082 
16083 	/*
16084 	 * We don't need to keep the refcount elevated; there's no way
16085 	 * to remove one half of this device without removing the other
16086 	 */
16087 	pci_dev_put(peer);
16088 
16089 	return peer;
16090 }
16091 
16092 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16093 {
16094 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16095 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16096 		u32 reg;
16097 
16098 		/* All devices that use the alternate
16099 		 * ASIC REV location have a CPMU.
16100 		 */
16101 		tg3_flag_set(tp, CPMU_PRESENT);
16102 
16103 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16104 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16105 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16106 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16107 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16108 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16109 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16110 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16111 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16112 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16113 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16114 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16115 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16116 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16117 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16118 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16119 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16120 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16121 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16122 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16123 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16124 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16125 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16126 		else
16127 			reg = TG3PCI_PRODID_ASICREV;
16128 
16129 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16130 	}
16131 
16132 	/* Wrong chip ID in 5752 A0. This code can be removed later
16133 	 * as A0 is not in production.
16134 	 */
16135 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16136 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16137 
16138 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16139 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16140 
16141 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16142 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16143 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16144 		tg3_flag_set(tp, 5717_PLUS);
16145 
16146 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16147 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16148 		tg3_flag_set(tp, 57765_CLASS);
16149 
16150 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16151 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16152 		tg3_flag_set(tp, 57765_PLUS);
16153 
16154 	/* Intentionally exclude ASIC_REV_5906 */
16155 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16156 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16157 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16158 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16159 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16160 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16161 	    tg3_flag(tp, 57765_PLUS))
16162 		tg3_flag_set(tp, 5755_PLUS);
16163 
16164 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16165 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16166 		tg3_flag_set(tp, 5780_CLASS);
16167 
16168 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16169 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16170 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16171 	    tg3_flag(tp, 5755_PLUS) ||
16172 	    tg3_flag(tp, 5780_CLASS))
16173 		tg3_flag_set(tp, 5750_PLUS);
16174 
16175 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16176 	    tg3_flag(tp, 5750_PLUS))
16177 		tg3_flag_set(tp, 5705_PLUS);
16178 }
16179 
16180 static bool tg3_10_100_only_device(struct tg3 *tp,
16181 				   const struct pci_device_id *ent)
16182 {
16183 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16184 
16185 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16186 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16187 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16188 		return true;
16189 
16190 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16191 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16192 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16193 				return true;
16194 		} else {
16195 			return true;
16196 		}
16197 	}
16198 
16199 	return false;
16200 }
16201 
16202 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16203 {
16204 	u32 misc_ctrl_reg;
16205 	u32 pci_state_reg, grc_misc_cfg;
16206 	u32 val;
16207 	u16 pci_cmd;
16208 	int err;
16209 
16210 	/* Force memory write invalidate off.  If we leave it on,
16211 	 * then on 5700_BX chips we have to enable a workaround.
16212 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16213 	 * to match the cacheline size.  The Broadcom driver have this
16214 	 * workaround but turns MWI off all the times so never uses
16215 	 * it.  This seems to suggest that the workaround is insufficient.
16216 	 */
16217 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16218 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16219 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16220 
16221 	/* Important! -- Make sure register accesses are byteswapped
16222 	 * correctly.  Also, for those chips that require it, make
16223 	 * sure that indirect register accesses are enabled before
16224 	 * the first operation.
16225 	 */
16226 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16227 			      &misc_ctrl_reg);
16228 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16229 			       MISC_HOST_CTRL_CHIPREV);
16230 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16231 			       tp->misc_host_ctrl);
16232 
16233 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16234 
16235 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16236 	 * we need to disable memory and use config. cycles
16237 	 * only to access all registers. The 5702/03 chips
16238 	 * can mistakenly decode the special cycles from the
16239 	 * ICH chipsets as memory write cycles, causing corruption
16240 	 * of register and memory space. Only certain ICH bridges
16241 	 * will drive special cycles with non-zero data during the
16242 	 * address phase which can fall within the 5703's address
16243 	 * range. This is not an ICH bug as the PCI spec allows
16244 	 * non-zero address during special cycles. However, only
16245 	 * these ICH bridges are known to drive non-zero addresses
16246 	 * during special cycles.
16247 	 *
16248 	 * Since special cycles do not cross PCI bridges, we only
16249 	 * enable this workaround if the 5703 is on the secondary
16250 	 * bus of these ICH bridges.
16251 	 */
16252 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16253 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16254 		static struct tg3_dev_id {
16255 			u32	vendor;
16256 			u32	device;
16257 			u32	rev;
16258 		} ich_chipsets[] = {
16259 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16260 			  PCI_ANY_ID },
16261 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16262 			  PCI_ANY_ID },
16263 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16264 			  0xa },
16265 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16266 			  PCI_ANY_ID },
16267 			{ },
16268 		};
16269 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16270 		struct pci_dev *bridge = NULL;
16271 
16272 		while (pci_id->vendor != 0) {
16273 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16274 						bridge);
16275 			if (!bridge) {
16276 				pci_id++;
16277 				continue;
16278 			}
16279 			if (pci_id->rev != PCI_ANY_ID) {
16280 				if (bridge->revision > pci_id->rev)
16281 					continue;
16282 			}
16283 			if (bridge->subordinate &&
16284 			    (bridge->subordinate->number ==
16285 			     tp->pdev->bus->number)) {
16286 				tg3_flag_set(tp, ICH_WORKAROUND);
16287 				pci_dev_put(bridge);
16288 				break;
16289 			}
16290 		}
16291 	}
16292 
16293 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16294 		static struct tg3_dev_id {
16295 			u32	vendor;
16296 			u32	device;
16297 		} bridge_chipsets[] = {
16298 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16299 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16300 			{ },
16301 		};
16302 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16303 		struct pci_dev *bridge = NULL;
16304 
16305 		while (pci_id->vendor != 0) {
16306 			bridge = pci_get_device(pci_id->vendor,
16307 						pci_id->device,
16308 						bridge);
16309 			if (!bridge) {
16310 				pci_id++;
16311 				continue;
16312 			}
16313 			if (bridge->subordinate &&
16314 			    (bridge->subordinate->number <=
16315 			     tp->pdev->bus->number) &&
16316 			    (bridge->subordinate->busn_res.end >=
16317 			     tp->pdev->bus->number)) {
16318 				tg3_flag_set(tp, 5701_DMA_BUG);
16319 				pci_dev_put(bridge);
16320 				break;
16321 			}
16322 		}
16323 	}
16324 
16325 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16326 	 * DMA addresses > 40-bit. This bridge may have other additional
16327 	 * 57xx devices behind it in some 4-port NIC designs for example.
16328 	 * Any tg3 device found behind the bridge will also need the 40-bit
16329 	 * DMA workaround.
16330 	 */
16331 	if (tg3_flag(tp, 5780_CLASS)) {
16332 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16333 		tp->msi_cap = tp->pdev->msi_cap;
16334 	} else {
16335 		struct pci_dev *bridge = NULL;
16336 
16337 		do {
16338 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16339 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16340 						bridge);
16341 			if (bridge && bridge->subordinate &&
16342 			    (bridge->subordinate->number <=
16343 			     tp->pdev->bus->number) &&
16344 			    (bridge->subordinate->busn_res.end >=
16345 			     tp->pdev->bus->number)) {
16346 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16347 				pci_dev_put(bridge);
16348 				break;
16349 			}
16350 		} while (bridge);
16351 	}
16352 
16353 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16354 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16355 		tp->pdev_peer = tg3_find_peer(tp);
16356 
16357 	/* Determine TSO capabilities */
16358 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16359 		; /* Do nothing. HW bug. */
16360 	else if (tg3_flag(tp, 57765_PLUS))
16361 		tg3_flag_set(tp, HW_TSO_3);
16362 	else if (tg3_flag(tp, 5755_PLUS) ||
16363 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16364 		tg3_flag_set(tp, HW_TSO_2);
16365 	else if (tg3_flag(tp, 5750_PLUS)) {
16366 		tg3_flag_set(tp, HW_TSO_1);
16367 		tg3_flag_set(tp, TSO_BUG);
16368 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16369 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16370 			tg3_flag_clear(tp, TSO_BUG);
16371 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16372 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16373 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16374 		tg3_flag_set(tp, FW_TSO);
16375 		tg3_flag_set(tp, TSO_BUG);
16376 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16377 			tp->fw_needed = FIRMWARE_TG3TSO5;
16378 		else
16379 			tp->fw_needed = FIRMWARE_TG3TSO;
16380 	}
16381 
16382 	/* Selectively allow TSO based on operating conditions */
16383 	if (tg3_flag(tp, HW_TSO_1) ||
16384 	    tg3_flag(tp, HW_TSO_2) ||
16385 	    tg3_flag(tp, HW_TSO_3) ||
16386 	    tg3_flag(tp, FW_TSO)) {
16387 		/* For firmware TSO, assume ASF is disabled.
16388 		 * We'll disable TSO later if we discover ASF
16389 		 * is enabled in tg3_get_eeprom_hw_cfg().
16390 		 */
16391 		tg3_flag_set(tp, TSO_CAPABLE);
16392 	} else {
16393 		tg3_flag_clear(tp, TSO_CAPABLE);
16394 		tg3_flag_clear(tp, TSO_BUG);
16395 		tp->fw_needed = NULL;
16396 	}
16397 
16398 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16399 		tp->fw_needed = FIRMWARE_TG3;
16400 
16401 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16402 		tp->fw_needed = FIRMWARE_TG357766;
16403 
16404 	tp->irq_max = 1;
16405 
16406 	if (tg3_flag(tp, 5750_PLUS)) {
16407 		tg3_flag_set(tp, SUPPORT_MSI);
16408 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16409 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16410 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16411 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16412 		     tp->pdev_peer == tp->pdev))
16413 			tg3_flag_clear(tp, SUPPORT_MSI);
16414 
16415 		if (tg3_flag(tp, 5755_PLUS) ||
16416 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16417 			tg3_flag_set(tp, 1SHOT_MSI);
16418 		}
16419 
16420 		if (tg3_flag(tp, 57765_PLUS)) {
16421 			tg3_flag_set(tp, SUPPORT_MSIX);
16422 			tp->irq_max = TG3_IRQ_MAX_VECS;
16423 		}
16424 	}
16425 
16426 	tp->txq_max = 1;
16427 	tp->rxq_max = 1;
16428 	if (tp->irq_max > 1) {
16429 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16430 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16431 
16432 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16433 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16434 			tp->txq_max = tp->irq_max - 1;
16435 	}
16436 
16437 	if (tg3_flag(tp, 5755_PLUS) ||
16438 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16439 		tg3_flag_set(tp, SHORT_DMA_BUG);
16440 
16441 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16442 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16443 
16444 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16445 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16446 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16447 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16448 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16449 
16450 	if (tg3_flag(tp, 57765_PLUS) &&
16451 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16452 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16453 
16454 	if (!tg3_flag(tp, 5705_PLUS) ||
16455 	    tg3_flag(tp, 5780_CLASS) ||
16456 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16457 		tg3_flag_set(tp, JUMBO_CAPABLE);
16458 
16459 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16460 			      &pci_state_reg);
16461 
16462 	if (pci_is_pcie(tp->pdev)) {
16463 		u16 lnkctl;
16464 
16465 		tg3_flag_set(tp, PCI_EXPRESS);
16466 
16467 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16468 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16469 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16470 				tg3_flag_clear(tp, HW_TSO_2);
16471 				tg3_flag_clear(tp, TSO_CAPABLE);
16472 			}
16473 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16474 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16475 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16476 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16477 				tg3_flag_set(tp, CLKREQ_BUG);
16478 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16479 			tg3_flag_set(tp, L1PLLPD_EN);
16480 		}
16481 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16482 		/* BCM5785 devices are effectively PCIe devices, and should
16483 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16484 		 * section.
16485 		 */
16486 		tg3_flag_set(tp, PCI_EXPRESS);
16487 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16488 		   tg3_flag(tp, 5780_CLASS)) {
16489 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16490 		if (!tp->pcix_cap) {
16491 			dev_err(&tp->pdev->dev,
16492 				"Cannot find PCI-X capability, aborting\n");
16493 			return -EIO;
16494 		}
16495 
16496 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16497 			tg3_flag_set(tp, PCIX_MODE);
16498 	}
16499 
16500 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16501 	 * reordering to the mailbox registers done by the host
16502 	 * controller can cause major troubles.  We read back from
16503 	 * every mailbox register write to force the writes to be
16504 	 * posted to the chip in order.
16505 	 */
16506 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16507 	    !tg3_flag(tp, PCI_EXPRESS))
16508 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16509 
16510 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16511 			     &tp->pci_cacheline_sz);
16512 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16513 			     &tp->pci_lat_timer);
16514 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16515 	    tp->pci_lat_timer < 64) {
16516 		tp->pci_lat_timer = 64;
16517 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16518 				      tp->pci_lat_timer);
16519 	}
16520 
16521 	/* Important! -- It is critical that the PCI-X hw workaround
16522 	 * situation is decided before the first MMIO register access.
16523 	 */
16524 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16525 		/* 5700 BX chips need to have their TX producer index
16526 		 * mailboxes written twice to workaround a bug.
16527 		 */
16528 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16529 
16530 		/* If we are in PCI-X mode, enable register write workaround.
16531 		 *
16532 		 * The workaround is to use indirect register accesses
16533 		 * for all chip writes not to mailbox registers.
16534 		 */
16535 		if (tg3_flag(tp, PCIX_MODE)) {
16536 			u32 pm_reg;
16537 
16538 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16539 
16540 			/* The chip can have it's power management PCI config
16541 			 * space registers clobbered due to this bug.
16542 			 * So explicitly force the chip into D0 here.
16543 			 */
16544 			pci_read_config_dword(tp->pdev,
16545 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16546 					      &pm_reg);
16547 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16548 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16549 			pci_write_config_dword(tp->pdev,
16550 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16551 					       pm_reg);
16552 
16553 			/* Also, force SERR#/PERR# in PCI command. */
16554 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16555 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16556 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16557 		}
16558 	}
16559 
16560 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16561 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16562 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16563 		tg3_flag_set(tp, PCI_32BIT);
16564 
16565 	/* Chip-specific fixup from Broadcom driver */
16566 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16567 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16568 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16569 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16570 	}
16571 
16572 	/* Default fast path register access methods */
16573 	tp->read32 = tg3_read32;
16574 	tp->write32 = tg3_write32;
16575 	tp->read32_mbox = tg3_read32;
16576 	tp->write32_mbox = tg3_write32;
16577 	tp->write32_tx_mbox = tg3_write32;
16578 	tp->write32_rx_mbox = tg3_write32;
16579 
16580 	/* Various workaround register access methods */
16581 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16582 		tp->write32 = tg3_write_indirect_reg32;
16583 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16584 		 (tg3_flag(tp, PCI_EXPRESS) &&
16585 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16586 		/*
16587 		 * Back to back register writes can cause problems on these
16588 		 * chips, the workaround is to read back all reg writes
16589 		 * except those to mailbox regs.
16590 		 *
16591 		 * See tg3_write_indirect_reg32().
16592 		 */
16593 		tp->write32 = tg3_write_flush_reg32;
16594 	}
16595 
16596 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16597 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16598 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16599 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16600 	}
16601 
16602 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16603 		tp->read32 = tg3_read_indirect_reg32;
16604 		tp->write32 = tg3_write_indirect_reg32;
16605 		tp->read32_mbox = tg3_read_indirect_mbox;
16606 		tp->write32_mbox = tg3_write_indirect_mbox;
16607 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16608 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16609 
16610 		iounmap(tp->regs);
16611 		tp->regs = NULL;
16612 
16613 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16614 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16615 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16616 	}
16617 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16618 		tp->read32_mbox = tg3_read32_mbox_5906;
16619 		tp->write32_mbox = tg3_write32_mbox_5906;
16620 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16621 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16622 	}
16623 
16624 	if (tp->write32 == tg3_write_indirect_reg32 ||
16625 	    (tg3_flag(tp, PCIX_MODE) &&
16626 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16627 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16628 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16629 
16630 	/* The memory arbiter has to be enabled in order for SRAM accesses
16631 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16632 	 * sure it is enabled, but other entities such as system netboot
16633 	 * code might disable it.
16634 	 */
16635 	val = tr32(MEMARB_MODE);
16636 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16637 
16638 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16639 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16640 	    tg3_flag(tp, 5780_CLASS)) {
16641 		if (tg3_flag(tp, PCIX_MODE)) {
16642 			pci_read_config_dword(tp->pdev,
16643 					      tp->pcix_cap + PCI_X_STATUS,
16644 					      &val);
16645 			tp->pci_fn = val & 0x7;
16646 		}
16647 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16648 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16649 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16650 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16651 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16652 			val = tr32(TG3_CPMU_STATUS);
16653 
16654 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16655 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16656 		else
16657 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16658 				     TG3_CPMU_STATUS_FSHFT_5719;
16659 	}
16660 
16661 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16662 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16663 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16664 	}
16665 
16666 	/* Get eeprom hw config before calling tg3_set_power_state().
16667 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16668 	 * determined before calling tg3_set_power_state() so that
16669 	 * we know whether or not to switch out of Vaux power.
16670 	 * When the flag is set, it means that GPIO1 is used for eeprom
16671 	 * write protect and also implies that it is a LOM where GPIOs
16672 	 * are not used to switch power.
16673 	 */
16674 	tg3_get_eeprom_hw_cfg(tp);
16675 
16676 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16677 		tg3_flag_clear(tp, TSO_CAPABLE);
16678 		tg3_flag_clear(tp, TSO_BUG);
16679 		tp->fw_needed = NULL;
16680 	}
16681 
16682 	if (tg3_flag(tp, ENABLE_APE)) {
16683 		/* Allow reads and writes to the
16684 		 * APE register and memory space.
16685 		 */
16686 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16687 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16688 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16689 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16690 				       pci_state_reg);
16691 
16692 		tg3_ape_lock_init(tp);
16693 		tp->ape_hb_interval =
16694 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16695 	}
16696 
16697 	/* Set up tp->grc_local_ctrl before calling
16698 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16699 	 * will bring 5700's external PHY out of reset.
16700 	 * It is also used as eeprom write protect on LOMs.
16701 	 */
16702 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16703 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16704 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16705 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16706 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16707 	/* Unused GPIO3 must be driven as output on 5752 because there
16708 	 * are no pull-up resistors on unused GPIO pins.
16709 	 */
16710 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16711 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16712 
16713 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16714 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16715 	    tg3_flag(tp, 57765_CLASS))
16716 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16717 
16718 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16719 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16720 		/* Turn off the debug UART. */
16721 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16722 		if (tg3_flag(tp, IS_NIC))
16723 			/* Keep VMain power. */
16724 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16725 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16726 	}
16727 
16728 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16729 		tp->grc_local_ctrl |=
16730 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16731 
16732 	/* Switch out of Vaux if it is a NIC */
16733 	tg3_pwrsrc_switch_to_vmain(tp);
16734 
16735 	/* Derive initial jumbo mode from MTU assigned in
16736 	 * ether_setup() via the alloc_etherdev() call
16737 	 */
16738 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16739 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16740 
16741 	/* Determine WakeOnLan speed to use. */
16742 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16743 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16744 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16745 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16746 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16747 	} else {
16748 		tg3_flag_set(tp, WOL_SPEED_100MB);
16749 	}
16750 
16751 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16752 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16753 
16754 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16755 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16756 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16757 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16758 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16759 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16760 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16761 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16762 
16763 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16764 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16765 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16766 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16767 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16768 
16769 	if (tg3_flag(tp, 5705_PLUS) &&
16770 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16771 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16772 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16773 	    !tg3_flag(tp, 57765_PLUS)) {
16774 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16775 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16776 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16777 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16778 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16779 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16780 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16781 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16782 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16783 		} else
16784 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16785 	}
16786 
16787 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16788 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16789 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16790 		if (tp->phy_otp == 0)
16791 			tp->phy_otp = TG3_OTP_DEFAULT;
16792 	}
16793 
16794 	if (tg3_flag(tp, CPMU_PRESENT))
16795 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16796 	else
16797 		tp->mi_mode = MAC_MI_MODE_BASE;
16798 
16799 	tp->coalesce_mode = 0;
16800 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16801 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16802 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16803 
16804 	/* Set these bits to enable statistics workaround. */
16805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16806 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16807 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16808 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16809 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16810 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16811 	}
16812 
16813 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16814 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16815 		tg3_flag_set(tp, USE_PHYLIB);
16816 
16817 	err = tg3_mdio_init(tp);
16818 	if (err)
16819 		return err;
16820 
16821 	/* Initialize data/descriptor byte/word swapping. */
16822 	val = tr32(GRC_MODE);
16823 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16824 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16825 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16826 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16827 			GRC_MODE_B2HRX_ENABLE |
16828 			GRC_MODE_HTX2B_ENABLE |
16829 			GRC_MODE_HOST_STACKUP);
16830 	else
16831 		val &= GRC_MODE_HOST_STACKUP;
16832 
16833 	tw32(GRC_MODE, val | tp->grc_mode);
16834 
16835 	tg3_switch_clocks(tp);
16836 
16837 	/* Clear this out for sanity. */
16838 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16839 
16840 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16841 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16842 
16843 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16844 			      &pci_state_reg);
16845 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16846 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16847 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16848 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16849 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16850 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16851 			void __iomem *sram_base;
16852 
16853 			/* Write some dummy words into the SRAM status block
16854 			 * area, see if it reads back correctly.  If the return
16855 			 * value is bad, force enable the PCIX workaround.
16856 			 */
16857 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16858 
16859 			writel(0x00000000, sram_base);
16860 			writel(0x00000000, sram_base + 4);
16861 			writel(0xffffffff, sram_base + 4);
16862 			if (readl(sram_base) != 0x00000000)
16863 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16864 		}
16865 	}
16866 
16867 	udelay(50);
16868 	tg3_nvram_init(tp);
16869 
16870 	/* If the device has an NVRAM, no need to load patch firmware */
16871 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16872 	    !tg3_flag(tp, NO_NVRAM))
16873 		tp->fw_needed = NULL;
16874 
16875 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16876 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16877 
16878 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16879 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16880 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16881 		tg3_flag_set(tp, IS_5788);
16882 
16883 	if (!tg3_flag(tp, IS_5788) &&
16884 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16885 		tg3_flag_set(tp, TAGGED_STATUS);
16886 	if (tg3_flag(tp, TAGGED_STATUS)) {
16887 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16888 				      HOSTCC_MODE_CLRTICK_TXBD);
16889 
16890 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16891 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16892 				       tp->misc_host_ctrl);
16893 	}
16894 
16895 	/* Preserve the APE MAC_MODE bits */
16896 	if (tg3_flag(tp, ENABLE_APE))
16897 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16898 	else
16899 		tp->mac_mode = 0;
16900 
16901 	if (tg3_10_100_only_device(tp, ent))
16902 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16903 
16904 	err = tg3_phy_probe(tp);
16905 	if (err) {
16906 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16907 		/* ... but do not return immediately ... */
16908 		tg3_mdio_fini(tp);
16909 	}
16910 
16911 	tg3_read_vpd(tp);
16912 	tg3_read_fw_ver(tp);
16913 
16914 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16915 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16916 	} else {
16917 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16918 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16919 		else
16920 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16921 	}
16922 
16923 	/* 5700 {AX,BX} chips have a broken status block link
16924 	 * change bit implementation, so we must use the
16925 	 * status register in those cases.
16926 	 */
16927 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16928 		tg3_flag_set(tp, USE_LINKCHG_REG);
16929 	else
16930 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16931 
16932 	/* The led_ctrl is set during tg3_phy_probe, here we might
16933 	 * have to force the link status polling mechanism based
16934 	 * upon subsystem IDs.
16935 	 */
16936 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16937 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16938 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16939 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16940 		tg3_flag_set(tp, USE_LINKCHG_REG);
16941 	}
16942 
16943 	/* For all SERDES we poll the MAC status register. */
16944 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16945 		tg3_flag_set(tp, POLL_SERDES);
16946 	else
16947 		tg3_flag_clear(tp, POLL_SERDES);
16948 
16949 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16950 		tg3_flag_set(tp, POLL_CPMU_LINK);
16951 
16952 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16953 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16954 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16955 	    tg3_flag(tp, PCIX_MODE)) {
16956 		tp->rx_offset = NET_SKB_PAD;
16957 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16958 		tp->rx_copy_thresh = ~(u16)0;
16959 #endif
16960 	}
16961 
16962 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16963 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16964 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16965 
16966 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16967 
16968 	/* Increment the rx prod index on the rx std ring by at most
16969 	 * 8 for these chips to workaround hw errata.
16970 	 */
16971 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16972 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16973 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16974 		tp->rx_std_max_post = 8;
16975 
16976 	if (tg3_flag(tp, ASPM_WORKAROUND))
16977 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16978 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16979 
16980 	return err;
16981 }
16982 
16983 static int tg3_get_device_address(struct tg3 *tp)
16984 {
16985 	struct net_device *dev = tp->dev;
16986 	u32 hi, lo, mac_offset;
16987 	int addr_ok = 0;
16988 	int err;
16989 
16990 	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16991 		return 0;
16992 
16993 	if (tg3_flag(tp, IS_SSB_CORE)) {
16994 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16995 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16996 			return 0;
16997 	}
16998 
16999 	mac_offset = 0x7c;
17000 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17001 	    tg3_flag(tp, 5780_CLASS)) {
17002 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17003 			mac_offset = 0xcc;
17004 		if (tg3_nvram_lock(tp))
17005 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17006 		else
17007 			tg3_nvram_unlock(tp);
17008 	} else if (tg3_flag(tp, 5717_PLUS)) {
17009 		if (tp->pci_fn & 1)
17010 			mac_offset = 0xcc;
17011 		if (tp->pci_fn > 1)
17012 			mac_offset += 0x18c;
17013 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17014 		mac_offset = 0x10;
17015 
17016 	/* First try to get it from MAC address mailbox. */
17017 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17018 	if ((hi >> 16) == 0x484b) {
17019 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17020 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17021 
17022 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17023 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17024 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17025 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17026 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17027 
17028 		/* Some old bootcode may report a 0 MAC address in SRAM */
17029 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17030 	}
17031 	if (!addr_ok) {
17032 		/* Next, try NVRAM. */
17033 		if (!tg3_flag(tp, NO_NVRAM) &&
17034 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17035 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17036 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17037 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17038 		}
17039 		/* Finally just fetch it out of the MAC control regs. */
17040 		else {
17041 			hi = tr32(MAC_ADDR_0_HIGH);
17042 			lo = tr32(MAC_ADDR_0_LOW);
17043 
17044 			dev->dev_addr[5] = lo & 0xff;
17045 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17046 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17047 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17048 			dev->dev_addr[1] = hi & 0xff;
17049 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17050 		}
17051 	}
17052 
17053 	if (!is_valid_ether_addr(&dev->dev_addr[0]))
17054 		return -EINVAL;
17055 	return 0;
17056 }
17057 
17058 #define BOUNDARY_SINGLE_CACHELINE	1
17059 #define BOUNDARY_MULTI_CACHELINE	2
17060 
17061 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17062 {
17063 	int cacheline_size;
17064 	u8 byte;
17065 	int goal;
17066 
17067 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17068 	if (byte == 0)
17069 		cacheline_size = 1024;
17070 	else
17071 		cacheline_size = (int) byte * 4;
17072 
17073 	/* On 5703 and later chips, the boundary bits have no
17074 	 * effect.
17075 	 */
17076 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17077 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17078 	    !tg3_flag(tp, PCI_EXPRESS))
17079 		goto out;
17080 
17081 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17082 	goal = BOUNDARY_MULTI_CACHELINE;
17083 #else
17084 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17085 	goal = BOUNDARY_SINGLE_CACHELINE;
17086 #else
17087 	goal = 0;
17088 #endif
17089 #endif
17090 
17091 	if (tg3_flag(tp, 57765_PLUS)) {
17092 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17093 		goto out;
17094 	}
17095 
17096 	if (!goal)
17097 		goto out;
17098 
17099 	/* PCI controllers on most RISC systems tend to disconnect
17100 	 * when a device tries to burst across a cache-line boundary.
17101 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17102 	 *
17103 	 * Unfortunately, for PCI-E there are only limited
17104 	 * write-side controls for this, and thus for reads
17105 	 * we will still get the disconnects.  We'll also waste
17106 	 * these PCI cycles for both read and write for chips
17107 	 * other than 5700 and 5701 which do not implement the
17108 	 * boundary bits.
17109 	 */
17110 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17111 		switch (cacheline_size) {
17112 		case 16:
17113 		case 32:
17114 		case 64:
17115 		case 128:
17116 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17117 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17118 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17119 			} else {
17120 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17121 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17122 			}
17123 			break;
17124 
17125 		case 256:
17126 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17127 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17128 			break;
17129 
17130 		default:
17131 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17132 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17133 			break;
17134 		}
17135 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17136 		switch (cacheline_size) {
17137 		case 16:
17138 		case 32:
17139 		case 64:
17140 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17141 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17142 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17143 				break;
17144 			}
17145 			/* fallthrough */
17146 		case 128:
17147 		default:
17148 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17149 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17150 			break;
17151 		}
17152 	} else {
17153 		switch (cacheline_size) {
17154 		case 16:
17155 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17156 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17157 					DMA_RWCTRL_WRITE_BNDRY_16);
17158 				break;
17159 			}
17160 			/* fallthrough */
17161 		case 32:
17162 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17164 					DMA_RWCTRL_WRITE_BNDRY_32);
17165 				break;
17166 			}
17167 			/* fallthrough */
17168 		case 64:
17169 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17170 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17171 					DMA_RWCTRL_WRITE_BNDRY_64);
17172 				break;
17173 			}
17174 			/* fallthrough */
17175 		case 128:
17176 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17177 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17178 					DMA_RWCTRL_WRITE_BNDRY_128);
17179 				break;
17180 			}
17181 			/* fallthrough */
17182 		case 256:
17183 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17184 				DMA_RWCTRL_WRITE_BNDRY_256);
17185 			break;
17186 		case 512:
17187 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17188 				DMA_RWCTRL_WRITE_BNDRY_512);
17189 			break;
17190 		case 1024:
17191 		default:
17192 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17193 				DMA_RWCTRL_WRITE_BNDRY_1024);
17194 			break;
17195 		}
17196 	}
17197 
17198 out:
17199 	return val;
17200 }
17201 
17202 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17203 			   int size, bool to_device)
17204 {
17205 	struct tg3_internal_buffer_desc test_desc;
17206 	u32 sram_dma_descs;
17207 	int i, ret;
17208 
17209 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17210 
17211 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17212 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17213 	tw32(RDMAC_STATUS, 0);
17214 	tw32(WDMAC_STATUS, 0);
17215 
17216 	tw32(BUFMGR_MODE, 0);
17217 	tw32(FTQ_RESET, 0);
17218 
17219 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17220 	test_desc.addr_lo = buf_dma & 0xffffffff;
17221 	test_desc.nic_mbuf = 0x00002100;
17222 	test_desc.len = size;
17223 
17224 	/*
17225 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17226 	 * the *second* time the tg3 driver was getting loaded after an
17227 	 * initial scan.
17228 	 *
17229 	 * Broadcom tells me:
17230 	 *   ...the DMA engine is connected to the GRC block and a DMA
17231 	 *   reset may affect the GRC block in some unpredictable way...
17232 	 *   The behavior of resets to individual blocks has not been tested.
17233 	 *
17234 	 * Broadcom noted the GRC reset will also reset all sub-components.
17235 	 */
17236 	if (to_device) {
17237 		test_desc.cqid_sqid = (13 << 8) | 2;
17238 
17239 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17240 		udelay(40);
17241 	} else {
17242 		test_desc.cqid_sqid = (16 << 8) | 7;
17243 
17244 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17245 		udelay(40);
17246 	}
17247 	test_desc.flags = 0x00000005;
17248 
17249 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17250 		u32 val;
17251 
17252 		val = *(((u32 *)&test_desc) + i);
17253 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17254 				       sram_dma_descs + (i * sizeof(u32)));
17255 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17256 	}
17257 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17258 
17259 	if (to_device)
17260 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17261 	else
17262 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17263 
17264 	ret = -ENODEV;
17265 	for (i = 0; i < 40; i++) {
17266 		u32 val;
17267 
17268 		if (to_device)
17269 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17270 		else
17271 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17272 		if ((val & 0xffff) == sram_dma_descs) {
17273 			ret = 0;
17274 			break;
17275 		}
17276 
17277 		udelay(100);
17278 	}
17279 
17280 	return ret;
17281 }
17282 
17283 #define TEST_BUFFER_SIZE	0x2000
17284 
17285 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17286 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17287 	{ },
17288 };
17289 
17290 static int tg3_test_dma(struct tg3 *tp)
17291 {
17292 	dma_addr_t buf_dma;
17293 	u32 *buf, saved_dma_rwctrl;
17294 	int ret = 0;
17295 
17296 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17297 				 &buf_dma, GFP_KERNEL);
17298 	if (!buf) {
17299 		ret = -ENOMEM;
17300 		goto out_nofree;
17301 	}
17302 
17303 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17304 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17305 
17306 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17307 
17308 	if (tg3_flag(tp, 57765_PLUS))
17309 		goto out;
17310 
17311 	if (tg3_flag(tp, PCI_EXPRESS)) {
17312 		/* DMA read watermark not used on PCIE */
17313 		tp->dma_rwctrl |= 0x00180000;
17314 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17315 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17316 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17317 			tp->dma_rwctrl |= 0x003f0000;
17318 		else
17319 			tp->dma_rwctrl |= 0x003f000f;
17320 	} else {
17321 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17322 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17323 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17324 			u32 read_water = 0x7;
17325 
17326 			/* If the 5704 is behind the EPB bridge, we can
17327 			 * do the less restrictive ONE_DMA workaround for
17328 			 * better performance.
17329 			 */
17330 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17331 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17332 				tp->dma_rwctrl |= 0x8000;
17333 			else if (ccval == 0x6 || ccval == 0x7)
17334 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17335 
17336 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17337 				read_water = 4;
17338 			/* Set bit 23 to enable PCIX hw bug fix */
17339 			tp->dma_rwctrl |=
17340 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17341 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17342 				(1 << 23);
17343 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17344 			/* 5780 always in PCIX mode */
17345 			tp->dma_rwctrl |= 0x00144000;
17346 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17347 			/* 5714 always in PCIX mode */
17348 			tp->dma_rwctrl |= 0x00148000;
17349 		} else {
17350 			tp->dma_rwctrl |= 0x001b000f;
17351 		}
17352 	}
17353 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17354 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17355 
17356 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17357 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17358 		tp->dma_rwctrl &= 0xfffffff0;
17359 
17360 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17361 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17362 		/* Remove this if it causes problems for some boards. */
17363 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17364 
17365 		/* On 5700/5701 chips, we need to set this bit.
17366 		 * Otherwise the chip will issue cacheline transactions
17367 		 * to streamable DMA memory with not all the byte
17368 		 * enables turned on.  This is an error on several
17369 		 * RISC PCI controllers, in particular sparc64.
17370 		 *
17371 		 * On 5703/5704 chips, this bit has been reassigned
17372 		 * a different meaning.  In particular, it is used
17373 		 * on those chips to enable a PCI-X workaround.
17374 		 */
17375 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17376 	}
17377 
17378 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17379 
17380 
17381 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17382 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17383 		goto out;
17384 
17385 	/* It is best to perform DMA test with maximum write burst size
17386 	 * to expose the 5700/5701 write DMA bug.
17387 	 */
17388 	saved_dma_rwctrl = tp->dma_rwctrl;
17389 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17390 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391 
17392 	while (1) {
17393 		u32 *p = buf, i;
17394 
17395 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17396 			p[i] = i;
17397 
17398 		/* Send the buffer to the chip. */
17399 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17400 		if (ret) {
17401 			dev_err(&tp->pdev->dev,
17402 				"%s: Buffer write failed. err = %d\n",
17403 				__func__, ret);
17404 			break;
17405 		}
17406 
17407 		/* Now read it back. */
17408 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17409 		if (ret) {
17410 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17411 				"err = %d\n", __func__, ret);
17412 			break;
17413 		}
17414 
17415 		/* Verify it. */
17416 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17417 			if (p[i] == i)
17418 				continue;
17419 
17420 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17421 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17422 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17423 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17424 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17425 				break;
17426 			} else {
17427 				dev_err(&tp->pdev->dev,
17428 					"%s: Buffer corrupted on read back! "
17429 					"(%d != %d)\n", __func__, p[i], i);
17430 				ret = -ENODEV;
17431 				goto out;
17432 			}
17433 		}
17434 
17435 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17436 			/* Success. */
17437 			ret = 0;
17438 			break;
17439 		}
17440 	}
17441 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17442 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17443 		/* DMA test passed without adjusting DMA boundary,
17444 		 * now look for chipsets that are known to expose the
17445 		 * DMA bug without failing the test.
17446 		 */
17447 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17448 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17449 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17450 		} else {
17451 			/* Safe to use the calculated DMA boundary. */
17452 			tp->dma_rwctrl = saved_dma_rwctrl;
17453 		}
17454 
17455 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17456 	}
17457 
17458 out:
17459 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17460 out_nofree:
17461 	return ret;
17462 }
17463 
17464 static void tg3_init_bufmgr_config(struct tg3 *tp)
17465 {
17466 	if (tg3_flag(tp, 57765_PLUS)) {
17467 		tp->bufmgr_config.mbuf_read_dma_low_water =
17468 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17469 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17470 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17471 		tp->bufmgr_config.mbuf_high_water =
17472 			DEFAULT_MB_HIGH_WATER_57765;
17473 
17474 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17475 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17476 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17477 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17478 		tp->bufmgr_config.mbuf_high_water_jumbo =
17479 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17480 	} else if (tg3_flag(tp, 5705_PLUS)) {
17481 		tp->bufmgr_config.mbuf_read_dma_low_water =
17482 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17483 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17484 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17485 		tp->bufmgr_config.mbuf_high_water =
17486 			DEFAULT_MB_HIGH_WATER_5705;
17487 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17488 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17489 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17490 			tp->bufmgr_config.mbuf_high_water =
17491 				DEFAULT_MB_HIGH_WATER_5906;
17492 		}
17493 
17494 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17495 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17496 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17497 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17498 		tp->bufmgr_config.mbuf_high_water_jumbo =
17499 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17500 	} else {
17501 		tp->bufmgr_config.mbuf_read_dma_low_water =
17502 			DEFAULT_MB_RDMA_LOW_WATER;
17503 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17504 			DEFAULT_MB_MACRX_LOW_WATER;
17505 		tp->bufmgr_config.mbuf_high_water =
17506 			DEFAULT_MB_HIGH_WATER;
17507 
17508 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17509 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17510 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17511 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17512 		tp->bufmgr_config.mbuf_high_water_jumbo =
17513 			DEFAULT_MB_HIGH_WATER_JUMBO;
17514 	}
17515 
17516 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17517 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17518 }
17519 
17520 static char *tg3_phy_string(struct tg3 *tp)
17521 {
17522 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17523 	case TG3_PHY_ID_BCM5400:	return "5400";
17524 	case TG3_PHY_ID_BCM5401:	return "5401";
17525 	case TG3_PHY_ID_BCM5411:	return "5411";
17526 	case TG3_PHY_ID_BCM5701:	return "5701";
17527 	case TG3_PHY_ID_BCM5703:	return "5703";
17528 	case TG3_PHY_ID_BCM5704:	return "5704";
17529 	case TG3_PHY_ID_BCM5705:	return "5705";
17530 	case TG3_PHY_ID_BCM5750:	return "5750";
17531 	case TG3_PHY_ID_BCM5752:	return "5752";
17532 	case TG3_PHY_ID_BCM5714:	return "5714";
17533 	case TG3_PHY_ID_BCM5780:	return "5780";
17534 	case TG3_PHY_ID_BCM5755:	return "5755";
17535 	case TG3_PHY_ID_BCM5787:	return "5787";
17536 	case TG3_PHY_ID_BCM5784:	return "5784";
17537 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17538 	case TG3_PHY_ID_BCM5906:	return "5906";
17539 	case TG3_PHY_ID_BCM5761:	return "5761";
17540 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17541 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17542 	case TG3_PHY_ID_BCM57765:	return "57765";
17543 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17544 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17545 	case TG3_PHY_ID_BCM5762:	return "5762C";
17546 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17547 	case 0:			return "serdes";
17548 	default:		return "unknown";
17549 	}
17550 }
17551 
17552 static char *tg3_bus_string(struct tg3 *tp, char *str)
17553 {
17554 	if (tg3_flag(tp, PCI_EXPRESS)) {
17555 		strcpy(str, "PCI Express");
17556 		return str;
17557 	} else if (tg3_flag(tp, PCIX_MODE)) {
17558 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17559 
17560 		strcpy(str, "PCIX:");
17561 
17562 		if ((clock_ctrl == 7) ||
17563 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17564 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17565 			strcat(str, "133MHz");
17566 		else if (clock_ctrl == 0)
17567 			strcat(str, "33MHz");
17568 		else if (clock_ctrl == 2)
17569 			strcat(str, "50MHz");
17570 		else if (clock_ctrl == 4)
17571 			strcat(str, "66MHz");
17572 		else if (clock_ctrl == 6)
17573 			strcat(str, "100MHz");
17574 	} else {
17575 		strcpy(str, "PCI:");
17576 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17577 			strcat(str, "66MHz");
17578 		else
17579 			strcat(str, "33MHz");
17580 	}
17581 	if (tg3_flag(tp, PCI_32BIT))
17582 		strcat(str, ":32-bit");
17583 	else
17584 		strcat(str, ":64-bit");
17585 	return str;
17586 }
17587 
17588 static void tg3_init_coal(struct tg3 *tp)
17589 {
17590 	struct ethtool_coalesce *ec = &tp->coal;
17591 
17592 	memset(ec, 0, sizeof(*ec));
17593 	ec->cmd = ETHTOOL_GCOALESCE;
17594 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17595 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17596 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17597 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17598 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17599 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17600 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17601 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17602 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17603 
17604 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17605 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17606 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17607 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17608 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17609 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17610 	}
17611 
17612 	if (tg3_flag(tp, 5705_PLUS)) {
17613 		ec->rx_coalesce_usecs_irq = 0;
17614 		ec->tx_coalesce_usecs_irq = 0;
17615 		ec->stats_block_coalesce_usecs = 0;
17616 	}
17617 }
17618 
17619 static int tg3_init_one(struct pci_dev *pdev,
17620 				  const struct pci_device_id *ent)
17621 {
17622 	struct net_device *dev;
17623 	struct tg3 *tp;
17624 	int i, err;
17625 	u32 sndmbx, rcvmbx, intmbx;
17626 	char str[40];
17627 	u64 dma_mask, persist_dma_mask;
17628 	netdev_features_t features = 0;
17629 
17630 	printk_once(KERN_INFO "%s\n", version);
17631 
17632 	err = pci_enable_device(pdev);
17633 	if (err) {
17634 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17635 		return err;
17636 	}
17637 
17638 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17639 	if (err) {
17640 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17641 		goto err_out_disable_pdev;
17642 	}
17643 
17644 	pci_set_master(pdev);
17645 
17646 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17647 	if (!dev) {
17648 		err = -ENOMEM;
17649 		goto err_out_free_res;
17650 	}
17651 
17652 	SET_NETDEV_DEV(dev, &pdev->dev);
17653 
17654 	tp = netdev_priv(dev);
17655 	tp->pdev = pdev;
17656 	tp->dev = dev;
17657 	tp->rx_mode = TG3_DEF_RX_MODE;
17658 	tp->tx_mode = TG3_DEF_TX_MODE;
17659 	tp->irq_sync = 1;
17660 	tp->pcierr_recovery = false;
17661 
17662 	if (tg3_debug > 0)
17663 		tp->msg_enable = tg3_debug;
17664 	else
17665 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17666 
17667 	if (pdev_is_ssb_gige_core(pdev)) {
17668 		tg3_flag_set(tp, IS_SSB_CORE);
17669 		if (ssb_gige_must_flush_posted_writes(pdev))
17670 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17671 		if (ssb_gige_one_dma_at_once(pdev))
17672 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17673 		if (ssb_gige_have_roboswitch(pdev)) {
17674 			tg3_flag_set(tp, USE_PHYLIB);
17675 			tg3_flag_set(tp, ROBOSWITCH);
17676 		}
17677 		if (ssb_gige_is_rgmii(pdev))
17678 			tg3_flag_set(tp, RGMII_MODE);
17679 	}
17680 
17681 	/* The word/byte swap controls here control register access byte
17682 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17683 	 * setting below.
17684 	 */
17685 	tp->misc_host_ctrl =
17686 		MISC_HOST_CTRL_MASK_PCI_INT |
17687 		MISC_HOST_CTRL_WORD_SWAP |
17688 		MISC_HOST_CTRL_INDIR_ACCESS |
17689 		MISC_HOST_CTRL_PCISTATE_RW;
17690 
17691 	/* The NONFRM (non-frame) byte/word swap controls take effect
17692 	 * on descriptor entries, anything which isn't packet data.
17693 	 *
17694 	 * The StrongARM chips on the board (one for tx, one for rx)
17695 	 * are running in big-endian mode.
17696 	 */
17697 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17698 			GRC_MODE_WSWAP_NONFRM_DATA);
17699 #ifdef __BIG_ENDIAN
17700 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17701 #endif
17702 	spin_lock_init(&tp->lock);
17703 	spin_lock_init(&tp->indirect_lock);
17704 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17705 
17706 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17707 	if (!tp->regs) {
17708 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17709 		err = -ENOMEM;
17710 		goto err_out_free_dev;
17711 	}
17712 
17713 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17714 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17715 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17716 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17717 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17718 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17719 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17720 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17721 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17722 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17723 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17724 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17725 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17726 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17727 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17728 		tg3_flag_set(tp, ENABLE_APE);
17729 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17730 		if (!tp->aperegs) {
17731 			dev_err(&pdev->dev,
17732 				"Cannot map APE registers, aborting\n");
17733 			err = -ENOMEM;
17734 			goto err_out_iounmap;
17735 		}
17736 	}
17737 
17738 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17739 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17740 
17741 	dev->ethtool_ops = &tg3_ethtool_ops;
17742 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17743 	dev->netdev_ops = &tg3_netdev_ops;
17744 	dev->irq = pdev->irq;
17745 
17746 	err = tg3_get_invariants(tp, ent);
17747 	if (err) {
17748 		dev_err(&pdev->dev,
17749 			"Problem fetching invariants of chip, aborting\n");
17750 		goto err_out_apeunmap;
17751 	}
17752 
17753 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17754 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17755 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17756 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17757 	 * do DMA address check in tg3_start_xmit().
17758 	 */
17759 	if (tg3_flag(tp, IS_5788))
17760 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17761 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17762 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17763 #ifdef CONFIG_HIGHMEM
17764 		dma_mask = DMA_BIT_MASK(64);
17765 #endif
17766 	} else
17767 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17768 
17769 	/* Configure DMA attributes. */
17770 	if (dma_mask > DMA_BIT_MASK(32)) {
17771 		err = pci_set_dma_mask(pdev, dma_mask);
17772 		if (!err) {
17773 			features |= NETIF_F_HIGHDMA;
17774 			err = pci_set_consistent_dma_mask(pdev,
17775 							  persist_dma_mask);
17776 			if (err < 0) {
17777 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17778 					"DMA for consistent allocations\n");
17779 				goto err_out_apeunmap;
17780 			}
17781 		}
17782 	}
17783 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17784 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17785 		if (err) {
17786 			dev_err(&pdev->dev,
17787 				"No usable DMA configuration, aborting\n");
17788 			goto err_out_apeunmap;
17789 		}
17790 	}
17791 
17792 	tg3_init_bufmgr_config(tp);
17793 
17794 	/* 5700 B0 chips do not support checksumming correctly due
17795 	 * to hardware bugs.
17796 	 */
17797 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17798 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17799 
17800 		if (tg3_flag(tp, 5755_PLUS))
17801 			features |= NETIF_F_IPV6_CSUM;
17802 	}
17803 
17804 	/* TSO is on by default on chips that support hardware TSO.
17805 	 * Firmware TSO on older chips gives lower performance, so it
17806 	 * is off by default, but can be enabled using ethtool.
17807 	 */
17808 	if ((tg3_flag(tp, HW_TSO_1) ||
17809 	     tg3_flag(tp, HW_TSO_2) ||
17810 	     tg3_flag(tp, HW_TSO_3)) &&
17811 	    (features & NETIF_F_IP_CSUM))
17812 		features |= NETIF_F_TSO;
17813 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17814 		if (features & NETIF_F_IPV6_CSUM)
17815 			features |= NETIF_F_TSO6;
17816 		if (tg3_flag(tp, HW_TSO_3) ||
17817 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17818 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17819 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17820 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17821 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17822 			features |= NETIF_F_TSO_ECN;
17823 	}
17824 
17825 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17826 			 NETIF_F_HW_VLAN_CTAG_RX;
17827 	dev->vlan_features |= features;
17828 
17829 	/*
17830 	 * Add loopback capability only for a subset of devices that support
17831 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17832 	 * loopback for the remaining devices.
17833 	 */
17834 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17835 	    !tg3_flag(tp, CPMU_PRESENT))
17836 		/* Add the loopback capability */
17837 		features |= NETIF_F_LOOPBACK;
17838 
17839 	dev->hw_features |= features;
17840 	dev->priv_flags |= IFF_UNICAST_FLT;
17841 
17842 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17843 	dev->min_mtu = TG3_MIN_MTU;
17844 	dev->max_mtu = TG3_MAX_MTU(tp);
17845 
17846 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17847 	    !tg3_flag(tp, TSO_CAPABLE) &&
17848 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17849 		tg3_flag_set(tp, MAX_RXPEND_64);
17850 		tp->rx_pending = 63;
17851 	}
17852 
17853 	err = tg3_get_device_address(tp);
17854 	if (err) {
17855 		dev_err(&pdev->dev,
17856 			"Could not obtain valid ethernet address, aborting\n");
17857 		goto err_out_apeunmap;
17858 	}
17859 
17860 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17861 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17862 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17863 	for (i = 0; i < tp->irq_max; i++) {
17864 		struct tg3_napi *tnapi = &tp->napi[i];
17865 
17866 		tnapi->tp = tp;
17867 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17868 
17869 		tnapi->int_mbox = intmbx;
17870 		if (i <= 4)
17871 			intmbx += 0x8;
17872 		else
17873 			intmbx += 0x4;
17874 
17875 		tnapi->consmbox = rcvmbx;
17876 		tnapi->prodmbox = sndmbx;
17877 
17878 		if (i)
17879 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17880 		else
17881 			tnapi->coal_now = HOSTCC_MODE_NOW;
17882 
17883 		if (!tg3_flag(tp, SUPPORT_MSIX))
17884 			break;
17885 
17886 		/*
17887 		 * If we support MSIX, we'll be using RSS.  If we're using
17888 		 * RSS, the first vector only handles link interrupts and the
17889 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17890 		 * mailbox values for the next iteration.  The values we setup
17891 		 * above are still useful for the single vectored mode.
17892 		 */
17893 		if (!i)
17894 			continue;
17895 
17896 		rcvmbx += 0x8;
17897 
17898 		if (sndmbx & 0x4)
17899 			sndmbx -= 0x4;
17900 		else
17901 			sndmbx += 0xc;
17902 	}
17903 
17904 	/*
17905 	 * Reset chip in case UNDI or EFI driver did not shutdown
17906 	 * DMA self test will enable WDMAC and we'll see (spurious)
17907 	 * pending DMA on the PCI bus at that point.
17908 	 */
17909 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17910 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17911 		tg3_full_lock(tp, 0);
17912 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17913 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17914 		tg3_full_unlock(tp);
17915 	}
17916 
17917 	err = tg3_test_dma(tp);
17918 	if (err) {
17919 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17920 		goto err_out_apeunmap;
17921 	}
17922 
17923 	tg3_init_coal(tp);
17924 
17925 	pci_set_drvdata(pdev, dev);
17926 
17927 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17928 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17929 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17930 		tg3_flag_set(tp, PTP_CAPABLE);
17931 
17932 	tg3_timer_init(tp);
17933 
17934 	tg3_carrier_off(tp);
17935 
17936 	err = register_netdev(dev);
17937 	if (err) {
17938 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17939 		goto err_out_apeunmap;
17940 	}
17941 
17942 	if (tg3_flag(tp, PTP_CAPABLE)) {
17943 		tg3_ptp_init(tp);
17944 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17945 						   &tp->pdev->dev);
17946 		if (IS_ERR(tp->ptp_clock))
17947 			tp->ptp_clock = NULL;
17948 	}
17949 
17950 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17951 		    tp->board_part_number,
17952 		    tg3_chip_rev_id(tp),
17953 		    tg3_bus_string(tp, str),
17954 		    dev->dev_addr);
17955 
17956 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17957 		char *ethtype;
17958 
17959 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17960 			ethtype = "10/100Base-TX";
17961 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17962 			ethtype = "1000Base-SX";
17963 		else
17964 			ethtype = "10/100/1000Base-T";
17965 
17966 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17967 			    "(WireSpeed[%d], EEE[%d])\n",
17968 			    tg3_phy_string(tp), ethtype,
17969 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17970 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17971 	}
17972 
17973 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17974 		    (dev->features & NETIF_F_RXCSUM) != 0,
17975 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17976 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17977 		    tg3_flag(tp, ENABLE_ASF) != 0,
17978 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17979 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17980 		    tp->dma_rwctrl,
17981 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17982 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17983 
17984 	pci_save_state(pdev);
17985 
17986 	return 0;
17987 
17988 err_out_apeunmap:
17989 	if (tp->aperegs) {
17990 		iounmap(tp->aperegs);
17991 		tp->aperegs = NULL;
17992 	}
17993 
17994 err_out_iounmap:
17995 	if (tp->regs) {
17996 		iounmap(tp->regs);
17997 		tp->regs = NULL;
17998 	}
17999 
18000 err_out_free_dev:
18001 	free_netdev(dev);
18002 
18003 err_out_free_res:
18004 	pci_release_regions(pdev);
18005 
18006 err_out_disable_pdev:
18007 	if (pci_is_enabled(pdev))
18008 		pci_disable_device(pdev);
18009 	return err;
18010 }
18011 
18012 static void tg3_remove_one(struct pci_dev *pdev)
18013 {
18014 	struct net_device *dev = pci_get_drvdata(pdev);
18015 
18016 	if (dev) {
18017 		struct tg3 *tp = netdev_priv(dev);
18018 
18019 		tg3_ptp_fini(tp);
18020 
18021 		release_firmware(tp->fw);
18022 
18023 		tg3_reset_task_cancel(tp);
18024 
18025 		if (tg3_flag(tp, USE_PHYLIB)) {
18026 			tg3_phy_fini(tp);
18027 			tg3_mdio_fini(tp);
18028 		}
18029 
18030 		unregister_netdev(dev);
18031 		if (tp->aperegs) {
18032 			iounmap(tp->aperegs);
18033 			tp->aperegs = NULL;
18034 		}
18035 		if (tp->regs) {
18036 			iounmap(tp->regs);
18037 			tp->regs = NULL;
18038 		}
18039 		free_netdev(dev);
18040 		pci_release_regions(pdev);
18041 		pci_disable_device(pdev);
18042 	}
18043 }
18044 
18045 #ifdef CONFIG_PM_SLEEP
18046 static int tg3_suspend(struct device *device)
18047 {
18048 	struct net_device *dev = dev_get_drvdata(device);
18049 	struct tg3 *tp = netdev_priv(dev);
18050 	int err = 0;
18051 
18052 	rtnl_lock();
18053 
18054 	if (!netif_running(dev))
18055 		goto unlock;
18056 
18057 	tg3_reset_task_cancel(tp);
18058 	tg3_phy_stop(tp);
18059 	tg3_netif_stop(tp);
18060 
18061 	tg3_timer_stop(tp);
18062 
18063 	tg3_full_lock(tp, 1);
18064 	tg3_disable_ints(tp);
18065 	tg3_full_unlock(tp);
18066 
18067 	netif_device_detach(dev);
18068 
18069 	tg3_full_lock(tp, 0);
18070 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18071 	tg3_flag_clear(tp, INIT_COMPLETE);
18072 	tg3_full_unlock(tp);
18073 
18074 	err = tg3_power_down_prepare(tp);
18075 	if (err) {
18076 		int err2;
18077 
18078 		tg3_full_lock(tp, 0);
18079 
18080 		tg3_flag_set(tp, INIT_COMPLETE);
18081 		err2 = tg3_restart_hw(tp, true);
18082 		if (err2)
18083 			goto out;
18084 
18085 		tg3_timer_start(tp);
18086 
18087 		netif_device_attach(dev);
18088 		tg3_netif_start(tp);
18089 
18090 out:
18091 		tg3_full_unlock(tp);
18092 
18093 		if (!err2)
18094 			tg3_phy_start(tp);
18095 	}
18096 
18097 unlock:
18098 	rtnl_unlock();
18099 	return err;
18100 }
18101 
18102 static int tg3_resume(struct device *device)
18103 {
18104 	struct net_device *dev = dev_get_drvdata(device);
18105 	struct tg3 *tp = netdev_priv(dev);
18106 	int err = 0;
18107 
18108 	rtnl_lock();
18109 
18110 	if (!netif_running(dev))
18111 		goto unlock;
18112 
18113 	netif_device_attach(dev);
18114 
18115 	tg3_full_lock(tp, 0);
18116 
18117 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18118 
18119 	tg3_flag_set(tp, INIT_COMPLETE);
18120 	err = tg3_restart_hw(tp,
18121 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18122 	if (err)
18123 		goto out;
18124 
18125 	tg3_timer_start(tp);
18126 
18127 	tg3_netif_start(tp);
18128 
18129 out:
18130 	tg3_full_unlock(tp);
18131 
18132 	if (!err)
18133 		tg3_phy_start(tp);
18134 
18135 unlock:
18136 	rtnl_unlock();
18137 	return err;
18138 }
18139 #endif /* CONFIG_PM_SLEEP */
18140 
18141 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18142 
18143 static void tg3_shutdown(struct pci_dev *pdev)
18144 {
18145 	struct net_device *dev = pci_get_drvdata(pdev);
18146 	struct tg3 *tp = netdev_priv(dev);
18147 
18148 	rtnl_lock();
18149 	netif_device_detach(dev);
18150 
18151 	if (netif_running(dev))
18152 		dev_close(dev);
18153 
18154 	if (system_state == SYSTEM_POWER_OFF)
18155 		tg3_power_down(tp);
18156 
18157 	rtnl_unlock();
18158 }
18159 
18160 /**
18161  * tg3_io_error_detected - called when PCI error is detected
18162  * @pdev: Pointer to PCI device
18163  * @state: The current pci connection state
18164  *
18165  * This function is called after a PCI bus error affecting
18166  * this device has been detected.
18167  */
18168 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18169 					      pci_channel_state_t state)
18170 {
18171 	struct net_device *netdev = pci_get_drvdata(pdev);
18172 	struct tg3 *tp = netdev_priv(netdev);
18173 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18174 
18175 	netdev_info(netdev, "PCI I/O error detected\n");
18176 
18177 	rtnl_lock();
18178 
18179 	/* We probably don't have netdev yet */
18180 	if (!netdev || !netif_running(netdev))
18181 		goto done;
18182 
18183 	/* We needn't recover from permanent error */
18184 	if (state == pci_channel_io_frozen)
18185 		tp->pcierr_recovery = true;
18186 
18187 	tg3_phy_stop(tp);
18188 
18189 	tg3_netif_stop(tp);
18190 
18191 	tg3_timer_stop(tp);
18192 
18193 	/* Want to make sure that the reset task doesn't run */
18194 	tg3_reset_task_cancel(tp);
18195 
18196 	netif_device_detach(netdev);
18197 
18198 	/* Clean up software state, even if MMIO is blocked */
18199 	tg3_full_lock(tp, 0);
18200 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18201 	tg3_full_unlock(tp);
18202 
18203 done:
18204 	if (state == pci_channel_io_perm_failure) {
18205 		if (netdev) {
18206 			tg3_napi_enable(tp);
18207 			dev_close(netdev);
18208 		}
18209 		err = PCI_ERS_RESULT_DISCONNECT;
18210 	} else {
18211 		pci_disable_device(pdev);
18212 	}
18213 
18214 	rtnl_unlock();
18215 
18216 	return err;
18217 }
18218 
18219 /**
18220  * tg3_io_slot_reset - called after the pci bus has been reset.
18221  * @pdev: Pointer to PCI device
18222  *
18223  * Restart the card from scratch, as if from a cold-boot.
18224  * At this point, the card has exprienced a hard reset,
18225  * followed by fixups by BIOS, and has its config space
18226  * set up identically to what it was at cold boot.
18227  */
18228 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18229 {
18230 	struct net_device *netdev = pci_get_drvdata(pdev);
18231 	struct tg3 *tp = netdev_priv(netdev);
18232 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18233 	int err;
18234 
18235 	rtnl_lock();
18236 
18237 	if (pci_enable_device(pdev)) {
18238 		dev_err(&pdev->dev,
18239 			"Cannot re-enable PCI device after reset.\n");
18240 		goto done;
18241 	}
18242 
18243 	pci_set_master(pdev);
18244 	pci_restore_state(pdev);
18245 	pci_save_state(pdev);
18246 
18247 	if (!netdev || !netif_running(netdev)) {
18248 		rc = PCI_ERS_RESULT_RECOVERED;
18249 		goto done;
18250 	}
18251 
18252 	err = tg3_power_up(tp);
18253 	if (err)
18254 		goto done;
18255 
18256 	rc = PCI_ERS_RESULT_RECOVERED;
18257 
18258 done:
18259 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18260 		tg3_napi_enable(tp);
18261 		dev_close(netdev);
18262 	}
18263 	rtnl_unlock();
18264 
18265 	return rc;
18266 }
18267 
18268 /**
18269  * tg3_io_resume - called when traffic can start flowing again.
18270  * @pdev: Pointer to PCI device
18271  *
18272  * This callback is called when the error recovery driver tells
18273  * us that its OK to resume normal operation.
18274  */
18275 static void tg3_io_resume(struct pci_dev *pdev)
18276 {
18277 	struct net_device *netdev = pci_get_drvdata(pdev);
18278 	struct tg3 *tp = netdev_priv(netdev);
18279 	int err;
18280 
18281 	rtnl_lock();
18282 
18283 	if (!netdev || !netif_running(netdev))
18284 		goto done;
18285 
18286 	tg3_full_lock(tp, 0);
18287 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18288 	tg3_flag_set(tp, INIT_COMPLETE);
18289 	err = tg3_restart_hw(tp, true);
18290 	if (err) {
18291 		tg3_full_unlock(tp);
18292 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18293 		goto done;
18294 	}
18295 
18296 	netif_device_attach(netdev);
18297 
18298 	tg3_timer_start(tp);
18299 
18300 	tg3_netif_start(tp);
18301 
18302 	tg3_full_unlock(tp);
18303 
18304 	tg3_phy_start(tp);
18305 
18306 done:
18307 	tp->pcierr_recovery = false;
18308 	rtnl_unlock();
18309 }
18310 
18311 static const struct pci_error_handlers tg3_err_handler = {
18312 	.error_detected	= tg3_io_error_detected,
18313 	.slot_reset	= tg3_io_slot_reset,
18314 	.resume		= tg3_io_resume
18315 };
18316 
18317 static struct pci_driver tg3_driver = {
18318 	.name		= DRV_MODULE_NAME,
18319 	.id_table	= tg3_pci_tbl,
18320 	.probe		= tg3_init_one,
18321 	.remove		= tg3_remove_one,
18322 	.err_handler	= &tg3_err_handler,
18323 	.driver.pm	= &tg3_pm_ops,
18324 	.shutdown	= tg3_shutdown,
18325 };
18326 
18327 module_pci_driver(tg3_driver);
18328